VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 33834

Last change on this file since 33834 was 33595, checked in by vboxsync, 14 years ago

src/*: more spelling fixes (logging), thanks Timeless!

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 170.6 KB
Line 
1/* $Id: VBoxRecompiler.c 33595 2010-10-29 10:35:00Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "vl.h"
24#include "osdep.h"
25#include "exec-all.h"
26#include "config.h"
27#include "cpu-all.h"
28
29#include <VBox/rem.h>
30#include <VBox/vmapi.h>
31#include <VBox/tm.h>
32#include <VBox/ssm.h>
33#include <VBox/em.h>
34#include <VBox/trpm.h>
35#include <VBox/iom.h>
36#include <VBox/mm.h>
37#include <VBox/pgm.h>
38#include <VBox/pdm.h>
39#include <VBox/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/hwaccm.h>
42#include <VBox/patm.h>
43#include <VBox/csam.h>
44#include "REMInternal.h"
45#include <VBox/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 *
 * Implemented as a plain struct assignment so the compiler can emit an
 * inline 10-byte copy instead of a libc call.  Wrapped in do/while(0) so
 * the macro behaves like a single statement (safe in if/else bodies).
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
85static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
88static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
89
90static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
91static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
92static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
93static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
94static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
95static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
96
97static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
98static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
99static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
100static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
101static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
102static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
103
104static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
105static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
106static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
/** @todo Move stats to REM::s some rainy day we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the major REM execution phases; registered with STAM in
   REMR3Init and deregistered in REMR3Term. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Reasons why raw-mode execution was refused (see remR3CanExecuteRaw & co.). */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor-table / task-register change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Indexed by segment register (ES,CS,SS,DS,FS,GS). */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* Counters maintained by the recompiler core in exec.c; exposed via STAM below. */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
149
150/*
151 * Global stuff.
152 */
153
/** MMIO read callbacks, indexed by access size log2 (0=U8, 1=U16, 2=U32).
 *  Passed to cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access size log2 (0=U8, 1=U16, 2=U32). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access-handler read callbacks for pages with PGM physical handlers,
 *  indexed by access size log2 (0=U8, 1=U16, 2=U32). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access-handler write callbacks, indexed by access size log2 (0=U8, 1=U16, 2=U32). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
185
186
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.  Registered once with DBGC in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc    = NULL,
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
218
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 *  Allocated with RTMemExecAlloc in REMR3Init. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical??  (Historical note: an exact-size check
   was once used on these platforms; only the <= bound is enforced now.) */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif
238
239
/**
 * Initializes the REM.
 *
 * Sets up the recompiled-execution engine: sanity checks the REM/VM layout,
 * creates the registration critical section, allocates the code generation
 * buffers, initializes the QEMU CPU state, registers the MMIO/handler memory
 * types, the saved-state unit, debugger command and statistics, and finally
 * builds the handler-notification free list.
 *
 * NOTE(review): must run before any physical memory is registered with MM —
 * asserted below via MMR3PhysGetRamSize(pVM) == 0.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications while the recompiler state is being built up */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue/epilogue trampoline for generated code; must be executable. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature bits into the QEMU CPU state. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.  Registered at most once per process (function-local
     * static guard), regardless of how many VMs are initialized.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",         STAMUNIT_OCCURENCES,     "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES,     "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",     STAMUNIT_OCCURENCES,     "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",   STAMUNIT_OCCURENCES,     "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",      STAMUNIT_OCCURENCES,     "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES,     "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",      STAMUNIT_OCCURENCES,     "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",   STAMUNIT_OCCURENCES,     "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",      STAMUNIT_OCCURENCES,     "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES,     "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",         STAMUNIT_OCCURENCES,     "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",  STAMUNIT_OCCURENCES,     "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",     STAMUNIT_OCCURENCES,     "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",  STAMUNIT_OCCURENCES,     "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",       STAMUNIT_OCCURENCES,     "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters exported from exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count,              STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,    STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,             STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Init the handler notification lists: empty pending list, and a free
     * list chaining all entries of aHandlerNotifications in order.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record (pCur still points at the final array entry here). */

    /* rc is the (successful) status of SSMR3RegisterInternal at this point. */
    return rc;
}
437
438
439/**
440 * Finalizes the REM initialization.
441 *
442 * This is called after all components, devices and drivers has
443 * been initialized. Its main purpose it to finish the RAM related
444 * initialization.
445 *
446 * @returns VBox status code.
447 *
448 * @param pVM The VM handle.
449 */
450REMR3DECL(int) REMR3InitFinalize(PVM pVM)
451{
452 int rc;
453
454 /*
455 * Ram size & dirty bit map.
456 */
457 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
458 pVM->rem.s.fGCPhysLastRamFixed = true;
459#ifdef RT_STRICT
460 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
461#else
462 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
463#endif
464 return rc;
465}
466
467
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map has one byte per guest page.  When fGuarded is set, the map
 * is page-aligned and followed by an inaccessible (PROT_NONE) guard region
 * of at least 64KB, so that out-of-bounds dirty-map accesses fault instead
 * of silently corrupting memory.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* GCPhysLastRam is the last byte address; +1 gives the RAM size and must
       not wrap around (would indicate an out-of-range last address). */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty byte per page; verify cb was page aligned (no pages lost in the shift). */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         *
         * cbBitmapAligned: dirty map size rounded up to a whole page.
         * cbBitmapFull:    rounded up to the map size of a 4GB VM (1MB with 4K
         *                  pages); the slack beyond cbBitmapAligned becomes the
         *                  guard, padded below to guarantee at least 64KB.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything after the aligned map inaccessible. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map pointer forward so its END abuts the guard region;
           the unused bytes at the allocation start are simply wasted. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages initially dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
522
523
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently only deregisters the statistics registered in REMR3Init;
 * the mirror image of the STAM_REG/STAM_REL_REG calls there.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
594
595
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.  The cIgnoreAll increment/decrement pair suppresses
     * notification processing for the duration of cpu_reset(); the asserts
     * verify the counter is balanced on entry and exit.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
622
623
624/**
625 * Execute state save operation.
626 *
627 * @returns VBox status code.
628 * @param pVM VM Handle.
629 * @param pSSM SSM operation handle.
630 */
631static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
632{
633 PREM pRem = &pVM->rem.s;
634
635 /*
636 * Save the required CPU Env bits.
637 * (Not much because we're never in REM when doing the save.)
638 */
639 LogFlow(("remR3Save:\n"));
640 Assert(!pRem->fInREM);
641 SSMR3PutU32(pSSM, pRem->Env.hflags);
642 SSMR3PutU32(pSSM, ~0); /* separator */
643
644 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
645 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
646 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
647
648 return SSMR3PutU32(pSSM, ~0); /* terminator */
649}
650
651
652/**
653 * Execute state load operation.
654 *
655 * @returns VBox status code.
656 * @param pVM VM Handle.
657 * @param pSSM SSM operation handle.
658 * @param uVersion Data layout version.
659 * @param uPass The data pass.
660 */
661static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
662{
663 uint32_t u32Dummy;
664 uint32_t fRawRing0 = false;
665 uint32_t u32Sep;
666 uint32_t i;
667 int rc;
668 PREM pRem;
669
670 LogFlow(("remR3Load:\n"));
671 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
672
673 /*
674 * Validate version.
675 */
676 if ( uVersion != REM_SAVED_STATE_VERSION
677 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
678 {
679 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
680 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
681 }
682
683 /*
684 * Do a reset to be on the safe side...
685 */
686 REMR3Reset(pVM);
687
688 /*
689 * Ignore all ignorable notifications.
690 * (Not doing this will cause serious trouble.)
691 */
692 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
693
694 /*
695 * Load the required CPU Env bits.
696 * (Not much because we're never in REM when doing the save.)
697 */
698 pRem = &pVM->rem.s;
699 Assert(!pRem->fInREM);
700 SSMR3GetU32(pSSM, &pRem->Env.hflags);
701 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
702 {
703 /* Redundant REM CPU state has to be loaded, but can be ignored. */
704 CPUX86State_Ver16 temp;
705 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
706 }
707
708 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
709 if (RT_FAILURE(rc))
710 return rc;
711 if (u32Sep != ~0U)
712 {
713 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
714 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
715 }
716
717 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
718 SSMR3GetUInt(pSSM, &fRawRing0);
719 if (fRawRing0)
720 pRem->Env.state |= CPU_RAW_RING0;
721
722 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
723 {
724 /*
725 * Load the REM stuff.
726 */
727 /** @todo r=bird: We should just drop all these items, restoring doesn't make
728 * sense. */
729 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
730 if (RT_FAILURE(rc))
731 return rc;
732 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
733 {
734 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
735 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
736 }
737 for (i = 0; i < pRem->cInvalidatedPages; i++)
738 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
739 }
740
741 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
742 if (RT_FAILURE(rc))
743 return rc;
744
745 /* check the terminator. */
746 rc = SSMR3GetU32(pSSM, &u32Sep);
747 if (RT_FAILURE(rc))
748 return rc;
749 if (u32Sep != ~0U)
750 {
751 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
752 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
753 }
754
755 /*
756 * Get the CPUID features.
757 */
758 PVMCPU pVCpu = VMMGetCpu(pVM);
759 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
760 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
761
762 /*
763 * Sync the Load Flush the TLB
764 */
765 tlb_flush(&pRem->Env, 1);
766
767 /*
768 * Stop ignoring ignorable notifications.
769 */
770 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
771
772 /*
773 * Sync the whole CPU state when executing code in the recompiler.
774 */
775 for (i = 0; i < pVM->cCpus; i++)
776 {
777 PVMCPU pVCpu = &pVM->aCpus[i];
778 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
779 }
780 return VINF_SUCCESS;
781}
782
783
784
785#undef LOG_GROUP
786#define LOG_GROUP LOG_GROUP_REM_RUN
787
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     *
     * The current interrupt_request is saved and restored at the end.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 on success, so fBp records whether one was removed.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Pulse the clock (resume+suspend) so virtual time advances, then
           report the successful step. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the recompiler exit codes to VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was posted via pVM->rem.s.rc; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Reinstall the breakpoint removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
872
873
874/**
875 * Set a breakpoint using the REM facilities.
876 *
877 * @returns VBox status code.
878 * @param pVM The VM handle.
879 * @param Address The breakpoint address.
880 * @thread The emulation thread.
881 */
882REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
883{
884 VM_ASSERT_EMT(pVM);
885 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
886 {
887 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
888 return VINF_SUCCESS;
889 }
890 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
891 return VERR_REM_NO_MORE_BP_SLOTS;
892}
893
894
895/**
896 * Clears a breakpoint set by REMR3BreakpointSet().
897 *
898 * @returns VBox status code.
899 * @param pVM The VM handle.
900 * @param Address The breakpoint address.
901 * @thread The emulation thread.
902 */
903REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
904{
905 VM_ASSERT_EMT(pVM);
906 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
907 {
908 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
909 return VINF_SUCCESS;
910 }
911 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
912 return VERR_REM_BP_NOT_FOUND;
913}
914
915
916/**
917 * Emulate an instruction.
918 *
919 * This function executes one instruction without letting anyone
920 * interrupt it. This is intended for being called while being in
921 * raw mode and thus will take care of all the state syncing between
922 * REM and the rest.
923 *
924 * @returns VBox status code.
925 * @param pVM VM handle.
926 * @param pVCpu VMCPU Handle.
927 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs; /* restore the deferred TB flush request for the next full REM entry */
    if (RT_SUCCESS(rc))
    {
        /* Save interrupt_request so it can be restored after the single-instruction run. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the breakpoint table for the current PC. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* reset the pending status */
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1064
1065
1066/**
1067 * Runs code in recompiled mode.
1068 *
1069 * Before calling this function the REM state needs to be in sync with
1070 * the VM. Call REMR3State() to perform the sync. It's only necessary
1071 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1072 * and after calling REMR3StateBack().
1073 *
1074 * @returns VBox status code.
1075 *
1076 * @param pVM VM Handle.
1077 * @param pVCpu VMCPU Handle.
1078 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            /* Developer-only debugging scaffolding; disabled via '#if 0'. */
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the breakpoint table for the current PC. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* reset the pending status */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1193
1194
1195/**
1196 * Check if the cpu state is suitable for Raw execution.
1197 *
1198 * @returns boolean
1199 * @param env The CPU env struct.
1200 * @param eip The EIP to check this for (might differ from env->eip).
1201 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1202 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1203 *
1204 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1205 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        /* QEMU keeps the descriptor attribute bits shifted up by 8;
           '(flags >> 8) & 0xF0FF' converts them to the CPUMCTX attribute layout. */
        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr = env->ldt.selector;
        Ctx.ldtrHid.u64Base = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging on (CR0.PE and CR0.PG both set). */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        /* PAE is only allowed when the (virtual) CPU reports the PAE feature bit. */
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 guest code. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring 0 to 2 guest code (only ring 0 accepted below). */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* PATM patch code is always forced to raw mode. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1434
1435
1436/**
1437 * Fetches a code byte.
1438 *
1439 * @returns Success indicator (bool) for ease of use.
1440 * @param env The CPU environment structure.
1441 * @param GCPtrInstr Where to fetch code.
1442 * @param pu8Byte Where to store the byte on success
1443 */
1444bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1445{
1446 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1447 if (RT_SUCCESS(rc))
1448 return true;
1449 return false;
1450}
1451
1452
1453/**
1454 * Flush (or invalidate if you like) page table/dir entry.
1455 *
1456 * (invlpg instruction; tlb_flush_page)
1457 *
1458 * @param env Pointer to cpu environment.
1459 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1460 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires a TSS resync; flag it for SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        /* Fall back to a full CR3 sync when the single-page invalidation failed. */
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1501
1502
1503#ifndef REM_PHYS_ADDR_IN_TLB
1504/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is not forwarded; the call below
       hardcodes true. Confirm this is intentional before relying on it. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
        || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
        || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
        || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    /* The low two bits of the return value encode status for the caller:
       1 = failure (catch-all/unassigned), bit 1 set = write access must be caught. */
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1524#endif /* REM_PHYS_ADDR_IN_TLB */
1525
1526
1527/**
1528 * Called from tlb_protect_code in order to write monitor a code page.
1529 *
1530 * @param env Pointer to the CPU environment.
1531 * @param GCPtr Code page to monitor
1532 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (    (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1545
1546
1547/**
1548 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1549 *
1550 * @param env Pointer to the CPU environment.
1551 * @param GCPtr Code page to monitor
1552 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    /* NOTE(review): unlike remR3ProtectCode, this assertion sits outside the
       #ifdef, so it fires even when SMC protection is compiled out — confirm. */
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (    (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1565
1566
1567/**
1568 * Called when the CPU is initialized, any of the CRx registers are changed or
1569 * when the A20 line is modified.
1570 *
1571 * @param env Pointer to the CPU environment.
1572 * @param fGlobal Set if the flush is global.
1573 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so any flush is a global flush.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires a TSS resync; flag it for SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1611
1612
1613/**
1614 * Called when any of the cr0, cr4 or efer registers is updated.
1615 *
1616 * @param env Pointer to the CPU environment.
1617 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires a TSS resync; flag it for SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    efer = 0; /* no EFER on 32-bit only targets */
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM status: forward it via the pending-RC mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1663
1664
1665/**
1666 * Called from compiled code to run dma.
1667 *
1668 * @param env Pointer to the CPU environment.
1669 */
void remR3DmaRun(CPUState *env)
{
    /* Suspend the emulated-code profiling while PDM processes pending DMA work. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1676
1677
1678/**
1679 * Called from compiled code to schedule pending timers in VMM
1680 *
1681 * @param env Pointer to the CPU environment.
1682 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    /* Switch the profiling from emulated code to timer-queue work and back. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1693
1694
1695/**
1696 * Record trap occurrence
1697 *
1698 * @returns VBox status code
1699 * @param env Pointer to the CPU environment.
1700 * @param uTrap Trap nr
1701 * @param uErrorCode Error code
1702 * @param pvNextEIP Next EIP
1703 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        /* Lazily register a per-trap STAM counter the first time each trap number is seen. */
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
        /* CPU exception (0..0x1f) in protected, non-V86 mode. */
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Guard against trap loops: more than 512 repeats of the same trap number in a row
           (the counter is reset to 1 below when EIP or CR2 change) is treated as fatal. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap or a different location restarts the repeat count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not a CPU exception (or real/V86 mode): just record it, no loop detection. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1754
1755
1756/*
1757 * Clear current active trap
1758 *
1759 * @param pVM VM Handle.
1760 */
1761void remR3TrapClear(PVM pVM)
1762{
1763 pVM->rem.s.cPendingExceptions = 0;
1764 pVM->rem.s.uPendingException = 0;
1765 pVM->rem.s.uPendingExcptEIP = 0;
1766 pVM->rem.s.uPendingExcptCR2 = 0;
1767}
1768
1769
1770/*
1771 * Record previous call instruction addresses
1772 *
1773 * @param env Pointer to the CPU environment.
1774 */
void remR3RecordCall(CPUState *env)
{
    /* Hand the current EIP to CSAM so it can record the call address. */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1779
1780
1781/**
1782 * Syncs the internal REM state with the VM.
1783 *
 * This must be called before REMR3Run() is invoked whenever the REM
1785 * state is not up to date. Calling it several times in a row is not
1786 * permitted.
1787 *
1788 * @returns VBox status code.
1789 *
1790 * @param pVM VM Handle.
1791 * @param pVCpu VMCPU Handle.
1792 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
1796 */
1797REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1798{
1799 register const CPUMCTX *pCtx;
1800 register unsigned fFlags;
1801 bool fHiddenSelRegsValid;
1802 unsigned i;
1803 TRPMEVENT enmType;
1804 uint8_t u8TrapNo;
1805 uint32_t uCpl;
1806 int rc;
1807
1808 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1809 Log2(("REMR3State:\n"));
1810
1811 pVM->rem.s.Env.pVCpu = pVCpu;
1812 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1813 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
1814
1815 Assert(!pVM->rem.s.fInREM);
1816 pVM->rem.s.fInStateSync = true;
1817
1818 /*
1819 * If we have to flush TBs, do that immediately.
1820 */
1821 if (pVM->rem.s.fFlushTBs)
1822 {
1823 STAM_COUNTER_INC(&gStatFlushTBs);
1824 tb_flush(&pVM->rem.s.Env);
1825 pVM->rem.s.fFlushTBs = false;
1826 }
1827
1828 /*
1829 * Copy the registers which require no special handling.
1830 */
1831#ifdef TARGET_X86_64
1832 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1833 Assert(R_EAX == 0);
1834 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1835 Assert(R_ECX == 1);
1836 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1837 Assert(R_EDX == 2);
1838 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1839 Assert(R_EBX == 3);
1840 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1841 Assert(R_ESP == 4);
1842 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1843 Assert(R_EBP == 5);
1844 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1845 Assert(R_ESI == 6);
1846 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1847 Assert(R_EDI == 7);
1848 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1849 pVM->rem.s.Env.regs[8] = pCtx->r8;
1850 pVM->rem.s.Env.regs[9] = pCtx->r9;
1851 pVM->rem.s.Env.regs[10] = pCtx->r10;
1852 pVM->rem.s.Env.regs[11] = pCtx->r11;
1853 pVM->rem.s.Env.regs[12] = pCtx->r12;
1854 pVM->rem.s.Env.regs[13] = pCtx->r13;
1855 pVM->rem.s.Env.regs[14] = pCtx->r14;
1856 pVM->rem.s.Env.regs[15] = pCtx->r15;
1857
1858 pVM->rem.s.Env.eip = pCtx->rip;
1859
1860 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1861#else
1862 Assert(R_EAX == 0);
1863 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1864 Assert(R_ECX == 1);
1865 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1866 Assert(R_EDX == 2);
1867 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1868 Assert(R_EBX == 3);
1869 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1870 Assert(R_ESP == 4);
1871 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1872 Assert(R_EBP == 5);
1873 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1874 Assert(R_ESI == 6);
1875 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1876 Assert(R_EDI == 7);
1877 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1878 pVM->rem.s.Env.eip = pCtx->eip;
1879
1880 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1881#endif
1882
1883 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1884
1885 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1886 for (i=0;i<8;i++)
1887 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1888
1889 /*
1890 * Clear the halted hidden flag (the interrupt waking up the CPU can
1891 * have been dispatched in raw mode).
1892 */
1893 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1894
1895 /*
1896 * Replay invlpg?
1897 */
1898 if (pVM->rem.s.cInvalidatedPages)
1899 {
1900 RTUINT i;
1901
1902 pVM->rem.s.fIgnoreInvlPg = true;
1903 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1904 {
1905 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1906 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1907 }
1908 pVM->rem.s.fIgnoreInvlPg = false;
1909 pVM->rem.s.cInvalidatedPages = 0;
1910 }
1911
1912 /* Replay notification changes. */
1913 REMR3ReplayHandlerNotifications(pVM);
1914
1915 /* Update MSRs; before CRx registers! */
1916 pVM->rem.s.Env.efer = pCtx->msrEFER;
1917 pVM->rem.s.Env.star = pCtx->msrSTAR;
1918 pVM->rem.s.Env.pat = pCtx->msrPAT;
1919#ifdef TARGET_X86_64
1920 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1921 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1922 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1923 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1924
1925 /* Update the internal long mode activate flag according to the new EFER value. */
1926 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1927 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1928 else
1929 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1930#endif
1931
1932 /*
1933 * Registers which are rarely changed and require special handling / order when changed.
1934 */
1935 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
1936 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
1937 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1938 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1939 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1940 {
1941 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1942 {
1943 pVM->rem.s.fIgnoreCR3Load = true;
1944 tlb_flush(&pVM->rem.s.Env, true);
1945 pVM->rem.s.fIgnoreCR3Load = false;
1946 }
1947
1948 /* CR4 before CR0! */
1949 if (fFlags & CPUM_CHANGED_CR4)
1950 {
1951 pVM->rem.s.fIgnoreCR3Load = true;
1952 pVM->rem.s.fIgnoreCpuMode = true;
1953 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1954 pVM->rem.s.fIgnoreCpuMode = false;
1955 pVM->rem.s.fIgnoreCR3Load = false;
1956 }
1957
1958 if (fFlags & CPUM_CHANGED_CR0)
1959 {
1960 pVM->rem.s.fIgnoreCR3Load = true;
1961 pVM->rem.s.fIgnoreCpuMode = true;
1962 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1963 pVM->rem.s.fIgnoreCpuMode = false;
1964 pVM->rem.s.fIgnoreCR3Load = false;
1965 }
1966
1967 if (fFlags & CPUM_CHANGED_CR3)
1968 {
1969 pVM->rem.s.fIgnoreCR3Load = true;
1970 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1971 pVM->rem.s.fIgnoreCR3Load = false;
1972 }
1973
1974 if (fFlags & CPUM_CHANGED_GDTR)
1975 {
1976 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1977 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1978 }
1979
1980 if (fFlags & CPUM_CHANGED_IDTR)
1981 {
1982 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1983 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1984 }
1985
1986 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1987 {
1988 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1989 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1990 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1991 }
1992
1993 if (fFlags & CPUM_CHANGED_LDTR)
1994 {
1995 if (fHiddenSelRegsValid)
1996 {
1997 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1998 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1999 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2000 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2001 }
2002 else
2003 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2004 }
2005
2006 if (fFlags & CPUM_CHANGED_CPUID)
2007 {
2008 uint32_t u32Dummy;
2009
2010 /*
2011 * Get the CPUID features.
2012 */
2013 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2014 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2015 }
2016
2017 /* Sync FPU state after CR4, CPUID and EFER (!). */
2018 if (fFlags & CPUM_CHANGED_FPU_REM)
2019 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2020 }
2021
2022 /*
2023 * Sync TR unconditionally to make life simpler.
2024 */
2025 pVM->rem.s.Env.tr.selector = pCtx->tr;
2026 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2027 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2028 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2029 /* Note! do_interrupt will fault if the busy flag is still set... */
2030 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2031
2032 /*
2033 * Update selector registers.
2034 * This must be done *after* we've synced gdt, ldt and crX registers
2035 * since we're reading the GDT/LDT om sync_seg. This will happen with
2036 * saved state which takes a quick dip into rawmode for instance.
2037 */
2038 /*
2039 * Stack; Note first check this one as the CPL might have changed. The
2040 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2041 */
2042
2043 if (fHiddenSelRegsValid)
2044 {
2045 /* The hidden selector registers are valid in the CPU context. */
2046 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2047
2048 /* Set current CPL */
2049 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2050
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2052 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2053 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2054 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2055 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2056 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2057 }
2058 else
2059 {
2060 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2061 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2062 {
2063 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2064
2065 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2066 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2067#ifdef VBOX_WITH_STATISTICS
2068 if (pVM->rem.s.Env.segs[R_SS].newselector)
2069 {
2070 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2071 }
2072#endif
2073 }
2074 else
2075 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2076
2077 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2078 {
2079 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2080 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2081#ifdef VBOX_WITH_STATISTICS
2082 if (pVM->rem.s.Env.segs[R_ES].newselector)
2083 {
2084 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2085 }
2086#endif
2087 }
2088 else
2089 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2090
2091 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2092 {
2093 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2094 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2095#ifdef VBOX_WITH_STATISTICS
2096 if (pVM->rem.s.Env.segs[R_CS].newselector)
2097 {
2098 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2099 }
2100#endif
2101 }
2102 else
2103 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2104
2105 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2106 {
2107 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2108 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2109#ifdef VBOX_WITH_STATISTICS
2110 if (pVM->rem.s.Env.segs[R_DS].newselector)
2111 {
2112 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2113 }
2114#endif
2115 }
2116 else
2117 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2118
2119 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2120 * be the same but not the base/limit. */
2121 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2122 {
2123 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2124 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2125#ifdef VBOX_WITH_STATISTICS
2126 if (pVM->rem.s.Env.segs[R_FS].newselector)
2127 {
2128 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2129 }
2130#endif
2131 }
2132 else
2133 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2134
2135 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2136 {
2137 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2138 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2139#ifdef VBOX_WITH_STATISTICS
2140 if (pVM->rem.s.Env.segs[R_GS].newselector)
2141 {
2142 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2143 }
2144#endif
2145 }
2146 else
2147 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2148 }
2149
2150 /*
2151 * Check for traps.
2152 */
2153 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2154 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2155 if (RT_SUCCESS(rc))
2156 {
2157#ifdef DEBUG
2158 if (u8TrapNo == 0x80)
2159 {
2160 remR3DumpLnxSyscall(pVCpu);
2161 remR3DumpOBsdSyscall(pVCpu);
2162 }
2163#endif
2164
2165 pVM->rem.s.Env.exception_index = u8TrapNo;
2166 if (enmType != TRPM_SOFTWARE_INT)
2167 {
2168 pVM->rem.s.Env.exception_is_int = 0;
2169 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2170 }
2171 else
2172 {
2173 /*
2174 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2175 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2176 * for int03 and into.
2177 */
2178 pVM->rem.s.Env.exception_is_int = 1;
2179 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2180 /* int 3 may be generated by one-byte 0xcc */
2181 if (u8TrapNo == 3)
2182 {
2183 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2184 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2185 }
2186 /* int 4 may be generated by one-byte 0xce */
2187 else if (u8TrapNo == 4)
2188 {
2189 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2190 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2191 }
2192 }
2193
2194 /* get error code and cr2 if needed. */
2195 switch (u8TrapNo)
2196 {
2197 case 0x0e:
2198 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2199 /* fallthru */
2200 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2201 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2202 break;
2203
2204 case 0x11: case 0x08:
2205 default:
2206 pVM->rem.s.Env.error_code = 0;
2207 break;
2208 }
2209
2210 /*
2211 * We can now reset the active trap since the recompiler is gonna have a go at it.
2212 */
2213 rc = TRPMResetTrap(pVCpu);
2214 AssertRC(rc);
2215 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2216 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2217 }
2218
2219 /*
2220 * Clear old interrupt request flags; Check for pending hardware interrupts.
2221 * (See @remark for why we don't check for other FFs.)
2222 */
2223 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2224 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2225 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2226 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2227
2228 /*
2229 * We're now in REM mode.
2230 */
2231 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2232 pVM->rem.s.fInREM = true;
2233 pVM->rem.s.fInStateSync = false;
2234 pVM->rem.s.cCanExecuteRaw = 0;
2235 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2236 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2237 return VINF_SUCCESS;
2238}
2239
2240
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors the recompiler still has flagged as pending reload (newselector != 0). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle requires a TSS resync; flag it before taking over the new CR4 value. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: only raise the sync force-flags when the base actually moved. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-apply the busy bit real CPUs keep set on a loaded TR (QEmu strips it, see above). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base     = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit    = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
    pCtx->csHid.Attr.u      = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base     = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit    = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u      = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base     = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit    = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u      = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base     = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit    = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u      = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base     = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit    = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u      = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base     = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit    = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u      = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs       = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip      = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp      = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER           = pVM->rem.s.Env.efer;
    pCtx->msrSTAR           = pVM->rem.s.Env.star;
    pCtx->msrPAT            = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR          = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR          = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK         = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE   = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Forward the fault address (#PF) and error code (#TS..#PF, #AC, #DF) to TRPM. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                  );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2500
2501
/**
 * Syncs the register state from the recompiler environment back into CPUMCTX.
 *
 * This is called by the disassembler when it wants to update the cpu state
 * before for instance doing a register dump.  Unlike REMR3StateBack it does
 * not clear pending traps and does not leave REM mode (fInREM stays set).
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR         = 0;
    pCtx->fpu.MXCSR_MASK    = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];
#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle requires a TSS resync; flag it before taking over the new CR4 value. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: only raise the sync force-flags when the base actually moved. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* NOTE(review): the comparison below masks with 0xF0FF but the store uses
       0xFFFF, and REMR3StateBack stores 0xF0FF for the same field — this looks
       inconsistent; verify which mask is intended. */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-apply the busy bit real CPUs keep set on a loaded TR (QEmu strips it, see above). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base     = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit    = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u      = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;

    pCtx->dsHid.u64Base     = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit    = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u      = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;

    pCtx->esHid.u64Base     = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit    = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u      = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;

    pCtx->fsHid.u64Base     = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit    = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u      = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;

    pCtx->gsHid.u64Base     = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit    = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u      = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;

    pCtx->ssHid.u64Base     = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit    = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u      = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs       = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip      = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp      = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER           = pVM->rem.s.Env.efer;
    pCtx->msrSTAR           = pVM->rem.s.Env.star;
    pCtx->msrPAT            = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR          = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR          = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK         = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE   = pVM->rem.s.Env.kernelgsbase;
#endif

}
2681
2682
2683/**
2684 * Update the VMM state information if we're currently in REM.
2685 *
2686 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2687 * we're currently executing in REM and the VMM state is invalid. This method will of
2688 * course check that we're executing in REM before syncing any data over to the VMM.
2689 *
2690 * @param pVM The VM handle.
2691 * @param pVCpu The VMCPU handle.
2692 */
2693REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2694{
2695 if (pVM->rem.s.fInREM)
2696 remR3StateUpdate(pVM, pVCpu);
2697}
2698
2699
2700#undef LOG_GROUP
2701#define LOG_GROUP LOG_GROUP_REM
2702
2703
/**
 * Notify the recompiler about Address Gate 20 state change.
 *
 * This notification is required since A20 gate changes are
 * initialized from a device driver and the VM might just as
 * well be in REM mode as in RAW mode.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* NOTE(review): cIgnoreAll is raised for the duration of the call,
       presumably so this module ignores any notifications triggered by
       cpu_x86_set_a20 — confirm against the other cIgnoreAll users. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2725
2726
/**
 * Replays the handler notification changes.
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * Works in three phases: (1) atomically detach the whole pending list (a LIFO
 * pushed by other threads), (2) reverse it to get FIFO order, (3) dispatch
 * each record and return it to the lock-free free list as it is processed.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;          /* record count, cross-checked in both loops */
#endif

        /* Lockless purging of pending notifications: atomically swap in the
           empty-list sentinel (UINT32_MAX); other threads can keep queueing. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t                idxCur;
            Assert(--c >= 0);

            /* Dispatch the record to the matching notify worker. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (Lock-free push: retry the compare-exchange until the head is unchanged.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
2850
2851
/**
 * Notify REM about changed code page.
 *
 * Invalidates the recompiler's cached translations for the whole page
 * containing the given guest address (VBOX_REM_PROTECT_PAGES_FROM_SMC
 * builds only; otherwise this is a no-op).
 *
 * @returns VBox status code (always VINF_SUCCESS; a failed guest page
 *          lookup is silently ignored - presumably a non-present page has
 *          nothing cached for it, TODO confirm).
 * @param pVM VM handle.
 * @param pVCpu VMCPU handle.
 * @param pvCodePage Code page address (guest virtual).
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2890
2891
2892/**
2893 * Notification about a successful MMR3PhysRegister() call.
2894 *
2895 * @param pVM VM handle.
2896 * @param GCPhys The physical address the RAM.
2897 * @param cb Size of the memory.
2898 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2899 */
2900REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2901{
2902 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2903 VM_ASSERT_EMT(pVM);
2904
2905 /*
2906 * Validate input - we trust the caller.
2907 */
2908 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2909 Assert(cb);
2910 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2911 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2912
2913 /*
2914 * Base ram? Update GCPhysLastRam.
2915 */
2916 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2917 {
2918 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2919 {
2920 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2921 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2922 }
2923 }
2924
2925 /*
2926 * Register the ram.
2927 */
2928 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2929
2930 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2931 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2932 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2933
2934 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2935}
2936
2937
/**
 * Notification about a successful MMR3PhysRomRegister() call.
 *
 * @param pVM VM handle.
 * @param GCPhys The physical address of the ROM.
 * @param cb The size of the ROM.
 * @param pvCopy Pointer to the ROM copy. (Unused in this implementation.)
 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
 * This function will be called when ever the protection of the
 * shadow ROM changes (at reset and end of POST).
 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Register the rom.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll); /* mask notifications while updating (same pattern as the other registrars). */

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /* Writable shadow ROM is registered as plain RAM (no type bits); normal
       ROM gets the IO_MEM_ROM type tag. */
    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2972
2973
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * The range is marked as unassigned in the recompiler's physical memory
 * tables, so subsequent accesses hit the IO_MEM_UNASSIGNED handlers.
 *
 * @param pVM VM Handle.
 * @param GCPhys Start physical address.
 * @param cb The size of the range.
 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll); /* mask notifications while updating (same pattern as the registrars). */

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3004
3005
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * Worker: tags the range with the recompiler's MMIO memory type, or - for
 * non-MMIO handlers with an HC callback - with the handler memory type.
 * Ranges with neither are left untouched.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhys Handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 *
 * @remark MMR3PhysRomRegister assumes that this function will not apply the
 * Handler memory type to memory which has no HC handler.
 */
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
         enmType, GCPhys, cb, fHasHCHandler));

    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll); /* mask notifications while updating. */

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3039
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * External entry point: first replays any queued handler notifications so
 * this one is not applied out of order, then performs the registration.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhys Handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 *
 * @remark MMR3PhysRomRegister assumes that this function will not apply the
 * Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
}
3058
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * Worker: restores the range's memory type after a handler is removed.
 * MMIO ranges become unassigned; handler ranges either become unassigned
 * (above the RAM area) or are restored as plain RAM, per fRestoreAsRAM.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhys Handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
         enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll); /* mask notifications while updating. */

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Only ranges above the guest RAM area may be dropped to unassigned. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys); /* plain RAM mapping. */
        }
    }
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3100
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * External entry point: replays any queued handler notifications first so
 * ordering is preserved, then performs the deregistration.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhys Handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    REMR3ReplayHandlerNotifications(pVM);
    remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
}
3116
3117
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * Worker: moves a (non-MMIO) handler range - resets the memory type of the
 * old location and applies the handler memory type to the new one.  Does
 * nothing unless the handler has an HC callback.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhysOld Old handler range address.
 * @param GCPhysNew New handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
         enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* Moving an MMIO handler is not supported here. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    if (fHasHCHandler)
    {
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll); /* mask notifications while updating. */

        /*
         * Reset the old page.
         */
        PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
        PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }
}
3165
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * External entry point: replays any queued handler notifications first so
 * ordering is preserved, then performs the modification.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhysOld Old handler range address.
 * @param GCPhysNew New handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
}
3183
/**
 * Checks if we're handling access to this page or not.
 *
 * @returns true if we're trapping access.
 * @returns false if we aren't.
 * @param pVM The VM handle.
 * @param GCPhys The physical address.
 *
 * @remark This function will only work correctly in VBOX_STRICT builds!
 *         In other builds it always returns false.
 */
REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
{
#ifdef VBOX_STRICT
    unsigned long off;
    /* Flush any queued handler notifications first so the lookup below
       sees the up-to-date state. */
    REMR3ReplayHandlerNotifications(pVM);

    off = get_phys_page_offset(GCPhys);
    /* The page-offset bits of the phys page entry carry the memory type
       index; access is trapped when it is one of our special types. */
    return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
        || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
        || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
#else
    return false;
#endif
}
3208
3209
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.  Does not return at
 *          all for non-handler memory types (cpu_abort).
 * @param env The cpu environment.
 * @param addr The virtual address.
 * @param pTLBEntry The TLB entry.
 * @param ioTLBEntry The IO TLB entry (physical address + memory type bits).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Anything else (e.g. MMIO) is fatal: dump diagnostics to the release
       log, then abort. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3248
/**
 * Read guest RAM and ROM.
 *
 * Recompiler callback; delegates to PGM and records the time spent in the
 * gStatMemRead profile counter.
 *
 * @param SrcGCPhys The source address (guest physical).
 * @param pvDst The destination address.
 * @param cb Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3266
3267
3268/**
3269 * Read guest RAM and ROM, unsigned 8-bit.
3270 *
3271 * @param SrcGCPhys The source address (guest physical).
3272 */
3273RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3274{
3275 uint8_t val;
3276 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3277 VBOX_CHECK_ADDR(SrcGCPhys);
3278 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3279 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3280#ifdef VBOX_DEBUG_PHYS
3281 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3282#endif
3283 return val;
3284}
3285
3286
3287/**
3288 * Read guest RAM and ROM, signed 8-bit.
3289 *
3290 * @param SrcGCPhys The source address (guest physical).
3291 */
3292RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3293{
3294 int8_t val;
3295 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3296 VBOX_CHECK_ADDR(SrcGCPhys);
3297 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3298 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3299#ifdef VBOX_DEBUG_PHYS
3300 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3301#endif
3302 return val;
3303}
3304
3305
3306/**
3307 * Read guest RAM and ROM, unsigned 16-bit.
3308 *
3309 * @param SrcGCPhys The source address (guest physical).
3310 */
3311RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3312{
3313 uint16_t val;
3314 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3315 VBOX_CHECK_ADDR(SrcGCPhys);
3316 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3317 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3318#ifdef VBOX_DEBUG_PHYS
3319 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3320#endif
3321 return val;
3322}
3323
3324
3325/**
3326 * Read guest RAM and ROM, signed 16-bit.
3327 *
3328 * @param SrcGCPhys The source address (guest physical).
3329 */
3330RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3331{
3332 int16_t val;
3333 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3334 VBOX_CHECK_ADDR(SrcGCPhys);
3335 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3336 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3337#ifdef VBOX_DEBUG_PHYS
3338 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3339#endif
3340 return val;
3341}
3342
3343
3344/**
3345 * Read guest RAM and ROM, unsigned 32-bit.
3346 *
3347 * @param SrcGCPhys The source address (guest physical).
3348 */
3349RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3350{
3351 uint32_t val;
3352 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3353 VBOX_CHECK_ADDR(SrcGCPhys);
3354 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3355 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3356#ifdef VBOX_DEBUG_PHYS
3357 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3358#endif
3359 return val;
3360}
3361
3362
3363/**
3364 * Read guest RAM and ROM, signed 32-bit.
3365 *
3366 * @param SrcGCPhys The source address (guest physical).
3367 */
3368RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3369{
3370 int32_t val;
3371 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3372 VBOX_CHECK_ADDR(SrcGCPhys);
3373 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3374 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3375#ifdef VBOX_DEBUG_PHYS
3376 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3377#endif
3378 return val;
3379}
3380
3381
3382/**
3383 * Read guest RAM and ROM, unsigned 64-bit.
3384 *
3385 * @param SrcGCPhys The source address (guest physical).
3386 */
3387uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3388{
3389 uint64_t val;
3390 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3391 VBOX_CHECK_ADDR(SrcGCPhys);
3392 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3393 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3394#ifdef VBOX_DEBUG_PHYS
3395 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3396#endif
3397 return val;
3398}
3399
3400
3401/**
3402 * Read guest RAM and ROM, signed 64-bit.
3403 *
3404 * @param SrcGCPhys The source address (guest physical).
3405 */
3406int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3407{
3408 int64_t val;
3409 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3410 VBOX_CHECK_ADDR(SrcGCPhys);
3411 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3412 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3413#ifdef VBOX_DEBUG_PHYS
3414 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3415#endif
3416 return val;
3417}
3418
3419
/**
 * Write guest RAM.
 *
 * Recompiler callback; delegates to PGM and records the time spent in the
 * gStatMemWrite profile counter.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param pvSrc The source address.
 * @param cb Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3437
3438
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * Recompiler callback; delegates to PGM under the gStatMemWrite profile.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3455
3456
/**
 * Write guest RAM, unsigned 16-bit.  (Doc fixed: previously said 8-bit.)
 *
 * Recompiler callback; delegates to PGM under the gStatMemWrite profile.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3473
3474
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * Recompiler callback; delegates to PGM under the gStatMemWrite profile.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3491
3492
3493/**
3494 * Write guest RAM, unsigned 64-bit.
3495 *
3496 * @param DstGCPhys The destination address (guest physical).
3497 * @param val Value
3498 */
3499void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3500{
3501 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3502 VBOX_CHECK_ADDR(DstGCPhys);
3503 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3504 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3505#ifdef VBOX_DEBUG_PHYS
3506 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3507#endif
3508}
3509
3510#undef LOG_GROUP
3511#define LOG_GROUP LOG_GROUP_REM_MMIO
3512
3513/** Read MMIO memory. */
3514static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3515{
3516 uint32_t u32 = 0;
3517 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3518 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3519 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3520 return u32;
3521}
3522
3523/** Read MMIO memory. */
3524static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3525{
3526 uint32_t u32 = 0;
3527 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3528 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3529 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3530 return u32;
3531}
3532
3533/** Read MMIO memory. */
3534static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3535{
3536 uint32_t u32 = 0;
3537 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3538 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3539 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3540 return u32;
3541}
3542
3543/** Write to MMIO memory. */
3544static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3545{
3546 int rc;
3547 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3548 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3549 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3550}
3551
3552/** Write to MMIO memory. */
3553static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3554{
3555 int rc;
3556 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3557 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3558 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3559}
3560
3561/** Write to MMIO memory. */
3562static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3563{
3564 int rc;
3565 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3566 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3567 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3568}
3569
3570
3571#undef LOG_GROUP
3572#define LOG_GROUP LOG_GROUP_REM_HANDLER
3573
3574/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3575
3576static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3577{
3578 uint8_t u8;
3579 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3580 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3581 return u8;
3582}
3583
3584static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3585{
3586 uint16_t u16;
3587 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3588 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3589 return u16;
3590}
3591
3592static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3593{
3594 uint32_t u32;
3595 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3596 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3597 return u32;
3598}
3599
/** Write handler-monitored memory, 8-bit: routes the access through PGM so
 *  the registered access handler runs.  Passes the address of the 32-bit
 *  argument with sizeof(uint8_t) - i.e. the low byte on a little-endian
 *  host; presumably only little-endian hosts are supported here (TODO confirm). */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3605
/** Write handler-monitored memory, 16-bit: routes the access through PGM so
 *  the registered access handler runs.  Writes the low word of the 32-bit
 *  argument (little-endian host assumption, as in remR3HandlerWriteU8). */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3611
/** Write handler-monitored memory, 32-bit: routes the access through PGM so
 *  the registered access handler runs. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3617
3618/* -+- disassembly -+- */
3619
3620#undef LOG_GROUP
3621#define LOG_GROUP LOG_GROUP_REM_DISAS
3622
3623
3624/**
3625 * Enables or disables singled stepped disassembly.
3626 *
3627 * @returns VBox status code.
3628 * @param pVM VM handle.
3629 * @param fEnable To enable set this flag, to disable clear it.
3630 */
3631static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3632{
3633 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3634 VM_ASSERT_EMT(pVM);
3635
3636 if (fEnable)
3637 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3638 else
3639 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3640 return VINF_SUCCESS;
3641}
3642
3643
/**
 * Enables or disables singled stepped disassembly.
 *
 * Runs the worker directly when called on the EMT, otherwise queues it on
 * an EMT via VMR3ReqCallWait and waits for completion.
 *
 * @returns VBox status code.
 * @param pVM VM handle.
 * @param fEnable To enable set this flag, to disable clear it.
 */
REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    int rc;

    LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
    if (VM_IS_EMT(pVM))
        return remR3DisasEnableStepping(pVM, fEnable);

    rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
    AssertRC(rc);
    return rc;
}
3663
3664
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument the current stepping state is printed; otherwise the
 * argument is converted to a boolean and single stepped disassembly is
 * switched accordingly.
 *
 * @returns VBox status code.
 * @param pCmd The command descriptor.
 * @param pCmdHlp The command helper callbacks.
 * @param pVM The VM handle.
 * @param paArgs The argument array (0 or 1 entries used).
 * @param cArgs Number of arguments.
 * @param pResult Where to store the result (unused).
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3689
3690
3691/**
3692 * Disassembles one instruction and prints it to the log.
3693 *
3694 * @returns Success indicator.
3695 * @param env Pointer to the recompiler CPU structure.
3696 * @param f32BitCode Indicates that whether or not the code should
3697 * be disassembled as 16 or 32 bit. If -1 the CS
3698 * selector will be inspected.
3699 * @param pszPrefix
3700 */
3701bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3702{
3703 PVM pVM = env->pVM;
3704 const bool fLog = LogIsEnabled();
3705 const bool fLog2 = LogIs2Enabled();
3706 int rc = VINF_SUCCESS;
3707
3708 /*
3709 * Don't bother if there ain't any log output to do.
3710 */
3711 if (!fLog && !fLog2)
3712 return true;
3713
3714 /*
3715 * Update the state so DBGF reads the correct register values.
3716 */
3717 remR3StateUpdate(pVM, env->pVCpu);
3718
3719 /*
3720 * Log registers if requested.
3721 */
3722 if (!fLog2)
3723 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3724
3725 /*
3726 * Disassemble to log.
3727 */
3728 if (fLog)
3729 {
3730 PVMCPU pVCpu = VMMGetCpu(pVM);
3731 char szBuf[256];
3732 szBuf[0] = '\0';
3733 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3734 pVCpu->idCpu,
3735 0, /* Sel */
3736 0, /* GCPtr */
3737 DBGF_DISAS_FLAGS_CURRENT_GUEST
3738 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3739 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3740 szBuf,
3741 sizeof(szBuf),
3742 NULL);
3743 if (RT_FAILURE(rc))
3744 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3745 if (pszPrefix && *pszPrefix)
3746 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3747 else
3748 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3749 }
3750
3751 return RT_SUCCESS(rc);
3752}
3753
3754
/**
 * Disassemble recompiled code.
 *
 * Disassembles the recompiler's *host* code output to the log (level 2)
 * or, when DEBUG_TMP_LOGGING is defined, to the given file.
 *
 * @param   phFile  Logfile to write to when DEBUG_TMP_LOGGING is defined;
 *                  otherwise ignored (output goes through RTLogPrintf).
 * @param   pvCode  Pointer to the code block.
 * @param   cb      Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    if (LogIs2Enabled())
#endif
    {
        unsigned    off = 0;
        char        szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
        /* This is host code, so the host architecture selects the mode. */
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                DISAS_PRINTF("disas error\n");
                cbInstr = 1; /* pretend a one byte instruction so we can advance */
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
3802
3803
3804/**
3805 * Disassemble guest code.
3806 *
3807 * @param phFileIgnored Ignored, logfile usually.
3808 * @param uCode The guest address of the code to disassemble. (flat?)
3809 * @param cb Number of bytes to disassemble.
3810 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3811 */
3812void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3813{
3814#ifdef DEBUG_TMP_LOGGING
3815# define DISAS_PRINTF(x...) fprintf(phFile, x)
3816#else
3817# define DISAS_PRINTF(x...) RTLogPrintf(x)
3818 if (LogIs2Enabled())
3819#endif
3820 {
3821 PVM pVM = cpu_single_env->pVM;
3822 PVMCPU pVCpu = cpu_single_env->pVCpu;
3823 RTSEL cs;
3824 RTGCUINTPTR eip;
3825
3826 Assert(pVCpu);
3827
3828 /*
3829 * Update the state so DBGF reads the correct register values (flags).
3830 */
3831 remR3StateUpdate(pVM, pVCpu);
3832
3833 /*
3834 * Do the disassembling.
3835 */
3836 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3837 cs = cpu_single_env->segs[R_CS].selector;
3838 eip = uCode - cpu_single_env->segs[R_CS].base;
3839 for (;;)
3840 {
3841 char szBuf[256];
3842 uint32_t cbInstr;
3843 int rc = DBGFR3DisasInstrEx(pVM,
3844 pVCpu->idCpu,
3845 cs,
3846 eip,
3847 DBGF_DISAS_FLAGS_DEFAULT_MODE,
3848 szBuf, sizeof(szBuf),
3849 &cbInstr);
3850 if (RT_SUCCESS(rc))
3851 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3852 else
3853 {
3854 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3855 cbInstr = 1;
3856 }
3857
3858 /* next */
3859 if (cb <= cbInstr)
3860 break;
3861 cb -= cbInstr;
3862 uCode += cbInstr;
3863 eip += cbInstr;
3864 }
3865 }
3866#undef DISAS_PRINTF
3867}
3868
3869
3870/**
3871 * Looks up a guest symbol.
3872 *
3873 * @returns Pointer to symbol name. This is a static buffer.
3874 * @param orig_addr The address in question.
3875 */
3876const char *lookup_symbol(target_ulong orig_addr)
3877{
3878 PVM pVM = cpu_single_env->pVM;
3879 RTGCINTPTR off = 0;
3880 RTDBGSYMBOL Sym;
3881 DBGFADDRESS Addr;
3882
3883 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
3884 if (RT_SUCCESS(rc))
3885 {
3886 static char szSym[sizeof(Sym.szName) + 48];
3887 if (!off)
3888 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3889 else if (off > 0)
3890 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3891 else
3892 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3893 return szSym;
3894 }
3895 return "<N/A>";
3896}
3897
3898
3899#undef LOG_GROUP
3900#define LOG_GROUP LOG_GROUP_REM
3901
3902
3903/* -+- FF notifications -+- */
3904
3905
/**
 * Notification about a pending interrupt.
 *
 * Only one interrupt can be pending at a time; the assertion guards against
 * overwriting a vector that hasn't been consumed yet (it is picked up and
 * cleared by cpu_get_pic_interrupt).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle (currently unused).
 * @param   u8Interrupt     Interrupt
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3919
/**
 * Notification about a pending interrupt.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle (currently unused).
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    /* Note: query only; the pending vector is consumed by cpu_get_pic_interrupt. */
    return pVM->rem.s.u32PendingInterrupt;
}
3932
3933/**
3934 * Notification about the interrupt FF being set.
3935 *
3936 * @param pVM VM Handle.
3937 * @param pVCpu VMCPU Handle.
3938 * @thread The emulation thread.
3939 */
3940REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3941{
3942 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3943 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3944 if (pVM->rem.s.fInREM)
3945 {
3946 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3947 CPU_INTERRUPT_EXTERNAL_HARD);
3948 }
3949}
3950
3951
3952/**
3953 * Notification about the interrupt FF being set.
3954 *
3955 * @param pVM VM Handle.
3956 * @param pVCpu VMCPU Handle.
3957 * @thread Any.
3958 */
3959REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3960{
3961 LogFlow(("REMR3NotifyInterruptClear:\n"));
3962 if (pVM->rem.s.fInREM)
3963 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3964}
3965
3966
3967/**
3968 * Notification about pending timer(s).
3969 *
3970 * @param pVM VM Handle.
3971 * @param pVCpuDst The target cpu for this notification.
3972 * TM will not broadcast pending timer events, but use
3973 * a dedicated EMT for them. So, only interrupt REM
3974 * execution if the given CPU is executing in REM.
3975 * @thread Any.
3976 */
3977REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3978{
3979#ifndef DEBUG_bird
3980 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3981#endif
3982 if (pVM->rem.s.fInREM)
3983 {
3984 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3985 {
3986 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3987 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3988 CPU_INTERRUPT_EXTERNAL_TIMER);
3989 }
3990 else
3991 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3992 }
3993 else
3994 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3995}
3996
3997
3998/**
3999 * Notification about pending DMA transfers.
4000 *
4001 * @param pVM VM Handle.
4002 * @thread Any.
4003 */
4004REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4005{
4006 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4007 if (pVM->rem.s.fInREM)
4008 {
4009 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4010 CPU_INTERRUPT_EXTERNAL_DMA);
4011 }
4012}
4013
4014
4015/**
4016 * Notification about pending timer(s).
4017 *
4018 * @param pVM VM Handle.
4019 * @thread Any.
4020 */
4021REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4022{
4023 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4024 if (pVM->rem.s.fInREM)
4025 {
4026 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4027 CPU_INTERRUPT_EXTERNAL_EXIT);
4028 }
4029}
4030
4031
4032/**
4033 * Notification about pending FF set by an external thread.
4034 *
4035 * @param pVM VM handle.
4036 * @thread Any.
4037 */
4038REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4039{
4040 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4041 if (pVM->rem.s.fInREM)
4042 {
4043 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4044 CPU_INTERRUPT_EXTERNAL_EXIT);
4045 }
4046}
4047
4048
4049#ifdef VBOX_WITH_STATISTICS
4050void remR3ProfileStart(int statcode)
4051{
4052 STAMPROFILEADV *pStat;
4053 switch(statcode)
4054 {
4055 case STATS_EMULATE_SINGLE_INSTR:
4056 pStat = &gStatExecuteSingleInstr;
4057 break;
4058 case STATS_QEMU_COMPILATION:
4059 pStat = &gStatCompilationQEmu;
4060 break;
4061 case STATS_QEMU_RUN_EMULATED_CODE:
4062 pStat = &gStatRunCodeQEmu;
4063 break;
4064 case STATS_QEMU_TOTAL:
4065 pStat = &gStatTotalTimeQEmu;
4066 break;
4067 case STATS_QEMU_RUN_TIMERS:
4068 pStat = &gStatTimers;
4069 break;
4070 case STATS_TLB_LOOKUP:
4071 pStat= &gStatTBLookup;
4072 break;
4073 case STATS_IRQ_HANDLING:
4074 pStat= &gStatIRQ;
4075 break;
4076 case STATS_RAW_CHECK:
4077 pStat = &gStatRawCheck;
4078 break;
4079
4080 default:
4081 AssertMsgFailed(("unknown stat %d\n", statcode));
4082 return;
4083 }
4084 STAM_PROFILE_ADV_START(pStat, a);
4085}
4086
4087
4088void remR3ProfileStop(int statcode)
4089{
4090 STAMPROFILEADV *pStat;
4091 switch(statcode)
4092 {
4093 case STATS_EMULATE_SINGLE_INSTR:
4094 pStat = &gStatExecuteSingleInstr;
4095 break;
4096 case STATS_QEMU_COMPILATION:
4097 pStat = &gStatCompilationQEmu;
4098 break;
4099 case STATS_QEMU_RUN_EMULATED_CODE:
4100 pStat = &gStatRunCodeQEmu;
4101 break;
4102 case STATS_QEMU_TOTAL:
4103 pStat = &gStatTotalTimeQEmu;
4104 break;
4105 case STATS_QEMU_RUN_TIMERS:
4106 pStat = &gStatTimers;
4107 break;
4108 case STATS_TLB_LOOKUP:
4109 pStat= &gStatTBLookup;
4110 break;
4111 case STATS_IRQ_HANDLING:
4112 pStat= &gStatIRQ;
4113 break;
4114 case STATS_RAW_CHECK:
4115 pStat = &gStatRawCheck;
4116 break;
4117 default:
4118 AssertMsgFailed(("unknown stat %d\n", statcode));
4119 return;
4120 }
4121 STAM_PROFILE_ADV_STOP(pStat, a);
4122}
4123#endif
4124
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code in the REM state and requests a CPU_INTERRUPT_RC
 * so the recompiler loop winds back to EM with this status.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4139
4140
4141/* -+- timers -+- */
4142
/**
 * Reads the virtual CPU's TSC via TM.
 *
 * @returns The current tick count for the given recompiler CPU.
 * @param   env     Pointer to the recompiler CPU structure.
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4148
4149
4150/* -+- interrupts -+- */
4151
/**
 * Signals a floating point error by raising ISA IRQ 13 (the traditional
 * FERR# line) via PDM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4157
4158int cpu_get_pic_interrupt(CPUState *env)
4159{
4160 uint8_t u8Interrupt;
4161 int rc;
4162
4163 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4164 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4165 * with the (a)pic.
4166 */
4167 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4168 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4169 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4170 * remove this kludge. */
4171 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4172 {
4173 rc = VINF_SUCCESS;
4174 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4175 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4176 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4177 }
4178 else
4179 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4180
4181 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4182 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4183 if (RT_SUCCESS(rc))
4184 {
4185 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4186 env->interrupt_request |= CPU_INTERRUPT_HARD;
4187 return u8Interrupt;
4188 }
4189 return -1;
4190}
4191
4192
4193/* -+- local apic -+- */
4194
#if 0 /* CPUMSetGuestMsr does this now. */
/* NOTE(review): dead code kept for reference; if re-enabled, verify the
   PDMApicSetBase signature still matches. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
#endif
4202
4203uint64_t cpu_get_apic_base(CPUX86State *env)
4204{
4205 uint64_t u64;
4206 int rc = PDMApicGetBase(env->pVM, &u64);
4207 if (RT_SUCCESS(rc))
4208 {
4209 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4210 return u64;
4211 }
4212 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4213 return 0;
4214}
4215
/**
 * Sets the task priority (CR8) via PDM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The new CR8 value (low 4 bits).
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4221
4222uint8_t cpu_get_apic_tpr(CPUX86State *env)
4223{
4224 uint8_t u8;
4225 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4226 if (RT_SUCCESS(rc))
4227 {
4228 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4229 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4230 }
4231 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4232 return 0;
4233}
4234
4235/**
4236 * Read an MSR.
4237 *
4238 * @retval 0 success.
4239 * @retval -1 failure, raise \#GP(0).
4240 * @param env The cpu state.
4241 * @param idMsr The MSR to read.
4242 * @param puValue Where to return the value.
4243 */
4244int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4245{
4246 Assert(env->pVCpu);
4247 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4248}
4249
/**
 * Write to an MSR.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env     The cpu state.
 * @param   idMsr   The MSR to write.
 * @param   uValue  The value to write.
 */
int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
{
    Assert(env->pVCpu);
    return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
}
4264
4265/* -+- I/O Ports -+- */
4266
4267#undef LOG_GROUP
4268#define LOG_GROUP LOG_GROUP_REM_IOPORT
4269
4270void cpu_outb(CPUState *env, int addr, int val)
4271{
4272 int rc;
4273
4274 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4275 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4276
4277 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4278 if (RT_LIKELY(rc == VINF_SUCCESS))
4279 return;
4280 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4281 {
4282 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4283 remR3RaiseRC(env->pVM, rc);
4284 return;
4285 }
4286 remAbort(rc, __FUNCTION__);
4287}
4288
4289void cpu_outw(CPUState *env, int addr, int val)
4290{
4291 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4292 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4293 if (RT_LIKELY(rc == VINF_SUCCESS))
4294 return;
4295 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4296 {
4297 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4298 remR3RaiseRC(env->pVM, rc);
4299 return;
4300 }
4301 remAbort(rc, __FUNCTION__);
4302}
4303
4304void cpu_outl(CPUState *env, int addr, int val)
4305{
4306 int rc;
4307 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4308 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4309 if (RT_LIKELY(rc == VINF_SUCCESS))
4310 return;
4311 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4312 {
4313 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4314 remR3RaiseRC(env->pVM, rc);
4315 return;
4316 }
4317 remAbort(rc, __FUNCTION__);
4318}
4319
4320int cpu_inb(CPUState *env, int addr)
4321{
4322 uint32_t u32 = 0;
4323 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4324 if (RT_LIKELY(rc == VINF_SUCCESS))
4325 {
4326 if (/*addr != 0x61 && */addr != 0x71)
4327 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4328 return (int)u32;
4329 }
4330 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4331 {
4332 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4333 remR3RaiseRC(env->pVM, rc);
4334 return (int)u32;
4335 }
4336 remAbort(rc, __FUNCTION__);
4337 return 0xff;
4338}
4339
4340int cpu_inw(CPUState *env, int addr)
4341{
4342 uint32_t u32 = 0;
4343 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4344 if (RT_LIKELY(rc == VINF_SUCCESS))
4345 {
4346 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4347 return (int)u32;
4348 }
4349 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4350 {
4351 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4352 remR3RaiseRC(env->pVM, rc);
4353 return (int)u32;
4354 }
4355 remAbort(rc, __FUNCTION__);
4356 return 0xffff;
4357}
4358
4359int cpu_inl(CPUState *env, int addr)
4360{
4361 uint32_t u32 = 0;
4362 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4363 if (RT_LIKELY(rc == VINF_SUCCESS))
4364 {
4365//if (addr==0x01f0 && u32 == 0x6b6d)
4366// loglevel = ~0;
4367 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4368 return (int)u32;
4369 }
4370 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4371 {
4372 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4373 remR3RaiseRC(env->pVM, rc);
4374 return (int)u32;
4375 }
4376 remAbort(rc, __FUNCTION__);
4377 return 0xffffffff;
4378}
4379
4380#undef LOG_GROUP
4381#define LOG_GROUP LOG_GROUP_REM
4382
4383
4384/* -+- helpers and misc other interfaces -+- */
4385
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * Simply forwards to CPUM so the guest sees the configured CPUID leaves.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4403
4404
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): this disabled code calls REMR3StateBack(pVM) with one
 * argument while the live code (cpu_abort, remAbort) passes (pVM, pVCpu),
 * and EMR3FatalError is given pVM instead of pVCpu — fix before re-enabling.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4434
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Does not return: ends up in EMR3FatalError after syncing the REM state
 * back (when inside REM).
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   pszFormat   printf-style message format.
 * @param   ...         Format arguments.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM     pVM;
    PVMCPU  pVCpu;
    char    szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Manually pull up to six pointer-sized arguments off the va_list, one per
       '%' found in the format string, and re-dispatch to RTStrPrintf. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM   = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4495
4496
/**
 * Aborts the VM.
 *
 * Does not return: ends up in EMR3FatalError after syncing the REM state
 * back (when inside REM).
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM     pVM;
    PVMCPU  pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM   = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4527
4528
/**
 * Dumps a linux system call.
 *
 * Logs the i386 Linux syscall selected by the guest's EAX together with the
 * usual argument registers.  The table below mirrors the 32-bit Linux
 * syscall numbering (unistd_32.h) up to entry 273.
 *
 * @param   pVCpu   VMCPU handle.
 */
void remR3DumpLnxSyscall(PVMCPU pVCpu)
{
    /* Syscall number -> name, indexed by EAX. */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",             /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",           /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",            /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",           /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",            /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",            /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",            /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",              /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",           /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",            /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",            /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",          /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",         /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",             /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",         /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",          /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",             /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",          /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",            /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",         /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",            /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",           /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",            /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",            /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",          /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",           /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",            /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",           /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",           /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",         /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",            /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",         /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",           /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    /* The switch only has a default; it is kept for easy addition of
       per-syscall argument dumping later. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;

    }
}
4827
4828
4829/**
4830 * Dumps an OpenBSD system call.
4831 * @param pVCpu VMCPU handle.
4832 */
4833void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4834{
4835 static const char *apsz[] =
4836 {
4837 "SYS_syscall", //0
4838 "SYS_exit", //1
4839 "SYS_fork", //2
4840 "SYS_read", //3
4841 "SYS_write", //4
4842 "SYS_open", //5
4843 "SYS_close", //6
4844 "SYS_wait4", //7
4845 "SYS_8",
4846 "SYS_link", //9
4847 "SYS_unlink", //10
4848 "SYS_11",
4849 "SYS_chdir", //12
4850 "SYS_fchdir", //13
4851 "SYS_mknod", //14
4852 "SYS_chmod", //15
4853 "SYS_chown", //16
4854 "SYS_break", //17
4855 "SYS_18",
4856 "SYS_19",
4857 "SYS_getpid", //20
4858 "SYS_mount", //21
4859 "SYS_unmount", //22
4860 "SYS_setuid", //23
4861 "SYS_getuid", //24
4862 "SYS_geteuid", //25
4863 "SYS_ptrace", //26
4864 "SYS_recvmsg", //27
4865 "SYS_sendmsg", //28
4866 "SYS_recvfrom", //29
4867 "SYS_accept", //30
4868 "SYS_getpeername", //31
4869 "SYS_getsockname", //32
4870 "SYS_access", //33
4871 "SYS_chflags", //34
4872 "SYS_fchflags", //35
4873 "SYS_sync", //36
4874 "SYS_kill", //37
4875 "SYS_38",
4876 "SYS_getppid", //39
4877 "SYS_40",
4878 "SYS_dup", //41
4879 "SYS_opipe", //42
4880 "SYS_getegid", //43
4881 "SYS_profil", //44
4882 "SYS_ktrace", //45
4883 "SYS_sigaction", //46
4884 "SYS_getgid", //47
4885 "SYS_sigprocmask", //48
4886 "SYS_getlogin", //49
4887 "SYS_setlogin", //50
4888 "SYS_acct", //51
4889 "SYS_sigpending", //52
4890 "SYS_osigaltstack", //53
4891 "SYS_ioctl", //54
4892 "SYS_reboot", //55
4893 "SYS_revoke", //56
4894 "SYS_symlink", //57
4895 "SYS_readlink", //58
4896 "SYS_execve", //59
4897 "SYS_umask", //60
4898 "SYS_chroot", //61
4899 "SYS_62",
4900 "SYS_63",
4901 "SYS_64",
4902 "SYS_65",
4903 "SYS_vfork", //66
4904 "SYS_67",
4905 "SYS_68",
4906 "SYS_sbrk", //69
4907 "SYS_sstk", //70
4908 "SYS_61",
4909 "SYS_vadvise", //72
4910 "SYS_munmap", //73
4911 "SYS_mprotect", //74
4912 "SYS_madvise", //75
4913 "SYS_76",
4914 "SYS_77",
4915 "SYS_mincore", //78
4916 "SYS_getgroups", //79
4917 "SYS_setgroups", //80
4918 "SYS_getpgrp", //81
4919 "SYS_setpgid", //82
4920 "SYS_setitimer", //83
4921 "SYS_84",
4922 "SYS_85",
4923 "SYS_getitimer", //86
4924 "SYS_87",
4925 "SYS_88",
4926 "SYS_89",
4927 "SYS_dup2", //90
4928 "SYS_91",
4929 "SYS_fcntl", //92
4930 "SYS_select", //93
4931 "SYS_94",
4932 "SYS_fsync", //95
4933 "SYS_setpriority", //96
4934 "SYS_socket", //97
4935 "SYS_connect", //98
4936 "SYS_99",
4937 "SYS_getpriority", //100
4938 "SYS_101",
4939 "SYS_102",
4940 "SYS_sigreturn", //103
4941 "SYS_bind", //104
4942 "SYS_setsockopt", //105
4943 "SYS_listen", //106
4944 "SYS_107",
4945 "SYS_108",
4946 "SYS_109",
4947 "SYS_110",
4948 "SYS_sigsuspend", //111
4949 "SYS_112",
4950 "SYS_113",
4951 "SYS_114",
4952 "SYS_115",
4953 "SYS_gettimeofday", //116
4954 "SYS_getrusage", //117
4955 "SYS_getsockopt", //118
4956 "SYS_119",
4957 "SYS_readv", //120
4958 "SYS_writev", //121
4959 "SYS_settimeofday", //122
4960 "SYS_fchown", //123
4961 "SYS_fchmod", //124
4962 "SYS_125",
4963 "SYS_setreuid", //126
4964 "SYS_setregid", //127
4965 "SYS_rename", //128
4966 "SYS_129",
4967 "SYS_130",
4968 "SYS_flock", //131
4969 "SYS_mkfifo", //132
4970 "SYS_sendto", //133
4971 "SYS_shutdown", //134
4972 "SYS_socketpair", //135
4973 "SYS_mkdir", //136
4974 "SYS_rmdir", //137
4975 "SYS_utimes", //138
4976 "SYS_139",
4977 "SYS_adjtime", //140
4978 "SYS_141",
4979 "SYS_142",
4980 "SYS_143",
4981 "SYS_144",
4982 "SYS_145",
4983 "SYS_146",
4984 "SYS_setsid", //147
4985 "SYS_quotactl", //148
4986 "SYS_149",
4987 "SYS_150",
4988 "SYS_151",
4989 "SYS_152",
4990 "SYS_153",
4991 "SYS_154",
4992 "SYS_nfssvc", //155
4993 "SYS_156",
4994 "SYS_157",
4995 "SYS_158",
4996 "SYS_159",
4997 "SYS_160",
4998 "SYS_getfh", //161
4999 "SYS_162",
5000 "SYS_163",
5001 "SYS_164",
5002 "SYS_sysarch", //165
5003 "SYS_166",
5004 "SYS_167",
5005 "SYS_168",
5006 "SYS_169",
5007 "SYS_170",
5008 "SYS_171",
5009 "SYS_172",
5010 "SYS_pread", //173
5011 "SYS_pwrite", //174
5012 "SYS_175",
5013 "SYS_176",
5014 "SYS_177",
5015 "SYS_178",
5016 "SYS_179",
5017 "SYS_180",
5018 "SYS_setgid", //181
5019 "SYS_setegid", //182
5020 "SYS_seteuid", //183
5021 "SYS_lfs_bmapv", //184
5022 "SYS_lfs_markv", //185
5023 "SYS_lfs_segclean", //186
5024 "SYS_lfs_segwait", //187
5025 "SYS_188",
5026 "SYS_189",
5027 "SYS_190",
5028 "SYS_pathconf", //191
5029 "SYS_fpathconf", //192
5030 "SYS_swapctl", //193
5031 "SYS_getrlimit", //194
5032 "SYS_setrlimit", //195
5033 "SYS_getdirentries", //196
5034 "SYS_mmap", //197
5035 "SYS___syscall", //198
5036 "SYS_lseek", //199
5037 "SYS_truncate", //200
5038 "SYS_ftruncate", //201
5039 "SYS___sysctl", //202
5040 "SYS_mlock", //203
5041 "SYS_munlock", //204
5042 "SYS_205",
5043 "SYS_futimes", //206
5044 "SYS_getpgid", //207
5045 "SYS_xfspioctl", //208
5046 "SYS_209",
5047 "SYS_210",
5048 "SYS_211",
5049 "SYS_212",
5050 "SYS_213",
5051 "SYS_214",
5052 "SYS_215",
5053 "SYS_216",
5054 "SYS_217",
5055 "SYS_218",
5056 "SYS_219",
5057 "SYS_220",
5058 "SYS_semget", //221
5059 "SYS_222",
5060 "SYS_223",
5061 "SYS_224",
5062 "SYS_msgget", //225
5063 "SYS_msgsnd", //226
5064 "SYS_msgrcv", //227
5065 "SYS_shmat", //228
5066 "SYS_229",
5067 "SYS_shmdt", //230
5068 "SYS_231",
5069 "SYS_clock_gettime", //232
5070 "SYS_clock_settime", //233
5071 "SYS_clock_getres", //234
5072 "SYS_235",
5073 "SYS_236",
5074 "SYS_237",
5075 "SYS_238",
5076 "SYS_239",
5077 "SYS_nanosleep", //240
5078 "SYS_241",
5079 "SYS_242",
5080 "SYS_243",
5081 "SYS_244",
5082 "SYS_245",
5083 "SYS_246",
5084 "SYS_247",
5085 "SYS_248",
5086 "SYS_249",
5087 "SYS_minherit", //250
5088 "SYS_rfork", //251
5089 "SYS_poll", //252
5090 "SYS_issetugid", //253
5091 "SYS_lchown", //254
5092 "SYS_getsid", //255
5093 "SYS_msync", //256
5094 "SYS_257",
5095 "SYS_258",
5096 "SYS_259",
5097 "SYS_getfsstat", //260
5098 "SYS_statfs", //261
5099 "SYS_fstatfs", //262
5100 "SYS_pipe", //263
5101 "SYS_fhopen", //264
5102 "SYS_265",
5103 "SYS_fhstatfs", //266
5104 "SYS_preadv", //267
5105 "SYS_pwritev", //268
5106 "SYS_kqueue", //269
5107 "SYS_kevent", //270
5108 "SYS_mlockall", //271
5109 "SYS_munlockall", //272
5110 "SYS_getpeereid", //273
5111 "SYS_274",
5112 "SYS_275",
5113 "SYS_276",
5114 "SYS_277",
5115 "SYS_278",
5116 "SYS_279",
5117 "SYS_280",
5118 "SYS_getresuid", //281
5119 "SYS_setresuid", //282
5120 "SYS_getresgid", //283
5121 "SYS_setresgid", //284
5122 "SYS_285",
5123 "SYS_mquery", //286
5124 "SYS_closefrom", //287
5125 "SYS_sigaltstack", //288
5126 "SYS_shmget", //289
5127 "SYS_semop", //290
5128 "SYS_stat", //291
5129 "SYS_fstat", //292
5130 "SYS_lstat", //293
5131 "SYS_fhstat", //294
5132 "SYS___semctl", //295
5133 "SYS_shmctl", //296
5134 "SYS_msgctl", //297
5135 "SYS_MAXSYSCALL", //298
5136 //299
5137 //300
5138 };
5139 uint32_t uEAX;
5140 if (!LogIsEnabled())
5141 return;
5142 uEAX = CPUMGetGuestEAX(pVCpu);
5143 switch (uEAX)
5144 {
5145 default:
5146 if (uEAX < RT_ELEMENTS(apsz))
5147 {
5148 uint32_t au32Args[8] = {0};
5149 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5150 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5151 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5152 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5153 }
5154 else
5155 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5156 break;
5157 }
5158}
5159
5160
5161#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5162/**
5163 * The Dll main entry point (stub).
5164 */
5165bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5166{
5167 return true;
5168}
5169
/**
 * Minimal memcpy replacement for the no-CRT windows build.
 *
 * Plain forward byte copy; like the real memcpy the source and
 * destination regions must not overlap.
 *
 * @returns dst.
 * @param   dst     The destination buffer.
 * @param   src     The source buffer (read only; must not overlap dst).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    /* Keep the source pointer const qualified; the original initialized a
       non-const uint8_t * from the const void *src, discarding const. */
    uint8_t        *pbDst = (uint8_t *)dst;
    const uint8_t  *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5177
5178#endif
5179
/**
 * QEMU core callback stub — deliberately a no-op; the VBox recompiler keeps
 * no state here that needs refreshing.
 * NOTE(review): presumably invoked by the QEMU core when the CPU's SMM
 * (System Management Mode) state changes — inferred from the name, confirm
 * against the QEMU side.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette