VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 36006

Last change on this file since 36006 was 35994, checked in by vboxsync, 14 years ago

Two cases where CPUM_CHANGED_GLOBAL_TLB_FLUSH was missing, causing stale TLB entries and mayhem.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 171.1 KB
Line 
1/* $Id: VBoxRecompiler.c 35994 2011-02-16 13:30:53Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "vl.h"
24#include "osdep.h"
25#include "exec-all.h"
26#include "config.h"
27#include "cpu-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
75/** Copy 80-bit fpu register at pSrc to pDst.
76 * This is probably faster than *calling* memcpy.
77 */
78#define REM_COPY_FPU_REG(pDst, pSrc) \
79 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
85static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
88static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
89
90static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
91static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
92static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
93static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
94static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
95static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
96
97static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
98static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
99static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
100static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
101static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
102static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
103
104static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
105static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
106static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the major recompiler phases (execute/compile/run/total). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE gStatGCPhys2HCVirt;
static STAMPROFILE gStatHCVirt2GCPhys;
static STAMCOUNTER gStatCpuGetTSC;
/* Counters for the various reasons raw-mode execution is refused. */
static STAMCOUNTER gStatRefuseTFInhibit;
static STAMCOUNTER gStatRefuseVM86;
static STAMCOUNTER gStatRefusePaging;
static STAMCOUNTER gStatRefusePAE;
static STAMCOUNTER gStatRefuseIOPLNot0;
static STAMCOUNTER gStatRefuseIF0;
static STAMCOUNTER gStatRefuseCode16;
static STAMCOUNTER gStatRefuseWP0;
static STAMCOUNTER gStatRefuseRing1or2;
static STAMCOUNTER gStatRefuseCanExecute;
/* Descriptor table / task register change counters. */
static STAMCOUNTER gStatREMGDTChange;
static STAMCOUNTER gStatREMIDTChange;
static STAMCOUNTER gStatREMLDTRChange;
static STAMCOUNTER gStatREMTRChange;
/* Per-selector out-of-sync counters; indices follow ES/CS/SS/DS/FS/GS
   (see the STAM_REG calls in REMR3Init). */
static STAMCOUNTER gStatSelOutOfSync[6];
static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
149
150/*
151 * Global stuff.
152 */
153
/** MMIO read callbacks.
 * Indexed by access size order: [0]=U8, [1]=U16, [2]=U32. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Indexed by access size order: [0]=U8, [1]=U16, [2]=U32. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks.
 * Indexed by access size order: [0]=U8, [1]=U16, [2]=U32. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks.
 * Indexed by access size order: [0]=U8, [1]=U16, [2]=U32. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
185
186
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments.
 * One optional numeric/boolean argument (cTimesMin=0, cTimesMax=~0). */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once by REMR3Init via DBGCRegisterCommands. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
        "If no arguments show the current state."
    }
};
#endif
217
218/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
219uint8_t *code_gen_prologue;
220
221
222/*******************************************************************************
223* Internal Functions *
224*******************************************************************************/
225void remAbort(int rc, const char *pszTip);
226extern int testmath(void);
227
228/* Put them here to avoid unused variable warning. */
229AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
230#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
231//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
232/* Why did this have to be identical?? */
233AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
234#else
235AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
236#endif
237
238
239/**
240 * Initializes the REM.
241 *
242 * @returns VBox status code.
243 * @param pVM The VM to operate on.
244 */
245REMR3DECL(int) REMR3Init(PVM pVM)
246{
247 PREMHANDLERNOTIFICATION pCur;
248 uint32_t u32Dummy;
249 int rc;
250 unsigned i;
251
252#ifdef VBOX_ENABLE_VBOXREM64
253 LogRel(("Using 64-bit aware REM\n"));
254#endif
255
256 /*
257 * Assert sanity.
258 */
259 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
260 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
261 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
262#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
263 Assert(!testmath());
264#endif
265
266 /*
267 * Init some internal data members.
268 */
269 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
270 pVM->rem.s.Env.pVM = pVM;
271#ifdef CPU_RAW_MODE_INIT
272 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
273#endif
274
275 /*
276 * Initialize the REM critical section.
277 *
278 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
279 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
280 * deadlocks. (mostly pgm vs rem locking)
281 */
282 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
283 AssertRCReturn(rc, rc);
284
285 /* ctx. */
286 pVM->rem.s.pCtx = NULL; /* set when executing code. */
287 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
288
289 /* ignore all notifications */
290 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
291
292 code_gen_prologue = RTMemExecAlloc(_1K);
293 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
294
295 cpu_exec_init_all(0);
296
297 /*
298 * Init the recompiler.
299 */
300 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
301 {
302 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
303 return VERR_GENERAL_FAILURE;
304 }
305 PVMCPU pVCpu = VMMGetCpu(pVM);
306 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
307 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
308
309 /* allocate code buffer for single instruction emulation. */
310 pVM->rem.s.Env.cbCodeBuffer = 4096;
311 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
312 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
313
314 /* finally, set the cpu_single_env global. */
315 cpu_single_env = &pVM->rem.s.Env;
316
317 /* Nothing is pending by default */
318 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
319
320 /*
321 * Register ram types.
322 */
323 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
324 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
325 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
326 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
327 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
328
329 /* stop ignoring. */
330 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
331
332 /*
333 * Register the saved state data unit.
334 */
335 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
336 NULL, NULL, NULL,
337 NULL, remR3Save, NULL,
338 NULL, remR3Load, NULL);
339 if (RT_FAILURE(rc))
340 return rc;
341
342#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
343 /*
344 * Debugger commands.
345 */
346 static bool fRegisteredCmds = false;
347 if (!fRegisteredCmds)
348 {
349 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
350 if (RT_SUCCESS(rc))
351 fRegisteredCmds = true;
352 }
353#endif
354
355#ifdef VBOX_WITH_STATISTICS
356 /*
357 * Statistics.
358 */
359 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
360 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
361 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
362 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
363 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
364 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
365 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
366 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
367 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
368 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
369 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
370 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
371
372 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
373
374 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
375 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
376 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
377 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
378 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
379 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
380 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
381 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
382 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
383 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
384 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
385
386 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
387 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
388 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
389 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
390
391 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
392 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
393 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
394 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
395 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
396 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
397
398 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
399 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
400 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
401 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
402 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
403 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
404
405 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
406#endif /* VBOX_WITH_STATISTICS */
407
408 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
409 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
410 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
411
412
413#ifdef DEBUG_ALL_LOGGING
414 loglevel = ~0;
415# ifdef DEBUG_TMP_LOGGING
416 logfile = fopen("/tmp/vbox-qemu.log", "w");
417# endif
418#endif
419
420 /*
421 * Init the handler notification lists.
422 */
423 pVM->rem.s.idxPendingList = UINT32_MAX;
424 pVM->rem.s.idxFreeList = 0;
425
426 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
427 {
428 pCur = &pVM->rem.s.aHandlerNotifications[i];
429 pCur->idxNext = i + 1;
430 pCur->idxSelf = i;
431 }
432 pCur->idxNext = UINT32_MAX; /* the last record. */
433
434 return rc;
435}
436
437
438/**
439 * Finalizes the REM initialization.
440 *
441 * This is called after all components, devices and drivers has
442 * been initialized. Its main purpose it to finish the RAM related
443 * initialization.
444 *
445 * @returns VBox status code.
446 *
447 * @param pVM The VM handle.
448 */
449REMR3DECL(int) REMR3InitFinalize(PVM pVM)
450{
451 int rc;
452
453 /*
454 * Ram size & dirty bit map.
455 */
456 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
457 pVM->rem.s.fGCPhysLastRamFixed = true;
458#ifdef RT_STRICT
459 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
460#else
461 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
462#endif
463 return rc;
464}
465
466
467/**
468 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
469 *
470 * @returns VBox status code.
471 * @param pVM The VM handle.
472 * @param fGuarded Whether to guard the map.
473 */
474static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
475{
476 int rc = VINF_SUCCESS;
477 RTGCPHYS cb;
478
479 cb = pVM->rem.s.GCPhysLastRam + 1;
480 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
481 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
482 VERR_OUT_OF_RANGE);
483 phys_ram_size = cb;
484 phys_ram_dirty_size = cb >> PAGE_SHIFT;
485 AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
486
487 if (!fGuarded)
488 {
489 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
490 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
491 }
492 else
493 {
494 /*
495 * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
496 */
497 uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
498 uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
499 if (cbBitmapFull == cbBitmapAligned)
500 cbBitmapFull += _4G >> PAGE_SHIFT;
501 else if (cbBitmapFull - cbBitmapAligned < _64K)
502 cbBitmapFull += _64K;
503
504 phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
505 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
506
507 rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
508 if (RT_FAILURE(rc))
509 {
510 RTMemPageFree(phys_ram_dirty, cbBitmapFull);
511 AssertLogRelRCReturn(rc, rc);
512 }
513
514 phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
515 }
516
517 /* initialize it. */
518 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
519 return rc;
520}
521
522
523/**
524 * Terminates the REM.
525 *
526 * Termination means cleaning up and freeing all resources,
527 * the VM it self is at this point powered off or suspended.
528 *
529 * @returns VBox status code.
530 * @param pVM The VM to operate on.
531 */
532REMR3DECL(int) REMR3Term(PVM pVM)
533{
534#ifdef VBOX_WITH_STATISTICS
535 /*
536 * Statistics.
537 */
538 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
539 STAM_DEREG(pVM, &gStatCompilationQEmu);
540 STAM_DEREG(pVM, &gStatRunCodeQEmu);
541 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
542 STAM_DEREG(pVM, &gStatTimers);
543 STAM_DEREG(pVM, &gStatTBLookup);
544 STAM_DEREG(pVM, &gStatIRQ);
545 STAM_DEREG(pVM, &gStatRawCheck);
546 STAM_DEREG(pVM, &gStatMemRead);
547 STAM_DEREG(pVM, &gStatMemWrite);
548 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
549 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
550
551 STAM_DEREG(pVM, &gStatCpuGetTSC);
552
553 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
554 STAM_DEREG(pVM, &gStatRefuseVM86);
555 STAM_DEREG(pVM, &gStatRefusePaging);
556 STAM_DEREG(pVM, &gStatRefusePAE);
557 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
558 STAM_DEREG(pVM, &gStatRefuseIF0);
559 STAM_DEREG(pVM, &gStatRefuseCode16);
560 STAM_DEREG(pVM, &gStatRefuseWP0);
561 STAM_DEREG(pVM, &gStatRefuseRing1or2);
562 STAM_DEREG(pVM, &gStatRefuseCanExecute);
563 STAM_DEREG(pVM, &gStatFlushTBs);
564
565 STAM_DEREG(pVM, &gStatREMGDTChange);
566 STAM_DEREG(pVM, &gStatREMLDTRChange);
567 STAM_DEREG(pVM, &gStatREMIDTChange);
568 STAM_DEREG(pVM, &gStatREMTRChange);
569
570 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
571 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
572 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
573 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
574 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
575 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
576
577 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
578 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
579 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
580 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
581 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
582 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
583
584 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
585#endif /* VBOX_WITH_STATISTICS */
586
587 STAM_REL_DEREG(pVM, &tb_flush_count);
588 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
589 STAM_REL_DEREG(pVM, &tlb_flush_count);
590
591 return VINF_SUCCESS;
592}
593
594
595/**
596 * The VM is being reset.
597 *
598 * For the REM component this means to call the cpu_reset() and
599 * reinitialize some state variables.
600 *
601 * @param pVM VM handle.
602 */
603REMR3DECL(void) REMR3Reset(PVM pVM)
604{
605 /*
606 * Reset the REM cpu.
607 */
608 Assert(pVM->rem.s.cIgnoreAll == 0);
609 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
610 cpu_reset(&pVM->rem.s.Env);
611 pVM->rem.s.cInvalidatedPages = 0;
612 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
613 Assert(pVM->rem.s.cIgnoreAll == 0);
614
615 /* Clear raw ring 0 init state */
616 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
617
618 /* Flush the TBs the next time we execute code here. */
619 pVM->rem.s.fFlushTBs = true;
620}
621
622
623/**
624 * Execute state save operation.
625 *
626 * @returns VBox status code.
627 * @param pVM VM Handle.
628 * @param pSSM SSM operation handle.
629 */
630static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
631{
632 PREM pRem = &pVM->rem.s;
633
634 /*
635 * Save the required CPU Env bits.
636 * (Not much because we're never in REM when doing the save.)
637 */
638 LogFlow(("remR3Save:\n"));
639 Assert(!pRem->fInREM);
640 SSMR3PutU32(pSSM, pRem->Env.hflags);
641 SSMR3PutU32(pSSM, ~0); /* separator */
642
643 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
644 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
645 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
646
647 return SSMR3PutU32(pSSM, ~0); /* terminator */
648}
649
650
651/**
652 * Execute state load operation.
653 *
654 * @returns VBox status code.
655 * @param pVM VM Handle.
656 * @param pSSM SSM operation handle.
657 * @param uVersion Data layout version.
658 * @param uPass The data pass.
659 */
660static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
661{
662 uint32_t u32Dummy;
663 uint32_t fRawRing0 = false;
664 uint32_t u32Sep;
665 uint32_t i;
666 int rc;
667 PREM pRem;
668
669 LogFlow(("remR3Load:\n"));
670 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
671
672 /*
673 * Validate version.
674 */
675 if ( uVersion != REM_SAVED_STATE_VERSION
676 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
677 {
678 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
679 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
680 }
681
682 /*
683 * Do a reset to be on the safe side...
684 */
685 REMR3Reset(pVM);
686
687 /*
688 * Ignore all ignorable notifications.
689 * (Not doing this will cause serious trouble.)
690 */
691 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
692
693 /*
694 * Load the required CPU Env bits.
695 * (Not much because we're never in REM when doing the save.)
696 */
697 pRem = &pVM->rem.s;
698 Assert(!pRem->fInREM);
699 SSMR3GetU32(pSSM, &pRem->Env.hflags);
700 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
701 {
702 /* Redundant REM CPU state has to be loaded, but can be ignored. */
703 CPUX86State_Ver16 temp;
704 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
705 }
706
707 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
708 if (RT_FAILURE(rc))
709 return rc;
710 if (u32Sep != ~0U)
711 {
712 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
713 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
714 }
715
716 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
717 SSMR3GetUInt(pSSM, &fRawRing0);
718 if (fRawRing0)
719 pRem->Env.state |= CPU_RAW_RING0;
720
721 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
722 {
723 /*
724 * Load the REM stuff.
725 */
726 /** @todo r=bird: We should just drop all these items, restoring doesn't make
727 * sense. */
728 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
729 if (RT_FAILURE(rc))
730 return rc;
731 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
732 {
733 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
734 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
735 }
736 for (i = 0; i < pRem->cInvalidatedPages; i++)
737 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
738 }
739
740 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
741 if (RT_FAILURE(rc))
742 return rc;
743
744 /* check the terminator. */
745 rc = SSMR3GetU32(pSSM, &u32Sep);
746 if (RT_FAILURE(rc))
747 return rc;
748 if (u32Sep != ~0U)
749 {
750 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
751 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
752 }
753
754 /*
755 * Get the CPUID features.
756 */
757 PVMCPU pVCpu = VMMGetCpu(pVM);
758 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
759 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
760
761 /*
762 * Sync the Load Flush the TLB
763 */
764 tlb_flush(&pRem->Env, 1);
765
766 /*
767 * Stop ignoring ignorable notifications.
768 */
769 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
770
771 /*
772 * Sync the whole CPU state when executing code in the recompiler.
773 */
774 for (i = 0; i < pVM->cCpus; i++)
775 {
776 PVMCPU pVCpu = &pVM->aCpus[i];
777 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
778 }
779 return VINF_SUCCESS;
780}
781
782
783
784#undef LOG_GROUP
785#define LOG_GROUP LOG_GROUP_REM_RUN
786
787/**
788 * Single steps an instruction in recompiled mode.
789 *
790 * Before calling this function the REM state needs to be in sync with
791 * the VM. Call REMR3State() to perform the sync. It's only necessary
792 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
793 * and after calling REMR3StateBack().
794 *
795 * @returns VBox status code.
796 *
797 * @param pVM VM Handle.
798 * @param pVCpu VMCPU Handle.
799 */
800REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
801{
802 int rc, interrupt_request;
803 RTGCPTR GCPtrPC;
804 bool fBp;
805
806 /*
807 * Lock the REM - we don't wanna have anyone interrupting us
808 * while stepping - and enabled single stepping. We also ignore
809 * pending interrupts and suchlike.
810 */
811 interrupt_request = pVM->rem.s.Env.interrupt_request;
812 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
813 pVM->rem.s.Env.interrupt_request = 0;
814 cpu_single_step(&pVM->rem.s.Env, 1);
815
816 /*
817 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
818 */
819 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
820 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
821
822 /*
823 * Execute and handle the return code.
824 * We execute without enabling the cpu tick, so on success we'll
825 * just flip it on and off to make sure it moves
826 */
827 rc = cpu_exec(&pVM->rem.s.Env);
828 if (rc == EXCP_DEBUG)
829 {
830 TMR3NotifyResume(pVM, pVCpu);
831 TMR3NotifySuspend(pVM, pVCpu);
832 rc = VINF_EM_DBG_STEPPED;
833 }
834 else
835 {
836 switch (rc)
837 {
838 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
839 case EXCP_HLT:
840 case EXCP_HALTED: rc = VINF_EM_HALT; break;
841 case EXCP_RC:
842 rc = pVM->rem.s.rc;
843 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
844 break;
845 case EXCP_EXECUTE_RAW:
846 case EXCP_EXECUTE_HWACC:
847 /** @todo: is it correct? No! */
848 rc = VINF_SUCCESS;
849 break;
850 default:
851 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
852 rc = VERR_INTERNAL_ERROR;
853 break;
854 }
855 }
856
857 /*
858 * Restore the stuff we changed to prevent interruption.
859 * Unlock the REM.
860 */
861 if (fBp)
862 {
863 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
864 Assert(rc2 == 0); NOREF(rc2);
865 }
866 cpu_single_step(&pVM->rem.s.Env, 0);
867 pVM->rem.s.Env.interrupt_request = interrupt_request;
868
869 return rc;
870}
871
872
873/**
874 * Set a breakpoint using the REM facilities.
875 *
876 * @returns VBox status code.
877 * @param pVM The VM handle.
878 * @param Address The breakpoint address.
879 * @thread The emulation thread.
880 */
881REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
882{
883 VM_ASSERT_EMT(pVM);
884 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
885 {
886 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
887 return VINF_SUCCESS;
888 }
889 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
890 return VERR_REM_NO_MORE_BP_SLOTS;
891}
892
893
894/**
895 * Clears a breakpoint set by REMR3BreakpointSet().
896 *
897 * @returns VBox status code.
898 * @param pVM The VM handle.
899 * @param Address The breakpoint address.
900 * @thread The emulation thread.
901 */
902REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
903{
904 VM_ASSERT_EMT(pVM);
905 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
906 {
907 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
908 return VINF_SUCCESS;
909 }
910 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
911 return VERR_REM_BP_NOT_FOUND;
912}
913
914
915/**
916 * Emulate an instruction.
917 *
918 * This function executes one instruction without letting anyone
919 * interrupt it. This is intended for being called while being in
920 * raw mode and thus will take care of all the state syncing between
921 * REM and the rest.
922 *
923 * @returns VBox status code.
924 * @param pVM VM handle.
925 * @param pVCpu VMCPU Handle.
926 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;   /* restore the flag saved above */
    if (RT_SUCCESS(rc))
    {
        /* Save the pending interrupt requests so they can be restored after the
           single-instruction run; only CPU_INTERRUPT_SINGLE_INSTR is active meanwhile. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the REM breakpoint table for the
                   current PC to tell the two apart. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;   /* reset for the next EXCP_RC */
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1063
1064
1065/**
1066 * Runs code in recompiled mode.
1067 *
1068 * Before calling this function the REM state needs to be in sync with
1069 * the VM. Call REMR3State() to perform the sync. It's only necessary
1070 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1071 * and after calling REMR3StateBack().
1072 *
1073 * @returns VBox status code.
1074 *
1075 * @param pVM VM Handle.
1076 * @param pVCpu VMCPU Handle.
1077 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);  /* REMR3State must have been called first */

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Translate the QEMU exit code into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            /* Disabled developer debugging aid. */
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the REM breakpoint table for the
               current PC to tell the two apart. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;   /* reset for the next EXCP_RC */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1192
1193
1194/**
1195 * Check if the cpu state is suitable for Raw execution.
1196 *
1197 * @returns boolean
1198 * @param env The CPU env struct.
1199 * @param eip The EIP to check this for (might differ from env->eip).
1200 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1201 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1202 *
1203 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1204 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         *
         * Note: QEMU keeps the descriptor attribute bits shifted left by 8 in
         * the 'flags' fields, hence the '(flags >> 8) & 0xF0FF' conversions to
         * the VBox hidden-selector attribute layout below.
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr             = env->ldt.selector;
        Ctx.ldtrHid.u64Base  = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u   = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires paged protected mode. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        /* PAE is only acceptable if the (virtual) CPU claims to support it. */
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Supervisor code path (the VBox raw-mode ring-0 in ring-1 trick). */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patched code is always executed raw, regardless of the checks below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1433
1434
1435/**
1436 * Fetches a code byte.
1437 *
1438 * @returns Success indicator (bool) for ease of use.
1439 * @param env The CPU environment structure.
1440 * @param GCPtrInstr Where to fetch code.
1441 * @param pu8Byte Where to store the byte on success
1442 */
1443bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1444{
1445 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1446 if (RT_SUCCESS(rc))
1447 return true;
1448 return false;
1449}
1450
1451
1452/**
1453 * Flush (or invalidate if you like) page table/dir entry.
1454 *
1455 * (invlpg instruction; tlb_flush_page)
1456 *
1457 * @param env Pointer to cpu environment.
1458 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1459 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     * PGM works against the CPUM context, so it must see the current CRx values.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the virtual-8086 interrupt redirection bitmap
       in the TSS, so request a TSS resync. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Invalidation failed; fall back to a full CR3 resync on the next chance. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1500
1501
1502#ifndef REM_PHYS_ADDR_IN_TLB
1503/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored here; a writable mapping
       is always requested and write monitoring is instead signalled to the
       caller via bit 1 of the returned pointer - presumably intentional,
       confirm against the TLB code that consumes these tag bits. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;                       /* tag: no usable mapping */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);     /* tag: writes must be caught */
    return pv;
}
1523#endif /* REM_PHYS_ADDR_IN_TLB */
1524
1525
1526/**
1527 * Called from tlb_protect_code in order to write monitor a code page.
1528 *
1529 * @param env Pointer to the CPU environment.
1530 * @param GCPtr Code page to monitor
1531 */
1532void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1533{
1534#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1535 Assert(env->pVM->rem.s.fInREM);
1536 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1537 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1538 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1539 && !(env->eflags & VM_MASK) /* no V86 mode */
1540 && !HWACCMIsEnabled(env->pVM))
1541 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1542#endif
1543}
1544
1545
1546/**
1547 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1548 *
1549 * @param env Pointer to the CPU environment.
1550 * @param GCPtr Code page to monitor
1551 */
1552void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1553{
1554 Assert(env->pVM->rem.s.fInREM);
1555#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1556 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1557 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1558 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1559 && !(env->eflags & VM_MASK) /* no V86 mode */
1560 && !HWACCMIsEnabled(env->pVM))
1561 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1562#endif
1563}
1564
1565
1566/**
1567 * Called when the CPU is initialized, any of the CRx registers are changed or
1568 * when the A20 line is modified.
1569 *
1570 * @param env Pointer to the CPU environment.
1571 * @param fGlobal Set if the flush is global.
1572 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     * PGM works against the CPUM context, so it must see the current CRx values.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the virtual-8086 interrupt redirection bitmap
       in the TSS, so request a TSS resync. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1610
1611
1612/**
1613 * Called when any of the cr0, cr4 or efer registers is updated.
1614 *
1615 * @param env Pointer to the CPU environment.
1616 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the virtual-8086 interrupt redirection bitmap
       in the TSS, so request a TSS resync. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    efer = 0;   /* no EFER (long mode) on 32-bit targets */
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM status: hand it to EM via the REM rc mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1662
1663
1664/**
1665 * Called from compiled code to run dma.
1666 *
1667 * @param env Pointer to the CPU environment.
1668 */
void remR3DmaRun(CPUState *env)
{
    /* Suspend the emulated-code profiling interval while PDM runs the
       pending DMA work, then resume it. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1675
1676
1677/**
1678 * Called from compiled code to schedule pending timers in VMM
1679 *
1680 * @param env Pointer to the CPU environment.
1681 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    /* Account the timer work to its own profiling bucket instead of the
       emulated-code one. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1692
1693
1694/**
1695 * Record trap occurrence
1696 *
1697 * @returns VBox status code
1698 * @param env Pointer to the CPU environment.
1699 * @param uTrap Trap nr
1700 * @param uErrorCode Error code
1701 * @param pvNextEIP Next EIP
1702 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-trap-number counters (one-time STAM registration). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (vectors < 0x20) in protected, non-V86 mode take
       part in the repeated-trap detection below. */
    if(  uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same exception raised over and over at the same EIP/CR2?  Bail out
           after 512 repeats to avoid spinning forever. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Different trap/EIP/CR2 than last time: restart the repeat counter. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not subject to loop detection; just record it. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1753
1754
1755/*
1756 * Clear current active trap
1757 *
1758 * @param pVM VM Handle.
1759 */
void remR3TrapClear(PVM pVM)
{
    /* Reset the pending-exception tracking used by remR3NotifyTrap for
       repeated-trap (VERR_REM_TOO_MANY_TRAPS) detection. */
    pVM->rem.s.cPendingExceptions = 0;
    pVM->rem.s.uPendingException  = 0;
    pVM->rem.s.uPendingExcptEIP   = 0;
    pVM->rem.s.uPendingExcptCR2   = 0;
}
1767
1768
1769/*
1770 * Record previous call instruction addresses
1771 *
1772 * @param env Pointer to the CPU environment.
1773 */
void remR3RecordCall(CPUState *env)
{
    /* Feed the current EIP to CSAM's call-address history. */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1778
1779
1780/**
1781 * Syncs the internal REM state with the VM.
1782 *
1783 * This must be called before REMR3Run() is invoked whenever when the REM
1784 * state is not up to date. Calling it several times in a row is not
1785 * permitted.
1786 *
1787 * @returns VBox status code.
1788 *
1789 * @param pVM VM Handle.
1790 * @param pVCpu VMCPU Handle.
1791 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
1795 */
1796REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1797{
1798 register const CPUMCTX *pCtx;
1799 register unsigned fFlags;
1800 bool fHiddenSelRegsValid;
1801 unsigned i;
1802 TRPMEVENT enmType;
1803 uint8_t u8TrapNo;
1804 uint32_t uCpl;
1805 int rc;
1806
1807 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1808 Log2(("REMR3State:\n"));
1809
1810 pVM->rem.s.Env.pVCpu = pVCpu;
1811 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1812 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
1813
1814 Assert(!pVM->rem.s.fInREM);
1815 pVM->rem.s.fInStateSync = true;
1816
1817 /*
1818 * If we have to flush TBs, do that immediately.
1819 */
1820 if (pVM->rem.s.fFlushTBs)
1821 {
1822 STAM_COUNTER_INC(&gStatFlushTBs);
1823 tb_flush(&pVM->rem.s.Env);
1824 pVM->rem.s.fFlushTBs = false;
1825 }
1826
1827 /*
1828 * Copy the registers which require no special handling.
1829 */
1830#ifdef TARGET_X86_64
1831 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1832 Assert(R_EAX == 0);
1833 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1834 Assert(R_ECX == 1);
1835 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1836 Assert(R_EDX == 2);
1837 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1838 Assert(R_EBX == 3);
1839 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1840 Assert(R_ESP == 4);
1841 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1842 Assert(R_EBP == 5);
1843 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1844 Assert(R_ESI == 6);
1845 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1846 Assert(R_EDI == 7);
1847 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1848 pVM->rem.s.Env.regs[8] = pCtx->r8;
1849 pVM->rem.s.Env.regs[9] = pCtx->r9;
1850 pVM->rem.s.Env.regs[10] = pCtx->r10;
1851 pVM->rem.s.Env.regs[11] = pCtx->r11;
1852 pVM->rem.s.Env.regs[12] = pCtx->r12;
1853 pVM->rem.s.Env.regs[13] = pCtx->r13;
1854 pVM->rem.s.Env.regs[14] = pCtx->r14;
1855 pVM->rem.s.Env.regs[15] = pCtx->r15;
1856
1857 pVM->rem.s.Env.eip = pCtx->rip;
1858
1859 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1860#else
1861 Assert(R_EAX == 0);
1862 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1863 Assert(R_ECX == 1);
1864 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1865 Assert(R_EDX == 2);
1866 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1867 Assert(R_EBX == 3);
1868 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1869 Assert(R_ESP == 4);
1870 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1871 Assert(R_EBP == 5);
1872 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1873 Assert(R_ESI == 6);
1874 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1875 Assert(R_EDI == 7);
1876 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1877 pVM->rem.s.Env.eip = pCtx->eip;
1878
1879 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1880#endif
1881
1882 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1883
1884 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1885 for (i=0;i<8;i++)
1886 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1887
1888 /*
1889 * Clear the halted hidden flag (the interrupt waking up the CPU can
1890 * have been dispatched in raw mode).
1891 */
1892 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1893
1894 /*
1895 * Replay invlpg?
1896 */
1897 if (pVM->rem.s.cInvalidatedPages)
1898 {
1899 RTUINT i;
1900
1901 pVM->rem.s.fIgnoreInvlPg = true;
1902 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1903 {
1904 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1905 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1906 }
1907 pVM->rem.s.fIgnoreInvlPg = false;
1908 pVM->rem.s.cInvalidatedPages = 0;
1909 }
1910
1911 /* Replay notification changes. */
1912 REMR3ReplayHandlerNotifications(pVM);
1913
1914 /* Update MSRs; before CRx registers! */
1915 pVM->rem.s.Env.efer = pCtx->msrEFER;
1916 pVM->rem.s.Env.star = pCtx->msrSTAR;
1917 pVM->rem.s.Env.pat = pCtx->msrPAT;
1918#ifdef TARGET_X86_64
1919 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1920 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1921 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1922 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1923
1924 /* Update the internal long mode activate flag according to the new EFER value. */
1925 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1926 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1927 else
1928 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1929#endif
1930
1931 /*
1932 * Registers which are rarely changed and require special handling / order when changed.
1933 */
1934 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
1935 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
1936 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
1937 | CPUM_CHANGED_CR4
1938 | CPUM_CHANGED_CR0
1939 | CPUM_CHANGED_CR3
1940 | CPUM_CHANGED_GDTR
1941 | CPUM_CHANGED_IDTR
1942 | CPUM_CHANGED_SYSENTER_MSR
1943 | CPUM_CHANGED_LDTR
1944 | CPUM_CHANGED_CPUID
1945 | CPUM_CHANGED_FPU_REM
1946 )
1947 )
1948 {
1949 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1950 {
1951 pVM->rem.s.fIgnoreCR3Load = true;
1952 tlb_flush(&pVM->rem.s.Env, true);
1953 pVM->rem.s.fIgnoreCR3Load = false;
1954 }
1955
1956 /* CR4 before CR0! */
1957 if (fFlags & CPUM_CHANGED_CR4)
1958 {
1959 pVM->rem.s.fIgnoreCR3Load = true;
1960 pVM->rem.s.fIgnoreCpuMode = true;
1961 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1962 pVM->rem.s.fIgnoreCpuMode = false;
1963 pVM->rem.s.fIgnoreCR3Load = false;
1964 }
1965
1966 if (fFlags & CPUM_CHANGED_CR0)
1967 {
1968 pVM->rem.s.fIgnoreCR3Load = true;
1969 pVM->rem.s.fIgnoreCpuMode = true;
1970 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1971 pVM->rem.s.fIgnoreCpuMode = false;
1972 pVM->rem.s.fIgnoreCR3Load = false;
1973 }
1974
1975 if (fFlags & CPUM_CHANGED_CR3)
1976 {
1977 pVM->rem.s.fIgnoreCR3Load = true;
1978 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1979 pVM->rem.s.fIgnoreCR3Load = false;
1980 }
1981
1982 if (fFlags & CPUM_CHANGED_GDTR)
1983 {
1984 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1985 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1986 }
1987
1988 if (fFlags & CPUM_CHANGED_IDTR)
1989 {
1990 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1991 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1992 }
1993
1994 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1995 {
1996 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1997 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1998 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1999 }
2000
2001 if (fFlags & CPUM_CHANGED_LDTR)
2002 {
2003 if (fHiddenSelRegsValid)
2004 {
2005 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
2006 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
2007 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2008 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2009 }
2010 else
2011 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2012 }
2013
2014 if (fFlags & CPUM_CHANGED_CPUID)
2015 {
2016 uint32_t u32Dummy;
2017
2018 /*
2019 * Get the CPUID features.
2020 */
2021 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2022 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2023 }
2024
2025 /* Sync FPU state after CR4, CPUID and EFER (!). */
2026 if (fFlags & CPUM_CHANGED_FPU_REM)
2027 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2028 }
2029
2030 /*
2031 * Sync TR unconditionally to make life simpler.
2032 */
2033 pVM->rem.s.Env.tr.selector = pCtx->tr;
2034 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2035 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2036 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2037 /* Note! do_interrupt will fault if the busy flag is still set... */
2038 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2039
2040 /*
2041 * Update selector registers.
2042 * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
2044 * saved state which takes a quick dip into rawmode for instance.
2045 */
2046 /*
2047 * Stack; Note first check this one as the CPL might have changed. The
2048 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2049 */
2050
2051 if (fHiddenSelRegsValid)
2052 {
2053 /* The hidden selector registers are valid in the CPU context. */
2054 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2055
2056 /* Set current CPL */
2057 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2058
2059 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2060 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2061 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2062 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2063 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2064 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2065 }
2066 else
2067 {
2068 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2069 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2070 {
2071 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2072
2073 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2074 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2075#ifdef VBOX_WITH_STATISTICS
2076 if (pVM->rem.s.Env.segs[R_SS].newselector)
2077 {
2078 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2079 }
2080#endif
2081 }
2082 else
2083 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2084
2085 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2086 {
2087 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2088 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2089#ifdef VBOX_WITH_STATISTICS
2090 if (pVM->rem.s.Env.segs[R_ES].newselector)
2091 {
2092 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2093 }
2094#endif
2095 }
2096 else
2097 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2098
2099 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2100 {
2101 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2102 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2103#ifdef VBOX_WITH_STATISTICS
2104 if (pVM->rem.s.Env.segs[R_CS].newselector)
2105 {
2106 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2107 }
2108#endif
2109 }
2110 else
2111 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2112
2113 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2114 {
2115 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2116 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2117#ifdef VBOX_WITH_STATISTICS
2118 if (pVM->rem.s.Env.segs[R_DS].newselector)
2119 {
2120 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2121 }
2122#endif
2123 }
2124 else
2125 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2126
2127 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2128 * be the same but not the base/limit. */
2129 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2130 {
2131 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2132 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2133#ifdef VBOX_WITH_STATISTICS
2134 if (pVM->rem.s.Env.segs[R_FS].newselector)
2135 {
2136 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2137 }
2138#endif
2139 }
2140 else
2141 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2142
2143 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2144 {
2145 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2146 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2147#ifdef VBOX_WITH_STATISTICS
2148 if (pVM->rem.s.Env.segs[R_GS].newselector)
2149 {
2150 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2151 }
2152#endif
2153 }
2154 else
2155 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2156 }
2157
2158 /*
2159 * Check for traps.
2160 */
2161 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2162 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2163 if (RT_SUCCESS(rc))
2164 {
2165#ifdef DEBUG
2166 if (u8TrapNo == 0x80)
2167 {
2168 remR3DumpLnxSyscall(pVCpu);
2169 remR3DumpOBsdSyscall(pVCpu);
2170 }
2171#endif
2172
2173 pVM->rem.s.Env.exception_index = u8TrapNo;
2174 if (enmType != TRPM_SOFTWARE_INT)
2175 {
2176 pVM->rem.s.Env.exception_is_int = 0;
2177 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2178 }
2179 else
2180 {
2181 /*
             * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
             * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
             * for int03 and into.
2185 */
2186 pVM->rem.s.Env.exception_is_int = 1;
2187 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2188 /* int 3 may be generated by one-byte 0xcc */
2189 if (u8TrapNo == 3)
2190 {
2191 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2192 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2193 }
2194 /* int 4 may be generated by one-byte 0xce */
2195 else if (u8TrapNo == 4)
2196 {
2197 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2198 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2199 }
2200 }
2201
2202 /* get error code and cr2 if needed. */
2203 switch (u8TrapNo)
2204 {
2205 case 0x0e:
2206 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2207 /* fallthru */
2208 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2209 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2210 break;
2211
2212 case 0x11: case 0x08:
2213 default:
2214 pVM->rem.s.Env.error_code = 0;
2215 break;
2216 }
2217
2218 /*
2219 * We can now reset the active trap since the recompiler is gonna have a go at it.
2220 */
2221 rc = TRPMResetTrap(pVCpu);
2222 AssertRC(rc);
2223 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2224 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2225 }
2226
2227 /*
2228 * Clear old interrupt request flags; Check for pending hardware interrupts.
2229 * (See @remark for why we don't check for other FFs.)
2230 */
2231 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2232 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2233 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2234 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2235
2236 /*
2237 * We're now in REM mode.
2238 */
2239 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2240 pVM->rem.s.fInREM = true;
2241 pVM->rem.s.fInStateSync = false;
2242 pVM->rem.s.cCanExecuteRaw = 0;
2243 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2244 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2245 return VINF_SUCCESS;
2246}
2247
2248
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8  = pVM->rem.s.Env.regs[8];
    pCtx->r9  = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that are still marked for lazy (out-of-sync) loading. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle requires a TSS resync; flag it for SELM. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* Descriptor tables: raise the matching sync FF whenever REM changed one. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* 0xF0FF keeps only the attribute bits of (flags >> 8); QEmu stores the
       whole 2nd descriptor dword in 'flags'. */
    if (    pCtx->ldtr != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                   ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                   : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* REMR3State cleared the busy bit on entry (do_interrupt faults on a
           busy TSS); restore it for the VMM view of a valid TR. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * If the recompiler left a pending exception behind, hand it back to TRPM.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Forward the error code (and for #PF the fault address) as well. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                   HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                       |  pVM->rem.s.Env.segs[R_GS].newselector
                       |  pVM->rem.s.Env.segs[R_FS].newselector
                       |  pVM->rem.s.Env.segs[R_ES].newselector
                       |  pVM->rem.s.Env.segs[R_DS].newselector
                       |  pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2508
2509
2510/**
2511 * This is called by the disassembler when it wants to update the cpu state
2512 * before for instance doing a register dump.
2513 */
2514static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2515{
2516 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2517 unsigned i;
2518
2519 Assert(pVM->rem.s.fInREM);
2520
2521 /*
2522 * Copy back the registers.
2523 * This is done in the order they are declared in the CPUMCTX structure.
2524 */
2525
2526 /** @todo FOP */
2527 /** @todo FPUIP */
2528 /** @todo CS */
2529 /** @todo FPUDP */
2530 /** @todo DS */
2531 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2532 pCtx->fpu.MXCSR = 0;
2533 pCtx->fpu.MXCSR_MASK = 0;
2534
2535 /** @todo check if FPU/XMM was actually used in the recompiler */
2536 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2537//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2538
2539#ifdef TARGET_X86_64
2540 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2541 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2542 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2543 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2544 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2545 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2546 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2547 pCtx->r8 = pVM->rem.s.Env.regs[8];
2548 pCtx->r9 = pVM->rem.s.Env.regs[9];
2549 pCtx->r10 = pVM->rem.s.Env.regs[10];
2550 pCtx->r11 = pVM->rem.s.Env.regs[11];
2551 pCtx->r12 = pVM->rem.s.Env.regs[12];
2552 pCtx->r13 = pVM->rem.s.Env.regs[13];
2553 pCtx->r14 = pVM->rem.s.Env.regs[14];
2554 pCtx->r15 = pVM->rem.s.Env.regs[15];
2555
2556 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2557#else
2558 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2559 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2560 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2561 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2562 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2563 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2564 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2565
2566 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2567#endif
2568
2569 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2570
2571 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2572 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2573 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2574 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2575 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2576
2577#ifdef TARGET_X86_64
2578 pCtx->rip = pVM->rem.s.Env.eip;
2579 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2580#else
2581 pCtx->eip = pVM->rem.s.Env.eip;
2582 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2583#endif
2584
2585 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2586 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2587 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2588 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2589 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2590 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2591
2592 for (i = 0; i < 8; i++)
2593 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2594
2595 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2596 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2597 {
2598 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2599 STAM_COUNTER_INC(&gStatREMGDTChange);
2600 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2601 }
2602
2603 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2604 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2605 {
2606 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2607 STAM_COUNTER_INC(&gStatREMIDTChange);
2608 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2609 }
2610
2611 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2612 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2613 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2614 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2615 {
2616 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2617 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2618 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2619 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2620 STAM_COUNTER_INC(&gStatREMLDTRChange);
2621 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2622 }
2623
2624 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2625 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2626 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2627 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2628 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2629 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2630 : 0) )
2631 {
2632 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2633 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2634 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2635 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2636 pCtx->tr = pVM->rem.s.Env.tr.selector;
2637 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2638 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2639 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2640 if (pCtx->trHid.Attr.u)
2641 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2642 STAM_COUNTER_INC(&gStatREMTRChange);
2643 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2644 }
2645
2646 /** @todo These values could still be out of sync! */
2647 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2648 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2649 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2650 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2651
2652 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2653 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2654 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2655
2656 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2657 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2658 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2659
2660 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2661 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2662 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2663
2664 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2665 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2666 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2667
2668 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2669 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2670 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2671
2672 /* Sysenter MSR */
2673 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2674 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2675 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2676
2677 /* System MSRs. */
2678 pCtx->msrEFER = pVM->rem.s.Env.efer;
2679 pCtx->msrSTAR = pVM->rem.s.Env.star;
2680 pCtx->msrPAT = pVM->rem.s.Env.pat;
2681#ifdef TARGET_X86_64
2682 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2683 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2684 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2685 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2686#endif
2687
2688}
2689
2690
2691/**
2692 * Update the VMM state information if we're currently in REM.
2693 *
2694 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2695 * we're currently executing in REM and the VMM state is invalid. This method will of
2696 * course check that we're executing in REM before syncing any data over to the VMM.
2697 *
2698 * @param pVM The VM handle.
2699 * @param pVCpu The VMCPU handle.
2700 */
2701REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2702{
2703 if (pVM->rem.s.fInREM)
2704 remR3StateUpdate(pVM, pVCpu);
2705}
2706
2707
2708#undef LOG_GROUP
2709#define LOG_GROUP LOG_GROUP_REM
2710
2711
/**
 * Notify the recompiler about Address Gate 20 state change.
 *
 * This notification is required since A20 gate changes are
 * initialized from a device driver and the VM might just as
 * well be in REM mode as in RAW mode.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle (currently unused).
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bracket the QEMU A20 update with cIgnoreAll — presumably to suppress
       our own notification handlers while QEMU reacts to the change;
       confirm against the cIgnoreAll consumers. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2733
2734
/**
 * Replays the handler notification changes.
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * Other threads queue notification records lock-free by pushing indices into
 * aHandlerNotifications onto the idxPendingList LIFO; this function drains
 * that list on the EMT and dispatches each record to the matching
 * remR3NotifyHandlerPhysical* worker.
 *
 * @param pVM   VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;          /* strict builds: counts records to sanity check list length. */
#endif

        /* Lockless purging of pending notifications.
           The XCHG atomically detaches the entire pending LIFO; producers
           continue pushing onto the now-empty list without disturbing us. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;                 /* raced: somebody else already drained it. */
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         * (Producers push at the head, so the detached list is newest-first.)
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t                idxCur;
            Assert(--c >= 0);

            /* Dispatch the record to the appropriate worker. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (CmpXchg loop: retries if another thread touched idxFreeList in between.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
2858
2859
/**
 * Notify REM about changed code page.
 *
 * Only does anything when VBOX_REM_PROTECT_PAGES_FROM_SMC is defined; it then
 * invalidates every translation block overlapping the page backing the given
 * guest virtual address.
 *
 * @returns VBox status code (always VINF_SUCCESS; translation failures are
 *          silently ignored as there is nothing to flush then).
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pvCodePage  Code page address (guest virtual).
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address (guest virtual -> guest physical).
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2898
2899
2900/**
2901 * Notification about a successful MMR3PhysRegister() call.
2902 *
2903 * @param pVM VM handle.
2904 * @param GCPhys The physical address the RAM.
2905 * @param cb Size of the memory.
2906 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2907 */
2908REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2909{
2910 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2911 VM_ASSERT_EMT(pVM);
2912
2913 /*
2914 * Validate input - we trust the caller.
2915 */
2916 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2917 Assert(cb);
2918 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2919 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2920
2921 /*
2922 * Base ram? Update GCPhysLastRam.
2923 */
2924 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2925 {
2926 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2927 {
2928 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2929 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2930 }
2931 }
2932
2933 /*
2934 * Register the ram.
2935 */
2936 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2937
2938 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2939 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2940 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2941
2942 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2943}
2944
2945
2946/**
2947 * Notification about a successful MMR3PhysRomRegister() call.
2948 *
2949 * @param pVM VM handle.
2950 * @param GCPhys The physical address of the ROM.
2951 * @param cb The size of the ROM.
2952 * @param pvCopy Pointer to the ROM copy.
2953 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2954 * This function will be called when ever the protection of the
2955 * shadow ROM changes (at reset and end of POST).
2956 */
2957REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2958{
2959 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2960 VM_ASSERT_EMT(pVM);
2961
2962 /*
2963 * Validate input - we trust the caller.
2964 */
2965 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2966 Assert(cb);
2967 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2968
2969 /*
2970 * Register the rom.
2971 */
2972 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2973
2974 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2975 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2976 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2977
2978 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2979}
2980
2981
2982/**
2983 * Notification about a successful memory deregistration or reservation.
2984 *
2985 * @param pVM VM Handle.
2986 * @param GCPhys Start physical address.
2987 * @param cb The size of the range.
2988 */
2989REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2990{
2991 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2992 VM_ASSERT_EMT(pVM);
2993
2994 /*
2995 * Validate input - we trust the caller.
2996 */
2997 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2998 Assert(cb);
2999 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3000
3001 /*
3002 * Unassigning the memory.
3003 */
3004 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3005
3006 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3007 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3008 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3009
3010 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3011}
3012
3013
3014/**
3015 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3016 *
3017 * @param pVM VM Handle.
3018 * @param enmType Handler type.
3019 * @param GCPhys Handler range address.
3020 * @param cb Size of the handler range.
3021 * @param fHasHCHandler Set if the handler has a HC callback function.
3022 *
3023 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3024 * Handler memory type to memory which has no HC handler.
3025 */
3026static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3027{
3028 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3029 enmType, GCPhys, cb, fHasHCHandler));
3030
3031 VM_ASSERT_EMT(pVM);
3032 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3033 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3034
3035
3036 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3037
3038 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3039 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3040 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3041 else if (fHasHCHandler)
3042 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3043 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3044
3045 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3046}
3047
3048/**
3049 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3050 *
3051 * @param pVM VM Handle.
3052 * @param enmType Handler type.
3053 * @param GCPhys Handler range address.
3054 * @param cb Size of the handler range.
3055 * @param fHasHCHandler Set if the handler has a HC callback function.
3056 *
3057 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3058 * Handler memory type to memory which has no HC handler.
3059 */
3060REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3061{
3062 REMR3ReplayHandlerNotifications(pVM);
3063
3064 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3065}
3066
3067/**
3068 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3069 *
3070 * @param pVM VM Handle.
3071 * @param enmType Handler type.
3072 * @param GCPhys Handler range address.
3073 * @param cb Size of the handler range.
3074 * @param fHasHCHandler Set if the handler has a HC callback function.
3075 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3076 */
3077static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3078{
3079 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3080 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3081 VM_ASSERT_EMT(pVM);
3082
3083
3084 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3085
3086 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3087 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3088 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3089 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3090 else if (fHasHCHandler)
3091 {
3092 if (!fRestoreAsRAM)
3093 {
3094 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3095 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3096 }
3097 else
3098 {
3099 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3100 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3101 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3102 }
3103 }
3104 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3105
3106 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3107}
3108
3109/**
3110 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3111 *
3112 * @param pVM VM Handle.
3113 * @param enmType Handler type.
3114 * @param GCPhys Handler range address.
3115 * @param cb Size of the handler range.
3116 * @param fHasHCHandler Set if the handler has a HC callback function.
3117 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3118 */
3119REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3120{
3121 REMR3ReplayHandlerNotifications(pVM);
3122 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3123}
3124
3125
3126/**
3127 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3128 *
3129 * @param pVM VM Handle.
3130 * @param enmType Handler type.
3131 * @param GCPhysOld Old handler range address.
3132 * @param GCPhysNew New handler range address.
3133 * @param cb Size of the handler range.
3134 * @param fHasHCHandler Set if the handler has a HC callback function.
3135 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3136 */
3137static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3138{
3139 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3140 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3141 VM_ASSERT_EMT(pVM);
3142 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3143
3144 if (fHasHCHandler)
3145 {
3146 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3147
3148 /*
3149 * Reset the old page.
3150 */
3151 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3152 if (!fRestoreAsRAM)
3153 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3154 else
3155 {
3156 /* This is not perfect, but it'll do for PD monitoring... */
3157 Assert(cb == PAGE_SIZE);
3158 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3159 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3160 }
3161
3162 /*
3163 * Update the new page.
3164 */
3165 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3166 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3167 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3168 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3169
3170 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3171 }
3172}
3173
3174/**
3175 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3176 *
3177 * @param pVM VM Handle.
3178 * @param enmType Handler type.
3179 * @param GCPhysOld Old handler range address.
3180 * @param GCPhysNew New handler range address.
3181 * @param cb Size of the handler range.
3182 * @param fHasHCHandler Set if the handler has a HC callback function.
3183 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3184 */
3185REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3186{
3187 REMR3ReplayHandlerNotifications(pVM);
3188
3189 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3190}
3191
3192/**
3193 * Checks if we're handling access to this page or not.
3194 *
3195 * @returns true if we're trapping access.
3196 * @returns false if we aren't.
3197 * @param pVM The VM handle.
3198 * @param GCPhys The physical address.
3199 *
3200 * @remark This function will only work correctly in VBOX_STRICT builds!
3201 */
3202REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3203{
3204#ifdef VBOX_STRICT
3205 unsigned long off;
3206 REMR3ReplayHandlerNotifications(pVM);
3207
3208 off = get_phys_page_offset(GCPhys);
3209 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3210 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3211 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3212#else
3213 return false;
3214#endif
3215}
3216
3217
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry for the page (memory type in the
 *                      low bits, addend in the page-aligned part).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }

    /* Not a monitored code page - most likely MMIO, which we cannot execute
       from.  Dump everything useful to the release log and abort fatally. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3256
/**
 * Read guest RAM and ROM.
 *
 * Bulk read going through PGM so access handlers are respected.
 *
 * @param   SrcGCPhys   The source address (guest physical).
 * @param   pvDst       The destination address.
 * @param   cb          Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3274
3275
3276/**
3277 * Read guest RAM and ROM, unsigned 8-bit.
3278 *
3279 * @param SrcGCPhys The source address (guest physical).
3280 */
3281RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3282{
3283 uint8_t val;
3284 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3285 VBOX_CHECK_ADDR(SrcGCPhys);
3286 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3287 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3288#ifdef VBOX_DEBUG_PHYS
3289 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3290#endif
3291 return val;
3292}
3293
3294
3295/**
3296 * Read guest RAM and ROM, signed 8-bit.
3297 *
3298 * @param SrcGCPhys The source address (guest physical).
3299 */
3300RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3301{
3302 int8_t val;
3303 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3304 VBOX_CHECK_ADDR(SrcGCPhys);
3305 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3306 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3307#ifdef VBOX_DEBUG_PHYS
3308 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3309#endif
3310 return val;
3311}
3312
3313
3314/**
3315 * Read guest RAM and ROM, unsigned 16-bit.
3316 *
3317 * @param SrcGCPhys The source address (guest physical).
3318 */
3319RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3320{
3321 uint16_t val;
3322 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3323 VBOX_CHECK_ADDR(SrcGCPhys);
3324 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3325 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3326#ifdef VBOX_DEBUG_PHYS
3327 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3328#endif
3329 return val;
3330}
3331
3332
3333/**
3334 * Read guest RAM and ROM, signed 16-bit.
3335 *
3336 * @param SrcGCPhys The source address (guest physical).
3337 */
3338RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3339{
3340 int16_t val;
3341 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3342 VBOX_CHECK_ADDR(SrcGCPhys);
3343 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3344 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3345#ifdef VBOX_DEBUG_PHYS
3346 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3347#endif
3348 return val;
3349}
3350
3351
3352/**
3353 * Read guest RAM and ROM, unsigned 32-bit.
3354 *
3355 * @param SrcGCPhys The source address (guest physical).
3356 */
3357RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3358{
3359 uint32_t val;
3360 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3361 VBOX_CHECK_ADDR(SrcGCPhys);
3362 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3363 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3364#ifdef VBOX_DEBUG_PHYS
3365 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3366#endif
3367 return val;
3368}
3369
3370
3371/**
3372 * Read guest RAM and ROM, signed 32-bit.
3373 *
3374 * @param SrcGCPhys The source address (guest physical).
3375 */
3376RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3377{
3378 int32_t val;
3379 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3380 VBOX_CHECK_ADDR(SrcGCPhys);
3381 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3382 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3383#ifdef VBOX_DEBUG_PHYS
3384 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3385#endif
3386 return val;
3387}
3388
3389
3390/**
3391 * Read guest RAM and ROM, unsigned 64-bit.
3392 *
3393 * @param SrcGCPhys The source address (guest physical).
3394 */
3395uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3396{
3397 uint64_t val;
3398 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3399 VBOX_CHECK_ADDR(SrcGCPhys);
3400 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3401 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3402#ifdef VBOX_DEBUG_PHYS
3403 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3404#endif
3405 return val;
3406}
3407
3408
3409/**
3410 * Read guest RAM and ROM, signed 64-bit.
3411 *
3412 * @param SrcGCPhys The source address (guest physical).
3413 */
3414int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3415{
3416 int64_t val;
3417 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3418 VBOX_CHECK_ADDR(SrcGCPhys);
3419 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3420 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3421#ifdef VBOX_DEBUG_PHYS
3422 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3423#endif
3424 return val;
3425}
3426
3427
/**
 * Write guest RAM.
 *
 * Bulk write going through PGM so access handlers are respected.
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   pvSrc       The source address.
 * @param   cb          Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3445
3446
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   val         Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3463
3464
/**
 * Write guest RAM, unsigned 16-bit.
 * (Header previously said "8-bit" - copy/paste slip; the code is 16-bit.)
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   val         Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3481
3482
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   val         Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3499
3500
3501/**
3502 * Write guest RAM, unsigned 64-bit.
3503 *
3504 * @param DstGCPhys The destination address (guest physical).
3505 * @param val Value
3506 */
3507void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3508{
3509 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3510 VBOX_CHECK_ADDR(DstGCPhys);
3511 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3512 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3513#ifdef VBOX_DEBUG_PHYS
3514 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3515#endif
3516}
3517
3518#undef LOG_GROUP
3519#define LOG_GROUP LOG_GROUP_REM_MMIO
3520
3521/** Read MMIO memory. */
3522static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3523{
3524 uint32_t u32 = 0;
3525 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3526 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3527 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3528 return u32;
3529}
3530
3531/** Read MMIO memory. */
3532static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3533{
3534 uint32_t u32 = 0;
3535 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3536 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3537 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3538 return u32;
3539}
3540
3541/** Read MMIO memory. */
3542static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3543{
3544 uint32_t u32 = 0;
3545 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3546 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3547 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3548 return u32;
3549}
3550
3551/** Write to MMIO memory. */
3552static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3553{
3554 int rc;
3555 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3556 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3557 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3558}
3559
3560/** Write to MMIO memory. */
3561static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3562{
3563 int rc;
3564 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3565 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3566 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3567}
3568
3569/** Write to MMIO memory. */
3570static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3571{
3572 int rc;
3573 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3574 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3575 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3576}
3577
3578
3579#undef LOG_GROUP
3580#define LOG_GROUP LOG_GROUP_REM_HANDLER
3581
3582/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3583
3584static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3585{
3586 uint8_t u8;
3587 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3588 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3589 return u8;
3590}
3591
3592static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3593{
3594 uint16_t u16;
3595 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3596 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3597 return u16;
3598}
3599
3600static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3601{
3602 uint32_t u32;
3603 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3604 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3605 return u32;
3606}
3607
/** Write to handler-monitored memory, 8-bit; goes through PGM so the handler
 *  fires.  NOTE(review): passes &u32 with a 1-byte size, i.e. it writes the
 *  lowest-addressed byte of u32 - correct only on a little-endian host
 *  (TODO confirm this assumption holds for all supported hosts). */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3613
/** Write to handler-monitored memory, 16-bit; goes through PGM so the handler
 *  fires.  NOTE(review): passes &u32 with a 2-byte size - correct only on a
 *  little-endian host (TODO confirm). */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3619
/** Write to handler-monitored memory, 32-bit; goes through PGM so the handler
 *  fires. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3625
3626/* -+- disassembly -+- */
3627
3628#undef LOG_GROUP
3629#define LOG_GROUP LOG_GROUP_REM_DISAS
3630
3631
3632/**
3633 * Enables or disables singled stepped disassembly.
3634 *
3635 * @returns VBox status code.
3636 * @param pVM VM handle.
3637 * @param fEnable To enable set this flag, to disable clear it.
3638 */
3639static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3640{
3641 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3642 VM_ASSERT_EMT(pVM);
3643
3644 if (fEnable)
3645 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3646 else
3647 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3648 return VINF_SUCCESS;
3649}
3650
3651
3652/**
3653 * Enables or disables singled stepped disassembly.
3654 *
3655 * @returns VBox status code.
3656 * @param pVM VM handle.
3657 * @param fEnable To enable set this flag, to disable clear it.
3658 */
3659REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3660{
3661 int rc;
3662
3663 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3664 if (VM_IS_EMT(pVM))
3665 return remR3DisasEnableStepping(pVM, fEnable);
3666
3667 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3668 AssertRC(rc);
3669 return rc;
3670}
3671
3672
3673#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3674/**
3675 * External Debugger Command: .remstep [on|off|1|0]
3676 */
3677static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3678{
3679 int rc;
3680
3681 if (cArgs == 0)
3682 /*
3683 * Print the current status.
3684 */
3685 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3686 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3687 else
3688 {
3689 /*
3690 * Convert the argument and change the mode.
3691 */
3692 bool fEnable;
3693 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3694 if (RT_SUCCESS(rc))
3695 {
3696 rc = REMR3DisasEnableStepping(pVM, fEnable);
3697 if (RT_SUCCESS(rc))
3698 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3699 else
3700 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3701 }
3702 else
3703 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3704 }
3705 return rc;
3706}
3707#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3708
3709
/**
 * Disassembles one instruction and prints it to the log.
 *
 * @returns Success indicator.
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   f32BitCode  Indicates that whether or not the code should
 *                      be disassembled as 16 or 32 bit. If -1 the CS
 *                      selector will be inspected.
 * @param   pszPrefix   Optional string put in front of the output line and
 *                      passed to the "cpumguest" info handler.
 */
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
{
    PVM pVM = env->pVM;
    const bool fLog = LogIsEnabled();
    const bool fLog2 = LogIs2Enabled();
    int rc = VINF_SUCCESS;      /* NOTE(review): never reassigned - see shadowing note below. */

    /*
     * Don't bother if there ain't any log output to do.
     */
    if (!fLog && !fLog2)
        return true;

    /*
     * Update the state so DBGF reads the correct register values.
     */
    remR3StateUpdate(pVM, env->pVCpu);

    /*
     * Log registers if requested.
     */
    if (!fLog2)
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);

    /*
     * Disassemble to log.
     */
    if (fLog)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);
        char szBuf[256];
        szBuf[0] = '\0';
        /* NOTE(review): this 'rc' shadows the outer one, so a disassembly
           failure still makes the function return true (the failure is only
           reflected in the logged szBuf text).  Possibly intentional
           best-effort logging - confirm before changing. */
        int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
                                    pVCpu->idCpu,
                                    0, /* Sel */
                                    0, /* GCPtr */
                                    DBGF_DISAS_FLAGS_CURRENT_GUEST
                                    | DBGF_DISAS_FLAGS_DEFAULT_MODE
                                    | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
                                    szBuf,
                                    sizeof(szBuf),
                                    NULL);
        if (RT_FAILURE(rc))
            RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
        if (pszPrefix && *pszPrefix)
            RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
        else
            RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
    }

    return RT_SUCCESS(rc);
}
3772
3773
3774/**
3775 * Disassemble recompiled code.
3776 *
3777 * @param phFileIgnored Ignored, logfile usually.
3778 * @param pvCode Pointer to the code block.
3779 * @param cb Size of the code block.
3780 */
3781void disas(FILE *phFile, void *pvCode, unsigned long cb)
3782{
3783#ifdef DEBUG_TMP_LOGGING
3784# define DISAS_PRINTF(x...) fprintf(phFile, x)
3785#else
3786# define DISAS_PRINTF(x...) RTLogPrintf(x)
3787 if (LogIs2Enabled())
3788#endif
3789 {
3790 unsigned off = 0;
3791 char szOutput[256];
3792 DISCPUSTATE Cpu;
3793
3794 memset(&Cpu, 0, sizeof(Cpu));
3795#ifdef RT_ARCH_X86
3796 Cpu.mode = CPUMODE_32BIT;
3797#else
3798 Cpu.mode = CPUMODE_64BIT;
3799#endif
3800
3801 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3802 while (off < cb)
3803 {
3804 uint32_t cbInstr;
3805 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3806 DISAS_PRINTF("%s", szOutput);
3807 else
3808 {
3809 DISAS_PRINTF("disas error\n");
3810 cbInstr = 1;
3811#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
3812 break;
3813#endif
3814 }
3815 off += cbInstr;
3816 }
3817 }
3818
3819#undef DISAS_PRINTF
3820}
3821
3822
3823/**
3824 * Disassemble guest code.
3825 *
3826 * @param phFileIgnored Ignored, logfile usually.
3827 * @param uCode The guest address of the code to disassemble. (flat?)
3828 * @param cb Number of bytes to disassemble.
3829 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3830 */
3831void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3832{
3833#ifdef DEBUG_TMP_LOGGING
3834# define DISAS_PRINTF(x...) fprintf(phFile, x)
3835#else
3836# define DISAS_PRINTF(x...) RTLogPrintf(x)
3837 if (LogIs2Enabled())
3838#endif
3839 {
3840 PVM pVM = cpu_single_env->pVM;
3841 PVMCPU pVCpu = cpu_single_env->pVCpu;
3842 RTSEL cs;
3843 RTGCUINTPTR eip;
3844
3845 Assert(pVCpu);
3846
3847 /*
3848 * Update the state so DBGF reads the correct register values (flags).
3849 */
3850 remR3StateUpdate(pVM, pVCpu);
3851
3852 /*
3853 * Do the disassembling.
3854 */
3855 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3856 cs = cpu_single_env->segs[R_CS].selector;
3857 eip = uCode - cpu_single_env->segs[R_CS].base;
3858 for (;;)
3859 {
3860 char szBuf[256];
3861 uint32_t cbInstr;
3862 int rc = DBGFR3DisasInstrEx(pVM,
3863 pVCpu->idCpu,
3864 cs,
3865 eip,
3866 DBGF_DISAS_FLAGS_DEFAULT_MODE,
3867 szBuf, sizeof(szBuf),
3868 &cbInstr);
3869 if (RT_SUCCESS(rc))
3870 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3871 else
3872 {
3873 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3874 cbInstr = 1;
3875 }
3876
3877 /* next */
3878 if (cb <= cbInstr)
3879 break;
3880 cb -= cbInstr;
3881 uCode += cbInstr;
3882 eip += cbInstr;
3883 }
3884 }
3885#undef DISAS_PRINTF
3886}
3887
3888
3889/**
3890 * Looks up a guest symbol.
3891 *
3892 * @returns Pointer to symbol name. This is a static buffer.
3893 * @param orig_addr The address in question.
3894 */
3895const char *lookup_symbol(target_ulong orig_addr)
3896{
3897 PVM pVM = cpu_single_env->pVM;
3898 RTGCINTPTR off = 0;
3899 RTDBGSYMBOL Sym;
3900 DBGFADDRESS Addr;
3901
3902 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
3903 if (RT_SUCCESS(rc))
3904 {
3905 static char szSym[sizeof(Sym.szName) + 48];
3906 if (!off)
3907 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3908 else if (off > 0)
3909 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3910 else
3911 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3912 return szSym;
3913 }
3914 return "<N/A>";
3915}
3916
3917
3918#undef LOG_GROUP
3919#define LOG_GROUP LOG_GROUP_REM
3920
3921
3922/* -+- FF notifications -+- */
3923
3924
3925/**
3926 * Notification about a pending interrupt.
3927 *
3928 * @param pVM VM Handle.
3929 * @param pVCpu VMCPU Handle.
3930 * @param u8Interrupt Interrupt
3931 * @thread The emulation thread.
3932 */
3933REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3934{
3935 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3936 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3937}
3938
3939/**
3940 * Notification about a pending interrupt.
3941 *
3942 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3943 * @param pVM VM Handle.
3944 * @param pVCpu VMCPU Handle.
3945 * @thread The emulation thread.
3946 */
3947REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3948{
3949 return pVM->rem.s.u32PendingInterrupt;
3950}
3951
3952/**
3953 * Notification about the interrupt FF being set.
3954 *
3955 * @param pVM VM Handle.
3956 * @param pVCpu VMCPU Handle.
3957 * @thread The emulation thread.
3958 */
3959REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3960{
3961 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3962 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3963 if (pVM->rem.s.fInREM)
3964 {
3965 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3966 CPU_INTERRUPT_EXTERNAL_HARD);
3967 }
3968}
3969
3970
3971/**
3972 * Notification about the interrupt FF being set.
3973 *
3974 * @param pVM VM Handle.
3975 * @param pVCpu VMCPU Handle.
3976 * @thread Any.
3977 */
3978REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3979{
3980 LogFlow(("REMR3NotifyInterruptClear:\n"));
3981 if (pVM->rem.s.fInREM)
3982 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3983}
3984
3985
3986/**
3987 * Notification about pending timer(s).
3988 *
3989 * @param pVM VM Handle.
3990 * @param pVCpuDst The target cpu for this notification.
3991 * TM will not broadcast pending timer events, but use
3992 * a dedicated EMT for them. So, only interrupt REM
3993 * execution if the given CPU is executing in REM.
3994 * @thread Any.
3995 */
3996REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3997{
3998#ifndef DEBUG_bird
3999 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4000#endif
4001 if (pVM->rem.s.fInREM)
4002 {
4003 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4004 {
4005 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4006 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4007 CPU_INTERRUPT_EXTERNAL_TIMER);
4008 }
4009 else
4010 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4011 }
4012 else
4013 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4014}
4015
4016
4017/**
4018 * Notification about pending DMA transfers.
4019 *
4020 * @param pVM VM Handle.
4021 * @thread Any.
4022 */
4023REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4024{
4025 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4026 if (pVM->rem.s.fInREM)
4027 {
4028 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4029 CPU_INTERRUPT_EXTERNAL_DMA);
4030 }
4031}
4032
4033
4034/**
4035 * Notification about pending timer(s).
4036 *
4037 * @param pVM VM Handle.
4038 * @thread Any.
4039 */
4040REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4041{
4042 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4043 if (pVM->rem.s.fInREM)
4044 {
4045 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4046 CPU_INTERRUPT_EXTERNAL_EXIT);
4047 }
4048}
4049
4050
4051/**
4052 * Notification about pending FF set by an external thread.
4053 *
4054 * @param pVM VM handle.
4055 * @thread Any.
4056 */
4057REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4058{
4059 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4060 if (pVM->rem.s.fInREM)
4061 {
4062 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4063 CPU_INTERRUPT_EXTERNAL_EXIT);
4064 }
4065}
4066
4067
4068#ifdef VBOX_WITH_STATISTICS
4069void remR3ProfileStart(int statcode)
4070{
4071 STAMPROFILEADV *pStat;
4072 switch(statcode)
4073 {
4074 case STATS_EMULATE_SINGLE_INSTR:
4075 pStat = &gStatExecuteSingleInstr;
4076 break;
4077 case STATS_QEMU_COMPILATION:
4078 pStat = &gStatCompilationQEmu;
4079 break;
4080 case STATS_QEMU_RUN_EMULATED_CODE:
4081 pStat = &gStatRunCodeQEmu;
4082 break;
4083 case STATS_QEMU_TOTAL:
4084 pStat = &gStatTotalTimeQEmu;
4085 break;
4086 case STATS_QEMU_RUN_TIMERS:
4087 pStat = &gStatTimers;
4088 break;
4089 case STATS_TLB_LOOKUP:
4090 pStat= &gStatTBLookup;
4091 break;
4092 case STATS_IRQ_HANDLING:
4093 pStat= &gStatIRQ;
4094 break;
4095 case STATS_RAW_CHECK:
4096 pStat = &gStatRawCheck;
4097 break;
4098
4099 default:
4100 AssertMsgFailed(("unknown stat %d\n", statcode));
4101 return;
4102 }
4103 STAM_PROFILE_ADV_START(pStat, a);
4104}
4105
4106
4107void remR3ProfileStop(int statcode)
4108{
4109 STAMPROFILEADV *pStat;
4110 switch(statcode)
4111 {
4112 case STATS_EMULATE_SINGLE_INSTR:
4113 pStat = &gStatExecuteSingleInstr;
4114 break;
4115 case STATS_QEMU_COMPILATION:
4116 pStat = &gStatCompilationQEmu;
4117 break;
4118 case STATS_QEMU_RUN_EMULATED_CODE:
4119 pStat = &gStatRunCodeQEmu;
4120 break;
4121 case STATS_QEMU_TOTAL:
4122 pStat = &gStatTotalTimeQEmu;
4123 break;
4124 case STATS_QEMU_RUN_TIMERS:
4125 pStat = &gStatTimers;
4126 break;
4127 case STATS_TLB_LOOKUP:
4128 pStat= &gStatTBLookup;
4129 break;
4130 case STATS_IRQ_HANDLING:
4131 pStat= &gStatIRQ;
4132 break;
4133 case STATS_RAW_CHECK:
4134 pStat = &gStatRawCheck;
4135 break;
4136 default:
4137 AssertMsgFailed(("unknown stat %d\n", statcode));
4138 return;
4139 }
4140 STAM_PROFILE_ADV_STOP(pStat, a);
4141}
4142#endif
4143
4144/**
4145 * Raise an RC, force rem exit.
4146 *
4147 * @param pVM VM handle.
4148 * @param rc The rc.
4149 */
4150void remR3RaiseRC(PVM pVM, int rc)
4151{
4152 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4153 Assert(pVM->rem.s.fInREM);
4154 VM_ASSERT_EMT(pVM);
4155 pVM->rem.s.rc = rc;
4156 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4157}
4158
4159
4160/* -+- timers -+- */
4161
4162uint64_t cpu_get_tsc(CPUX86State *env)
4163{
4164 STAM_COUNTER_INC(&gStatCpuGetTSC);
4165 return TMCpuTickGet(env->pVCpu);
4166}
4167
4168
4169/* -+- interrupts -+- */
4170
4171void cpu_set_ferr(CPUX86State *env)
4172{
4173 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4174 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4175}
4176
/**
 * Fetches the next interrupt vector for the recompiler to dispatch.
 *
 * Prefers a previously recorded pending interrupt (see
 * REMR3NotifyPendingInterrupt) over querying PDM, because in that case the
 * (a)pic has already been acknowledged.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param env The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the recorded interrupt and clear the slot. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard-interrupt request up while more interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4210
4211
4212/* -+- local apic -+- */
4213
4214#if 0 /* CPUMSetGuestMsr does this now. */
4215void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4216{
4217 int rc = PDMApicSetBase(env->pVM, val);
4218 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4219}
4220#endif
4221
4222uint64_t cpu_get_apic_base(CPUX86State *env)
4223{
4224 uint64_t u64;
4225 int rc = PDMApicGetBase(env->pVM, &u64);
4226 if (RT_SUCCESS(rc))
4227 {
4228 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4229 return u64;
4230 }
4231 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4232 return 0;
4233}
4234
4235void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4236{
4237 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4238 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4239}
4240
4241uint8_t cpu_get_apic_tpr(CPUX86State *env)
4242{
4243 uint8_t u8;
4244 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4245 if (RT_SUCCESS(rc))
4246 {
4247 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4248 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4249 }
4250 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4251 return 0;
4252}
4253
4254/**
4255 * Read an MSR.
4256 *
4257 * @retval 0 success.
4258 * @retval -1 failure, raise \#GP(0).
4259 * @param env The cpu state.
4260 * @param idMsr The MSR to read.
4261 * @param puValue Where to return the value.
4262 */
4263int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4264{
4265 Assert(env->pVCpu);
4266 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4267}
4268
4269/**
4270 * Write to an MSR.
4271 *
4272 * @retval 0 success.
4273 * @retval -1 failure, raise \#GP(0).
4274 * @param env The cpu state.
4275 * @param idMsr The MSR to read.
4276 * @param puValue Where to return the value.
4277 */
4278int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4279{
4280 Assert(env->pVCpu);
4281 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4282}
4283
4284/* -+- I/O Ports -+- */
4285
4286#undef LOG_GROUP
4287#define LOG_GROUP LOG_GROUP_REM_IOPORT
4288
4289void cpu_outb(CPUState *env, int addr, int val)
4290{
4291 int rc;
4292
4293 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4294 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4295
4296 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4297 if (RT_LIKELY(rc == VINF_SUCCESS))
4298 return;
4299 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4300 {
4301 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4302 remR3RaiseRC(env->pVM, rc);
4303 return;
4304 }
4305 remAbort(rc, __FUNCTION__);
4306}
4307
4308void cpu_outw(CPUState *env, int addr, int val)
4309{
4310 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4311 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4312 if (RT_LIKELY(rc == VINF_SUCCESS))
4313 return;
4314 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4315 {
4316 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4317 remR3RaiseRC(env->pVM, rc);
4318 return;
4319 }
4320 remAbort(rc, __FUNCTION__);
4321}
4322
4323void cpu_outl(CPUState *env, int addr, int val)
4324{
4325 int rc;
4326 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4327 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4328 if (RT_LIKELY(rc == VINF_SUCCESS))
4329 return;
4330 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4331 {
4332 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4333 remR3RaiseRC(env->pVM, rc);
4334 return;
4335 }
4336 remAbort(rc, __FUNCTION__);
4337}
4338
4339int cpu_inb(CPUState *env, int addr)
4340{
4341 uint32_t u32 = 0;
4342 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4343 if (RT_LIKELY(rc == VINF_SUCCESS))
4344 {
4345 if (/*addr != 0x61 && */addr != 0x71)
4346 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4347 return (int)u32;
4348 }
4349 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4350 {
4351 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4352 remR3RaiseRC(env->pVM, rc);
4353 return (int)u32;
4354 }
4355 remAbort(rc, __FUNCTION__);
4356 return 0xff;
4357}
4358
4359int cpu_inw(CPUState *env, int addr)
4360{
4361 uint32_t u32 = 0;
4362 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4363 if (RT_LIKELY(rc == VINF_SUCCESS))
4364 {
4365 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4366 return (int)u32;
4367 }
4368 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4369 {
4370 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4371 remR3RaiseRC(env->pVM, rc);
4372 return (int)u32;
4373 }
4374 remAbort(rc, __FUNCTION__);
4375 return 0xffff;
4376}
4377
4378int cpu_inl(CPUState *env, int addr)
4379{
4380 uint32_t u32 = 0;
4381 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4382 if (RT_LIKELY(rc == VINF_SUCCESS))
4383 {
4384//if (addr==0x01f0 && u32 == 0x6b6d)
4385// loglevel = ~0;
4386 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4387 return (int)u32;
4388 }
4389 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4390 {
4391 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4392 remR3RaiseRC(env->pVM, rc);
4393 return (int)u32;
4394 }
4395 remAbort(rc, __FUNCTION__);
4396 return 0xffffffff;
4397}
4398
4399#undef LOG_GROUP
4400#define LOG_GROUP LOG_GROUP_REM
4401
4402
4403/* -+- helpers and misc other interfaces -+- */
4404
4405/**
4406 * Perform the CPUID instruction.
4407 *
4408 * ASMCpuId cannot be invoked from some source files where this is used because of global
4409 * register allocations.
4410 *
4411 * @param env Pointer to the recompiler CPU structure.
4412 * @param uOperator CPUID operation (eax).
4413 * @param pvEAX Where to store eax.
4414 * @param pvEBX Where to store ebx.
4415 * @param pvECX Where to store ecx.
4416 * @param pvEDX Where to store edx.
4417 */
4418void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4419{
4420 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4421}
4422
4423
4424#if 0 /* not used */
4425/**
4426 * Interface for qemu hardware to report back fatal errors.
4427 */
4428void hw_error(const char *pszFormat, ...)
4429{
4430 /*
4431 * Bitch about it.
4432 */
4433 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4434 * this in my Odin32 tree at home! */
4435 va_list args;
4436 va_start(args, pszFormat);
4437 RTLogPrintf("fatal error in virtual hardware:");
4438 RTLogPrintfV(pszFormat, args);
4439 va_end(args);
4440 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4441
4442 /*
4443 * If we're in REM context we'll sync back the state before 'jumping' to
4444 * the EMs failure handling.
4445 */
4446 PVM pVM = cpu_single_env->pVM;
4447 if (pVM->rem.s.fInREM)
4448 REMR3StateBack(pVM);
4449 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4450 AssertMsgFailed(("EMR3FatalError returned!\n"));
4451}
4452#endif
4453
4454/**
4455 * Interface for the qemu cpu to report unhandled situation
4456 * raising a fatal VM error.
4457 */
4458void cpu_abort(CPUState *env, const char *pszFormat, ...)
4459{
4460 va_list va;
4461 PVM pVM;
4462 PVMCPU pVCpu;
4463 char szMsg[256];
4464
4465 /*
4466 * Bitch about it.
4467 */
4468 RTLogFlags(NULL, "nodisabled nobuffered");
4469 RTLogFlush(NULL);
4470
4471 va_start(va, pszFormat);
4472#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4473 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4474 unsigned cArgs = 0;
4475 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4476 const char *psz = strchr(pszFormat, '%');
4477 while (psz && cArgs < 6)
4478 {
4479 auArgs[cArgs++] = va_arg(va, uintptr_t);
4480 psz = strchr(psz + 1, '%');
4481 }
4482 switch (cArgs)
4483 {
4484 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4485 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4486 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4487 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4488 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4489 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4490 default:
4491 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4492 }
4493#else
4494 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4495#endif
4496 va_end(va);
4497
4498 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4499 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4500
4501 /*
4502 * If we're in REM context we'll sync back the state before 'jumping' to
4503 * the EMs failure handling.
4504 */
4505 pVM = cpu_single_env->pVM;
4506 pVCpu = cpu_single_env->pVCpu;
4507 Assert(pVCpu);
4508
4509 if (pVM->rem.s.fInREM)
4510 REMR3StateBack(pVM, pVCpu);
4511 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4512 AssertMsgFailed(("EMR3FatalError returned!\n"));
4513}
4514
4515
4516/**
4517 * Aborts the VM.
4518 *
4519 * @param rc VBox error code.
4520 * @param pszTip Hint about why/when this happened.
4521 */
4522void remAbort(int rc, const char *pszTip)
4523{
4524 PVM pVM;
4525 PVMCPU pVCpu;
4526
4527 /*
4528 * Bitch about it.
4529 */
4530 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4531 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4532
4533 /*
4534 * Jump back to where we entered the recompiler.
4535 */
4536 pVM = cpu_single_env->pVM;
4537 pVCpu = cpu_single_env->pVCpu;
4538 Assert(pVCpu);
4539
4540 if (pVM->rem.s.fInREM)
4541 REMR3StateBack(pVM, pVCpu);
4542
4543 EMR3FatalError(pVCpu, rc);
4544 AssertMsgFailed(("EMR3FatalError returned!\n"));
4545}
4546
4547
4548/**
4549 * Dumps a linux system call.
4550 * @param pVCpu VMCPU handle.
4551 */
4552void remR3DumpLnxSyscall(PVMCPU pVCpu)
4553{
4554 static const char *apsz[] =
4555 {
4556 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4557 "sys_exit",
4558 "sys_fork",
4559 "sys_read",
4560 "sys_write",
4561 "sys_open", /* 5 */
4562 "sys_close",
4563 "sys_waitpid",
4564 "sys_creat",
4565 "sys_link",
4566 "sys_unlink", /* 10 */
4567 "sys_execve",
4568 "sys_chdir",
4569 "sys_time",
4570 "sys_mknod",
4571 "sys_chmod", /* 15 */
4572 "sys_lchown16",
4573 "sys_ni_syscall", /* old break syscall holder */
4574 "sys_stat",
4575 "sys_lseek",
4576 "sys_getpid", /* 20 */
4577 "sys_mount",
4578 "sys_oldumount",
4579 "sys_setuid16",
4580 "sys_getuid16",
4581 "sys_stime", /* 25 */
4582 "sys_ptrace",
4583 "sys_alarm",
4584 "sys_fstat",
4585 "sys_pause",
4586 "sys_utime", /* 30 */
4587 "sys_ni_syscall", /* old stty syscall holder */
4588 "sys_ni_syscall", /* old gtty syscall holder */
4589 "sys_access",
4590 "sys_nice",
4591 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4592 "sys_sync",
4593 "sys_kill",
4594 "sys_rename",
4595 "sys_mkdir",
4596 "sys_rmdir", /* 40 */
4597 "sys_dup",
4598 "sys_pipe",
4599 "sys_times",
4600 "sys_ni_syscall", /* old prof syscall holder */
4601 "sys_brk", /* 45 */
4602 "sys_setgid16",
4603 "sys_getgid16",
4604 "sys_signal",
4605 "sys_geteuid16",
4606 "sys_getegid16", /* 50 */
4607 "sys_acct",
4608 "sys_umount", /* recycled never used phys() */
4609 "sys_ni_syscall", /* old lock syscall holder */
4610 "sys_ioctl",
4611 "sys_fcntl", /* 55 */
4612 "sys_ni_syscall", /* old mpx syscall holder */
4613 "sys_setpgid",
4614 "sys_ni_syscall", /* old ulimit syscall holder */
4615 "sys_olduname",
4616 "sys_umask", /* 60 */
4617 "sys_chroot",
4618 "sys_ustat",
4619 "sys_dup2",
4620 "sys_getppid",
4621 "sys_getpgrp", /* 65 */
4622 "sys_setsid",
4623 "sys_sigaction",
4624 "sys_sgetmask",
4625 "sys_ssetmask",
4626 "sys_setreuid16", /* 70 */
4627 "sys_setregid16",
4628 "sys_sigsuspend",
4629 "sys_sigpending",
4630 "sys_sethostname",
4631 "sys_setrlimit", /* 75 */
4632 "sys_old_getrlimit",
4633 "sys_getrusage",
4634 "sys_gettimeofday",
4635 "sys_settimeofday",
4636 "sys_getgroups16", /* 80 */
4637 "sys_setgroups16",
4638 "old_select",
4639 "sys_symlink",
4640 "sys_lstat",
4641 "sys_readlink", /* 85 */
4642 "sys_uselib",
4643 "sys_swapon",
4644 "sys_reboot",
4645 "old_readdir",
4646 "old_mmap", /* 90 */
4647 "sys_munmap",
4648 "sys_truncate",
4649 "sys_ftruncate",
4650 "sys_fchmod",
4651 "sys_fchown16", /* 95 */
4652 "sys_getpriority",
4653 "sys_setpriority",
4654 "sys_ni_syscall", /* old profil syscall holder */
4655 "sys_statfs",
4656 "sys_fstatfs", /* 100 */
4657 "sys_ioperm",
4658 "sys_socketcall",
4659 "sys_syslog",
4660 "sys_setitimer",
4661 "sys_getitimer", /* 105 */
4662 "sys_newstat",
4663 "sys_newlstat",
4664 "sys_newfstat",
4665 "sys_uname",
4666 "sys_iopl", /* 110 */
4667 "sys_vhangup",
4668 "sys_ni_syscall", /* old "idle" system call */
4669 "sys_vm86old",
4670 "sys_wait4",
4671 "sys_swapoff", /* 115 */
4672 "sys_sysinfo",
4673 "sys_ipc",
4674 "sys_fsync",
4675 "sys_sigreturn",
4676 "sys_clone", /* 120 */
4677 "sys_setdomainname",
4678 "sys_newuname",
4679 "sys_modify_ldt",
4680 "sys_adjtimex",
4681 "sys_mprotect", /* 125 */
4682 "sys_sigprocmask",
4683 "sys_ni_syscall", /* old "create_module" */
4684 "sys_init_module",
4685 "sys_delete_module",
4686 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4687 "sys_quotactl",
4688 "sys_getpgid",
4689 "sys_fchdir",
4690 "sys_bdflush",
4691 "sys_sysfs", /* 135 */
4692 "sys_personality",
4693 "sys_ni_syscall", /* reserved for afs_syscall */
4694 "sys_setfsuid16",
4695 "sys_setfsgid16",
4696 "sys_llseek", /* 140 */
4697 "sys_getdents",
4698 "sys_select",
4699 "sys_flock",
4700 "sys_msync",
4701 "sys_readv", /* 145 */
4702 "sys_writev",
4703 "sys_getsid",
4704 "sys_fdatasync",
4705 "sys_sysctl",
4706 "sys_mlock", /* 150 */
4707 "sys_munlock",
4708 "sys_mlockall",
4709 "sys_munlockall",
4710 "sys_sched_setparam",
4711 "sys_sched_getparam", /* 155 */
4712 "sys_sched_setscheduler",
4713 "sys_sched_getscheduler",
4714 "sys_sched_yield",
4715 "sys_sched_get_priority_max",
4716 "sys_sched_get_priority_min", /* 160 */
4717 "sys_sched_rr_get_interval",
4718 "sys_nanosleep",
4719 "sys_mremap",
4720 "sys_setresuid16",
4721 "sys_getresuid16", /* 165 */
4722 "sys_vm86",
4723 "sys_ni_syscall", /* Old sys_query_module */
4724 "sys_poll",
4725 "sys_nfsservctl",
4726 "sys_setresgid16", /* 170 */
4727 "sys_getresgid16",
4728 "sys_prctl",
4729 "sys_rt_sigreturn",
4730 "sys_rt_sigaction",
4731 "sys_rt_sigprocmask", /* 175 */
4732 "sys_rt_sigpending",
4733 "sys_rt_sigtimedwait",
4734 "sys_rt_sigqueueinfo",
4735 "sys_rt_sigsuspend",
4736 "sys_pread64", /* 180 */
4737 "sys_pwrite64",
4738 "sys_chown16",
4739 "sys_getcwd",
4740 "sys_capget",
4741 "sys_capset", /* 185 */
4742 "sys_sigaltstack",
4743 "sys_sendfile",
4744 "sys_ni_syscall", /* reserved for streams1 */
4745 "sys_ni_syscall", /* reserved for streams2 */
4746 "sys_vfork", /* 190 */
4747 "sys_getrlimit",
4748 "sys_mmap2",
4749 "sys_truncate64",
4750 "sys_ftruncate64",
4751 "sys_stat64", /* 195 */
4752 "sys_lstat64",
4753 "sys_fstat64",
4754 "sys_lchown",
4755 "sys_getuid",
4756 "sys_getgid", /* 200 */
4757 "sys_geteuid",
4758 "sys_getegid",
4759 "sys_setreuid",
4760 "sys_setregid",
4761 "sys_getgroups", /* 205 */
4762 "sys_setgroups",
4763 "sys_fchown",
4764 "sys_setresuid",
4765 "sys_getresuid",
4766 "sys_setresgid", /* 210 */
4767 "sys_getresgid",
4768 "sys_chown",
4769 "sys_setuid",
4770 "sys_setgid",
4771 "sys_setfsuid", /* 215 */
4772 "sys_setfsgid",
4773 "sys_pivot_root",
4774 "sys_mincore",
4775 "sys_madvise",
4776 "sys_getdents64", /* 220 */
4777 "sys_fcntl64",
4778 "sys_ni_syscall", /* reserved for TUX */
4779 "sys_ni_syscall",
4780 "sys_gettid",
4781 "sys_readahead", /* 225 */
4782 "sys_setxattr",
4783 "sys_lsetxattr",
4784 "sys_fsetxattr",
4785 "sys_getxattr",
4786 "sys_lgetxattr", /* 230 */
4787 "sys_fgetxattr",
4788 "sys_listxattr",
4789 "sys_llistxattr",
4790 "sys_flistxattr",
4791 "sys_removexattr", /* 235 */
4792 "sys_lremovexattr",
4793 "sys_fremovexattr",
4794 "sys_tkill",
4795 "sys_sendfile64",
4796 "sys_futex", /* 240 */
4797 "sys_sched_setaffinity",
4798 "sys_sched_getaffinity",
4799 "sys_set_thread_area",
4800 "sys_get_thread_area",
4801 "sys_io_setup", /* 245 */
4802 "sys_io_destroy",
4803 "sys_io_getevents",
4804 "sys_io_submit",
4805 "sys_io_cancel",
4806 "sys_fadvise64", /* 250 */
4807 "sys_ni_syscall",
4808 "sys_exit_group",
4809 "sys_lookup_dcookie",
4810 "sys_epoll_create",
4811 "sys_epoll_ctl", /* 255 */
4812 "sys_epoll_wait",
4813 "sys_remap_file_pages",
4814 "sys_set_tid_address",
4815 "sys_timer_create",
4816 "sys_timer_settime", /* 260 */
4817 "sys_timer_gettime",
4818 "sys_timer_getoverrun",
4819 "sys_timer_delete",
4820 "sys_clock_settime",
4821 "sys_clock_gettime", /* 265 */
4822 "sys_clock_getres",
4823 "sys_clock_nanosleep",
4824 "sys_statfs64",
4825 "sys_fstatfs64",
4826 "sys_tgkill", /* 270 */
4827 "sys_utimes",
4828 "sys_fadvise64_64",
4829 "sys_ni_syscall" /* sys_vserver */
4830 };
4831
4832 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
4833 switch (uEAX)
4834 {
4835 default:
4836 if (uEAX < RT_ELEMENTS(apsz))
4837 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4838 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
4839 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
4840 else
4841 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
4842 break;
4843
4844 }
4845}
4846
4847
4848/**
4849 * Dumps an OpenBSD system call.
4850 * @param pVCpu VMCPU handle.
4851 */
4852void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4853{
4854 static const char *apsz[] =
4855 {
4856 "SYS_syscall", //0
4857 "SYS_exit", //1
4858 "SYS_fork", //2
4859 "SYS_read", //3
4860 "SYS_write", //4
4861 "SYS_open", //5
4862 "SYS_close", //6
4863 "SYS_wait4", //7
4864 "SYS_8",
4865 "SYS_link", //9
4866 "SYS_unlink", //10
4867 "SYS_11",
4868 "SYS_chdir", //12
4869 "SYS_fchdir", //13
4870 "SYS_mknod", //14
4871 "SYS_chmod", //15
4872 "SYS_chown", //16
4873 "SYS_break", //17
4874 "SYS_18",
4875 "SYS_19",
4876 "SYS_getpid", //20
4877 "SYS_mount", //21
4878 "SYS_unmount", //22
4879 "SYS_setuid", //23
4880 "SYS_getuid", //24
4881 "SYS_geteuid", //25
4882 "SYS_ptrace", //26
4883 "SYS_recvmsg", //27
4884 "SYS_sendmsg", //28
4885 "SYS_recvfrom", //29
4886 "SYS_accept", //30
4887 "SYS_getpeername", //31
4888 "SYS_getsockname", //32
4889 "SYS_access", //33
4890 "SYS_chflags", //34
4891 "SYS_fchflags", //35
4892 "SYS_sync", //36
4893 "SYS_kill", //37
4894 "SYS_38",
4895 "SYS_getppid", //39
4896 "SYS_40",
4897 "SYS_dup", //41
4898 "SYS_opipe", //42
4899 "SYS_getegid", //43
4900 "SYS_profil", //44
4901 "SYS_ktrace", //45
4902 "SYS_sigaction", //46
4903 "SYS_getgid", //47
4904 "SYS_sigprocmask", //48
4905 "SYS_getlogin", //49
4906 "SYS_setlogin", //50
4907 "SYS_acct", //51
4908 "SYS_sigpending", //52
4909 "SYS_osigaltstack", //53
4910 "SYS_ioctl", //54
4911 "SYS_reboot", //55
4912 "SYS_revoke", //56
4913 "SYS_symlink", //57
4914 "SYS_readlink", //58
4915 "SYS_execve", //59
4916 "SYS_umask", //60
4917 "SYS_chroot", //61
4918 "SYS_62",
4919 "SYS_63",
4920 "SYS_64",
4921 "SYS_65",
4922 "SYS_vfork", //66
4923 "SYS_67",
4924 "SYS_68",
4925 "SYS_sbrk", //69
4926 "SYS_sstk", //70
4927 "SYS_61",
4928 "SYS_vadvise", //72
4929 "SYS_munmap", //73
4930 "SYS_mprotect", //74
4931 "SYS_madvise", //75
4932 "SYS_76",
4933 "SYS_77",
4934 "SYS_mincore", //78
4935 "SYS_getgroups", //79
4936 "SYS_setgroups", //80
4937 "SYS_getpgrp", //81
4938 "SYS_setpgid", //82
4939 "SYS_setitimer", //83
4940 "SYS_84",
4941 "SYS_85",
4942 "SYS_getitimer", //86
4943 "SYS_87",
4944 "SYS_88",
4945 "SYS_89",
4946 "SYS_dup2", //90
4947 "SYS_91",
4948 "SYS_fcntl", //92
4949 "SYS_select", //93
4950 "SYS_94",
4951 "SYS_fsync", //95
4952 "SYS_setpriority", //96
4953 "SYS_socket", //97
4954 "SYS_connect", //98
4955 "SYS_99",
4956 "SYS_getpriority", //100
4957 "SYS_101",
4958 "SYS_102",
4959 "SYS_sigreturn", //103
4960 "SYS_bind", //104
4961 "SYS_setsockopt", //105
4962 "SYS_listen", //106
4963 "SYS_107",
4964 "SYS_108",
4965 "SYS_109",
4966 "SYS_110",
4967 "SYS_sigsuspend", //111
4968 "SYS_112",
4969 "SYS_113",
4970 "SYS_114",
4971 "SYS_115",
4972 "SYS_gettimeofday", //116
4973 "SYS_getrusage", //117
4974 "SYS_getsockopt", //118
4975 "SYS_119",
4976 "SYS_readv", //120
4977 "SYS_writev", //121
4978 "SYS_settimeofday", //122
4979 "SYS_fchown", //123
4980 "SYS_fchmod", //124
4981 "SYS_125",
4982 "SYS_setreuid", //126
4983 "SYS_setregid", //127
4984 "SYS_rename", //128
4985 "SYS_129",
4986 "SYS_130",
4987 "SYS_flock", //131
4988 "SYS_mkfifo", //132
4989 "SYS_sendto", //133
4990 "SYS_shutdown", //134
4991 "SYS_socketpair", //135
4992 "SYS_mkdir", //136
4993 "SYS_rmdir", //137
4994 "SYS_utimes", //138
4995 "SYS_139",
4996 "SYS_adjtime", //140
4997 "SYS_141",
4998 "SYS_142",
4999 "SYS_143",
5000 "SYS_144",
5001 "SYS_145",
5002 "SYS_146",
5003 "SYS_setsid", //147
5004 "SYS_quotactl", //148
5005 "SYS_149",
5006 "SYS_150",
5007 "SYS_151",
5008 "SYS_152",
5009 "SYS_153",
5010 "SYS_154",
5011 "SYS_nfssvc", //155
5012 "SYS_156",
5013 "SYS_157",
5014 "SYS_158",
5015 "SYS_159",
5016 "SYS_160",
5017 "SYS_getfh", //161
5018 "SYS_162",
5019 "SYS_163",
5020 "SYS_164",
5021 "SYS_sysarch", //165
5022 "SYS_166",
5023 "SYS_167",
5024 "SYS_168",
5025 "SYS_169",
5026 "SYS_170",
5027 "SYS_171",
5028 "SYS_172",
5029 "SYS_pread", //173
5030 "SYS_pwrite", //174
5031 "SYS_175",
5032 "SYS_176",
5033 "SYS_177",
5034 "SYS_178",
5035 "SYS_179",
5036 "SYS_180",
5037 "SYS_setgid", //181
5038 "SYS_setegid", //182
5039 "SYS_seteuid", //183
5040 "SYS_lfs_bmapv", //184
5041 "SYS_lfs_markv", //185
5042 "SYS_lfs_segclean", //186
5043 "SYS_lfs_segwait", //187
5044 "SYS_188",
5045 "SYS_189",
5046 "SYS_190",
5047 "SYS_pathconf", //191
5048 "SYS_fpathconf", //192
5049 "SYS_swapctl", //193
5050 "SYS_getrlimit", //194
5051 "SYS_setrlimit", //195
5052 "SYS_getdirentries", //196
5053 "SYS_mmap", //197
5054 "SYS___syscall", //198
5055 "SYS_lseek", //199
5056 "SYS_truncate", //200
5057 "SYS_ftruncate", //201
5058 "SYS___sysctl", //202
5059 "SYS_mlock", //203
5060 "SYS_munlock", //204
5061 "SYS_205",
5062 "SYS_futimes", //206
5063 "SYS_getpgid", //207
5064 "SYS_xfspioctl", //208
5065 "SYS_209",
5066 "SYS_210",
5067 "SYS_211",
5068 "SYS_212",
5069 "SYS_213",
5070 "SYS_214",
5071 "SYS_215",
5072 "SYS_216",
5073 "SYS_217",
5074 "SYS_218",
5075 "SYS_219",
5076 "SYS_220",
5077 "SYS_semget", //221
5078 "SYS_222",
5079 "SYS_223",
5080 "SYS_224",
5081 "SYS_msgget", //225
5082 "SYS_msgsnd", //226
5083 "SYS_msgrcv", //227
5084 "SYS_shmat", //228
5085 "SYS_229",
5086 "SYS_shmdt", //230
5087 "SYS_231",
5088 "SYS_clock_gettime", //232
5089 "SYS_clock_settime", //233
5090 "SYS_clock_getres", //234
5091 "SYS_235",
5092 "SYS_236",
5093 "SYS_237",
5094 "SYS_238",
5095 "SYS_239",
5096 "SYS_nanosleep", //240
5097 "SYS_241",
5098 "SYS_242",
5099 "SYS_243",
5100 "SYS_244",
5101 "SYS_245",
5102 "SYS_246",
5103 "SYS_247",
5104 "SYS_248",
5105 "SYS_249",
5106 "SYS_minherit", //250
5107 "SYS_rfork", //251
5108 "SYS_poll", //252
5109 "SYS_issetugid", //253
5110 "SYS_lchown", //254
5111 "SYS_getsid", //255
5112 "SYS_msync", //256
5113 "SYS_257",
5114 "SYS_258",
5115 "SYS_259",
5116 "SYS_getfsstat", //260
5117 "SYS_statfs", //261
5118 "SYS_fstatfs", //262
5119 "SYS_pipe", //263
5120 "SYS_fhopen", //264
5121 "SYS_265",
5122 "SYS_fhstatfs", //266
5123 "SYS_preadv", //267
5124 "SYS_pwritev", //268
5125 "SYS_kqueue", //269
5126 "SYS_kevent", //270
5127 "SYS_mlockall", //271
5128 "SYS_munlockall", //272
5129 "SYS_getpeereid", //273
5130 "SYS_274",
5131 "SYS_275",
5132 "SYS_276",
5133 "SYS_277",
5134 "SYS_278",
5135 "SYS_279",
5136 "SYS_280",
5137 "SYS_getresuid", //281
5138 "SYS_setresuid", //282
5139 "SYS_getresgid", //283
5140 "SYS_setresgid", //284
5141 "SYS_285",
5142 "SYS_mquery", //286
5143 "SYS_closefrom", //287
5144 "SYS_sigaltstack", //288
5145 "SYS_shmget", //289
5146 "SYS_semop", //290
5147 "SYS_stat", //291
5148 "SYS_fstat", //292
5149 "SYS_lstat", //293
5150 "SYS_fhstat", //294
5151 "SYS___semctl", //295
5152 "SYS_shmctl", //296
5153 "SYS_msgctl", //297
5154 "SYS_MAXSYSCALL", //298
5155 //299
5156 //300
5157 };
5158 uint32_t uEAX;
5159 if (!LogIsEnabled())
5160 return;
5161 uEAX = CPUMGetGuestEAX(pVCpu);
5162 switch (uEAX)
5163 {
5164 default:
5165 if (uEAX < RT_ELEMENTS(apsz))
5166 {
5167 uint32_t au32Args[8] = {0};
5168 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5169 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5170 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5171 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5172 }
5173 else
5174 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5175 break;
5176 }
5177}
5178
5179
5180#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5181/**
5182 * The Dll main entry point (stub).
5183 */
5184bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5185{
5186 return true;
5187}
5188
/**
 * Minimal memcpy replacement for the no-CRT build.
 *
 * Simple forward byte copy; per the standard memcpy contract the regions
 * must not overlap.
 *
 * @returns dst.
 * @param   dst     The destination buffer.
 * @param   src     The source buffer (read only).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* fix: original dropped the const qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5196
5197#endif
5198
/**
 * QEMU callback invoked on SMM state updates.
 *
 * Deliberately a no-op in this build: the body does nothing with @a env.
 * NOTE(review): presumably SMM transitions are irrelevant to / handled
 * outside the recompiler — confirm.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette