VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 42407

Last change on this file since 42407 was 42407, checked in by vboxsync, 12 years ago

VMM: Further work on dealing with hidden segment registers, esp. when going stale.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 180.5 KB
1/* $Id: VBoxRecompiler.c 42407 2012-07-26 11:41:35Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
76/** Copy the 80-bit FPU register at pSrc to pDst.
77 * This is probably faster than *calling* memcpy.
78 */
79#define REM_COPY_FPU_REG(pDst, pSrc) \
80 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
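/* Editorial usage sketch (not part of the original file): copying all eight
 * FPU/MMX registers between two hypothetical state blocks; the names
 * pDstState/pSrcState and their aRegs layout are assumptions for illustration.
 *
 *    for (unsigned iReg = 0; iReg < 8; iReg++)
 *        REM_COPY_FPU_REG(&pDstState->aRegs[iReg], &pSrcState->aRegs[iReg]);
 */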
81
82/** How remR3RunLoggingStep operates. */
83#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day we have nothing to do. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMCOUNTER gStatCpuGetTSC;
130static STAMCOUNTER gStatRefuseTFInhibit;
131static STAMCOUNTER gStatRefuseVM86;
132static STAMCOUNTER gStatRefusePaging;
133static STAMCOUNTER gStatRefusePAE;
134static STAMCOUNTER gStatRefuseIOPLNot0;
135static STAMCOUNTER gStatRefuseIF0;
136static STAMCOUNTER gStatRefuseCode16;
137static STAMCOUNTER gStatRefuseWP0;
138static STAMCOUNTER gStatRefuseRing1or2;
139static STAMCOUNTER gStatRefuseCanExecute;
140static STAMCOUNTER gaStatRefuseStale[6];
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
158/** MMIO read callbacks. */
159CPUReadMemoryFunc *g_apfnMMIORead[3] =
160{
161 remR3MMIOReadU8,
162 remR3MMIOReadU16,
163 remR3MMIOReadU32
164};
165
166/** MMIO write callbacks. */
167CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
168{
169 remR3MMIOWriteU8,
170 remR3MMIOWriteU16,
171 remR3MMIOWriteU32
172};
173
174/** Handler read callbacks. */
175CPUReadMemoryFunc *g_apfnHandlerRead[3] =
176{
177 remR3HandlerReadU8,
178 remR3HandlerReadU16,
179 remR3HandlerReadU32
180};
181
182/** Handler write callbacks. */
183CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
184{
185 remR3HandlerWriteU8,
186 remR3HandlerWriteU16,
187 remR3HandlerWriteU32
188};
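/* Editorial note (derived from the arrays above, not original): each table
 * holds three callbacks - index 0 for byte, 1 for word, 2 for dword accesses -
 * which is the layout cpu_register_io_memory() expects when these tables are
 * registered in REMR3Init() below. */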
189
190
191#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
192/*
193 * Debugger commands.
194 */
195static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
196
197/** '.remstep' arguments. */
198static const DBGCVARDESC g_aArgRemStep[] =
199{
200 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
201 { 0, ~0U, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
202};
203
204/** Command descriptors. */
205static const DBGCCMD g_aCmds[] =
206{
207 {
208 .pszCmd ="remstep",
209 .cArgsMin = 0,
210 .cArgsMax = 1,
211 .paArgDescs = &g_aArgRemStep[0],
212 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
213 .fFlags = 0,
214 .pfnHandler = remR3CmdDisasEnableStepping,
215 .pszSyntax = "[on/off]",
216 .pszDescription = "Enable or disable single stepping with logged disassembly. "
217 "If no argument is given, the current state is shown."
218 }
219};
220#endif
221
222/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
223 * @todo huh??? That cannot be the case on the mac... So, this
224 * point is probably not valid any longer. */
225uint8_t *code_gen_prologue;
226
227
228/*******************************************************************************
229* Internal Functions *
230*******************************************************************************/
231void remAbort(int rc, const char *pszTip);
232extern int testmath(void);
233
234/* Put them here to avoid unused variable warnings. */
235AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
236#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
237//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
238/* Why did this have to be identical?? */
239AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
240#else
241AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
242#endif
243
244
245/**
246 * Initializes the REM.
247 *
248 * @returns VBox status code.
249 * @param pVM The VM to operate on.
250 */
251REMR3DECL(int) REMR3Init(PVM pVM)
252{
253 PREMHANDLERNOTIFICATION pCur;
254 uint32_t u32Dummy;
255 int rc;
256 unsigned i;
257
258#ifdef VBOX_ENABLE_VBOXREM64
259 LogRel(("Using 64-bit aware REM\n"));
260#endif
261
262 /*
263 * Assert sanity.
264 */
265 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
266 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
267 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
268#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
269 Assert(!testmath());
270#endif
271
272 /*
273 * Init some internal data members.
274 */
275 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
276 pVM->rem.s.Env.pVM = pVM;
277#ifdef CPU_RAW_MODE_INIT
278 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
279#endif
280
281 /*
282 * Initialize the REM critical section.
283 *
284 * Note: This is not a 100% safe solution, as updating the internal memory state while another VCPU
285 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
286 * deadlocks (mostly PGM vs. REM locking).
287 */
288 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
289 AssertRCReturn(rc, rc);
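/* Editorial sketch (not in the original source): the critical section is
 * meant to serialize handler-notification updates, roughly:
 *
 *    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
 *    ...queue or process a REMHANDLERNOTIFICATION record...
 *    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
 *
 * PDMCritSectEnter/PDMCritSectLeave are the standard PDM critsect APIs; the
 * actual call sites live in the notification code elsewhere in this file. */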
290
291 /* ctx. */
292 pVM->rem.s.pCtx = NULL; /* set when executing code. */
293 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
294
295 /* ignore all notifications */
296 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
297
298 code_gen_prologue = RTMemExecAlloc(_1K);
299 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
300
301 cpu_exec_init_all(0);
302
303 /*
304 * Init the recompiler.
305 */
306 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
307 {
308 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
309 return VERR_GENERAL_FAILURE;
310 }
311 PVMCPU pVCpu = VMMGetCpu(pVM);
312 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
313 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
314
315 EMRemLock(pVM);
316 cpu_reset(&pVM->rem.s.Env);
317 EMRemUnlock(pVM);
318
319 /* allocate code buffer for single instruction emulation. */
320 pVM->rem.s.Env.cbCodeBuffer = 4096;
321 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
322 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
323
324 /* Finally, set the cpu_single_env global. */
325 cpu_single_env = &pVM->rem.s.Env;
326
327 /* Nothing is pending by default */
328 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
329
330 /*
331 * Register ram types.
332 */
333 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
334 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
335 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
336 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
337 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
338
339 /* stop ignoring. */
340 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
341
342 /*
343 * Register the saved state data unit.
344 */
345 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
346 NULL, NULL, NULL,
347 NULL, remR3Save, NULL,
348 NULL, remR3Load, NULL);
349 if (RT_FAILURE(rc))
350 return rc;
351
352#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
353 /*
354 * Debugger commands.
355 */
356 static bool fRegisteredCmds = false;
357 if (!fRegisteredCmds)
358 {
359 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
360 if (RT_SUCCESS(rc))
361 fRegisteredCmds = true;
362 }
363#endif
364
365#ifdef VBOX_WITH_STATISTICS
366 /*
367 * Statistics.
368 */
369 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
370 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
371 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
372 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
373 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
374 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
375 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
376 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
377 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
378 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
379 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");
380
381 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
382
383 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInhibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
384 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
385 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
386 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
387 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
388 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
389 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
390 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
391 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
392 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
393 STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES", STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
394 STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
395 STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
396 STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
397 STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
398 STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
399 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
400
401 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
402 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
403 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
404 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
405
406 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
407 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
408 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
409 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
410 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
411 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
412
413 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
414 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
415 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
416 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
417 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
418 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
419
420 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
421#endif /* VBOX_WITH_STATISTICS */
422 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
423 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);
424
425 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
426 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
427 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
428
429
430#ifdef DEBUG_ALL_LOGGING
431 loglevel = ~0;
432#endif
433
434 /*
435 * Init the handler notification lists.
436 */
437 pVM->rem.s.idxPendingList = UINT32_MAX;
438 pVM->rem.s.idxFreeList = 0;
439
440 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
441 {
442 pCur = &pVM->rem.s.aHandlerNotifications[i];
443 pCur->idxNext = i + 1;
444 pCur->idxSelf = i;
445 }
446 pCur->idxNext = UINT32_MAX; /* the last record. */
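/* Editorial sketch (an assumption about later usage, not original code):
 * records are taken from the free list and queued on the pending list
 * roughly like this (the real code uses atomic operations elsewhere):
 *
 *    uint32_t idx = pVM->rem.s.idxFreeList;                       // pop free head
 *    pVM->rem.s.idxFreeList = pVM->rem.s.aHandlerNotifications[idx].idxNext;
 *    ...fill in the notification record...
 *    pVM->rem.s.aHandlerNotifications[idx].idxNext = pVM->rem.s.idxPendingList;
 *    pVM->rem.s.idxPendingList = idx;                             // push pending
 */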
447
448 return rc;
449}
450
451
452/**
453 * Finalizes the REM initialization.
454 *
455 * This is called after all components, devices and drivers have
456 * been initialized. Its main purpose is to finish the RAM related
457 * initialization.
458 *
459 * @returns VBox status code.
460 *
461 * @param pVM The VM handle.
462 */
463REMR3DECL(int) REMR3InitFinalize(PVM pVM)
464{
465 int rc;
466
467 /*
468 * Ram size & dirty bit map.
469 */
470 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
471 pVM->rem.s.fGCPhysLastRamFixed = true;
472#ifdef RT_STRICT
473 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
474#else
475 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
476#endif
477 return rc;
478}
479
480/**
481 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
482 *
483 * @returns VBox status code.
484 * @param pVM The VM handle.
485 * @param fGuarded Whether to guard the map.
486 */
487static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
488{
489 int rc = VINF_SUCCESS;
490 RTGCPHYS cb;
491
492 AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);
493
494 cb = pVM->rem.s.GCPhysLastRam + 1;
495 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
496 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
497 VERR_OUT_OF_RANGE);
498
499 ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
500 AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
501
502 if (!fGuarded)
503 {
504 ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
505 AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
506 }
507 else
508 {
509 /*
510 * Fill it up to the nearest 4GB of RAM and leave at least 64KB of guard after it.
511 */
512 uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
513 uint32_t cbBitmapFull = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
514 if (cbBitmapFull == cbBitmapAligned)
515 cbBitmapFull += _4G >> PAGE_SHIFT;
516 else if (cbBitmapFull - cbBitmapAligned < _64K)
517 cbBitmapFull += _64K;
518
519 ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
520 AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
521
522 rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
523 if (RT_FAILURE(rc))
524 {
525 RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
526 AssertLogRelRCReturn(rc, rc);
527 }
528
529 ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
530 }
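/* Editorial worked example (a sketch, not original): with 1GB of guest RAM,
 * cb = 0x40000000 and phys_dirty_size = cb >> PAGE_SHIFT = 0x40000 bytes
 * (one dirty byte per 4KB page). cbBitmapAligned rounds that 256KB map up to
 * a page multiple (already aligned), while cbBitmapFull rounds it up to the
 * map size covering a full 4GB (0x100000 bytes), so the trailing 768KB get
 * RTMEM_PROT_NONE and any stray access past the real bitmap faults at once. */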
531
532 /* Initialize it, marking all pages dirty (0xff). */
533 memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
534 return rc;
535}
536
537
538/**
539 * Terminates the REM.
540 *
541 * Termination means cleaning up and freeing all resources;
542 * the VM itself is at this point powered off or suspended.
543 *
544 * @returns VBox status code.
545 * @param pVM The VM to operate on.
546 */
547REMR3DECL(int) REMR3Term(PVM pVM)
548{
549#ifdef VBOX_WITH_STATISTICS
550 /*
551 * Statistics.
552 */
553 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
554 STAM_DEREG(pVM, &gStatCompilationQEmu);
555 STAM_DEREG(pVM, &gStatRunCodeQEmu);
556 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
557 STAM_DEREG(pVM, &gStatTimers);
558 STAM_DEREG(pVM, &gStatTBLookup);
559 STAM_DEREG(pVM, &gStatIRQ);
560 STAM_DEREG(pVM, &gStatRawCheck);
561 STAM_DEREG(pVM, &gStatMemRead);
562 STAM_DEREG(pVM, &gStatMemWrite);
563 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
564
565 STAM_DEREG(pVM, &gStatCpuGetTSC);
566
567 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
568 STAM_DEREG(pVM, &gStatRefuseVM86);
569 STAM_DEREG(pVM, &gStatRefusePaging);
570 STAM_DEREG(pVM, &gStatRefusePAE);
571 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
572 STAM_DEREG(pVM, &gStatRefuseIF0);
573 STAM_DEREG(pVM, &gStatRefuseCode16);
574 STAM_DEREG(pVM, &gStatRefuseWP0);
575 STAM_DEREG(pVM, &gStatRefuseRing1or2);
576 STAM_DEREG(pVM, &gStatRefuseCanExecute);
577 STAM_DEREG(pVM, &gaStatRefuseStale[0]);
578 STAM_DEREG(pVM, &gaStatRefuseStale[1]);
579 STAM_DEREG(pVM, &gaStatRefuseStale[2]);
580 STAM_DEREG(pVM, &gaStatRefuseStale[3]);
581 STAM_DEREG(pVM, &gaStatRefuseStale[4]);
582 STAM_DEREG(pVM, &gaStatRefuseStale[5]);
583 STAM_DEREG(pVM, &gStatFlushTBs);
584
585 STAM_DEREG(pVM, &gStatREMGDTChange);
586 STAM_DEREG(pVM, &gStatREMLDTRChange);
587 STAM_DEREG(pVM, &gStatREMIDTChange);
588 STAM_DEREG(pVM, &gStatREMTRChange);
589
590 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
591 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
592 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
593 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
594 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
595 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
596
597 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
598 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
599 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
600 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
601 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
602 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
603
604 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
605#endif /* VBOX_WITH_STATISTICS */
606
607 STAM_REL_DEREG(pVM, &tb_flush_count);
608 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
609 STAM_REL_DEREG(pVM, &tlb_flush_count);
610
611 return VINF_SUCCESS;
612}
613
614
615/**
616 * The VM is being reset.
617 *
618 * For the REM component this means calling cpu_reset() and
619 * reinitializing some state variables.
620 *
621 * @param pVM VM handle.
622 */
623REMR3DECL(void) REMR3Reset(PVM pVM)
624{
625 EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */
626
627 /*
628 * Reset the REM cpu.
629 */
630 Assert(pVM->rem.s.cIgnoreAll == 0);
631 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
632 cpu_reset(&pVM->rem.s.Env);
633 pVM->rem.s.cInvalidatedPages = 0;
634 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
635 Assert(pVM->rem.s.cIgnoreAll == 0);
636
637 /* Clear raw ring 0 init state */
638 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
639
640 /* Flush the TBs the next time we execute code here. */
641 pVM->rem.s.fFlushTBs = true;
642
643 EMRemUnlock(pVM);
644}
645
646
647/**
648 * Execute state save operation.
649 *
650 * @returns VBox status code.
651 * @param pVM VM Handle.
652 * @param pSSM SSM operation handle.
653 */
654static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
655{
656 PREM pRem = &pVM->rem.s;
657
658 /*
659 * Save the required CPU Env bits.
660 * (Not much because we're never in REM when doing the save.)
661 */
662 LogFlow(("remR3Save:\n"));
663 Assert(!pRem->fInREM);
664 SSMR3PutU32(pSSM, pRem->Env.hflags);
665 SSMR3PutU32(pSSM, ~0); /* separator */
666
667 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
668 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
669 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
670
671 return SSMR3PutU32(pSSM, ~0); /* terminator */
672}
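/* Editorial summary (derived from remR3Save() above, not original): the "rem"
 * saved-state unit is just five u32 fields, consumed in the same order by
 * remR3Load() below:
 *
 *    hflags, 0xffffffff (separator), fRawRing0, u32PendingInterrupt,
 *    0xffffffff (terminator)
 */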
673
674
675/**
676 * Execute state load operation.
677 *
678 * @returns VBox status code.
679 * @param pVM VM Handle.
680 * @param pSSM SSM operation handle.
681 * @param uVersion Data layout version.
682 * @param uPass The data pass.
683 */
684static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
685{
686 uint32_t u32Dummy;
687 uint32_t fRawRing0 = false;
688 uint32_t u32Sep;
689 uint32_t i;
690 int rc;
691 PREM pRem;
692
693 LogFlow(("remR3Load:\n"));
694 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
695
696 /*
697 * Validate version.
698 */
699 if ( uVersion != REM_SAVED_STATE_VERSION
700 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
701 {
702 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
703 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
704 }
705
706 /*
707 * Do a reset to be on the safe side...
708 */
709 REMR3Reset(pVM);
710
711 /*
712 * Ignore all ignorable notifications.
713 * (Not doing this will cause serious trouble.)
714 */
715 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
716
717 /*
718 * Load the required CPU Env bits.
719 * (Not much because we're never in REM when doing the save.)
720 */
721 pRem = &pVM->rem.s;
722 Assert(!pRem->fInREM);
723 SSMR3GetU32(pSSM, &pRem->Env.hflags);
724 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
725 {
726 /* Redundant REM CPU state has to be loaded, but can be ignored. */
727 CPUX86State_Ver16 temp;
728 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
729 }
730
731 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
732 if (RT_FAILURE(rc))
733 return rc;
734 if (u32Sep != ~0U)
735 {
736 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
737 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
738 }
739
740 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
741 SSMR3GetUInt(pSSM, &fRawRing0);
742 if (fRawRing0)
743 pRem->Env.state |= CPU_RAW_RING0;
744
745 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
746 {
747 /*
748 * Load the REM stuff.
749 */
750 /** @todo r=bird: We should just drop all these items, restoring doesn't make
751 * sense. */
752 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
753 if (RT_FAILURE(rc))
754 return rc;
755 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
756 {
757 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
758 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
759 }
760 for (i = 0; i < pRem->cInvalidatedPages; i++)
761 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
762 }
763
764 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
765 if (RT_FAILURE(rc))
766 return rc;
767
768 /* check the terminator. */
769 rc = SSMR3GetU32(pSSM, &u32Sep);
770 if (RT_FAILURE(rc))
771 return rc;
772 if (u32Sep != ~0U)
773 {
774 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
775 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
776 }
777
778 /*
779 * Get the CPUID features.
780 */
781 PVMCPU pVCpu = VMMGetCpu(pVM);
782 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
783 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
784
785 /*
786 * Stop ignoring ignorable notifications.
787 */
788 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
789
790 /*
791 * Sync the whole CPU state when executing code in the recompiler.
792 */
793 for (i = 0; i < pVM->cCpus; i++)
794 {
795 PVMCPU pVCpu = &pVM->aCpus[i];
796 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
797 }
798 return VINF_SUCCESS;
799}
800
801
802
803#undef LOG_GROUP
804#define LOG_GROUP LOG_GROUP_REM_RUN
805
806/**
807 * Single steps an instruction in recompiled mode.
808 *
809 * Before calling this function the REM state needs to be in sync with
810 * the VM. Call REMR3State() to perform the sync. It's only necessary
811 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
812 * and after calling REMR3StateBack().
813 *
814 * @returns VBox status code.
815 *
816 * @param pVM VM Handle.
817 * @param pVCpu VMCPU Handle.
818 */
819REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
820{
821 int rc, interrupt_request;
822 RTGCPTR GCPtrPC;
823 bool fBp;
824
825 /*
826 * Lock the REM - we don't wanna have anyone interrupting us
827 * while stepping - and enable single stepping. We also ignore
828 * pending interrupts and suchlike.
829 */
830 interrupt_request = pVM->rem.s.Env.interrupt_request;
831 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
832 pVM->rem.s.Env.interrupt_request = 0;
833 cpu_single_step(&pVM->rem.s.Env, 1);
834
835 /*
836 * If we're standing at a breakpoint, it has to be disabled before we start stepping.
837 */
838 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
839 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);
840
841 /*
842 * Execute and handle the return code.
843 * We execute without enabling the cpu tick, so on success we'll
844 * just flip it on and off to make sure it moves.
845 */
846 rc = cpu_exec(&pVM->rem.s.Env);
847 if (rc == EXCP_DEBUG)
848 {
849 TMR3NotifyResume(pVM, pVCpu);
850 TMR3NotifySuspend(pVM, pVCpu);
851 rc = VINF_EM_DBG_STEPPED;
852 }
853 else
854 {
855 switch (rc)
856 {
857 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
858 case EXCP_HLT:
859 case EXCP_HALTED: rc = VINF_EM_HALT; break;
860 case EXCP_RC:
861 rc = pVM->rem.s.rc;
862 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
863 break;
864 case EXCP_EXECUTE_RAW:
865 case EXCP_EXECUTE_HWACC:
866 /** @todo: is it correct? No! */
867 rc = VINF_SUCCESS;
868 break;
869 default:
870 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
871 rc = VERR_INTERNAL_ERROR;
872 break;
873 }
874 }
875
876 /*
877 * Restore the stuff we changed to prevent interruption.
878 * Unlock the REM.
879 */
880 if (fBp)
881 {
882 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
883 Assert(rc2 == 0); NOREF(rc2);
884 }
885 cpu_single_step(&pVM->rem.s.Env, 0);
886 pVM->rem.s.Env.interrupt_request = interrupt_request;
887
888 return rc;
889}
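/* Editorial usage sketch (illustrating the sync protocol documented above,
 * not original code); error handling and scheduling are omitted:
 *
 *    rc = REMR3State(pVM, pVCpu);            // sync VM state into REM
 *    if (RT_SUCCESS(rc))
 *    {
 *        rc  = REMR3Step(pVM, pVCpu);        // or REMR3Run(pVM, pVCpu)
 *        rc2 = REMR3StateBack(pVM, pVCpu);   // sync REM state back to the VM
 *    }
 */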
890
891
892/**
893 * Set a breakpoint using the REM facilities.
894 *
895 * @returns VBox status code.
896 * @param pVM The VM handle.
897 * @param Address The breakpoint address.
898 * @thread The emulation thread.
899 */
900REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
901{
902 VM_ASSERT_EMT(pVM);
903 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
904 {
905 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
906 return VINF_SUCCESS;
907 }
908 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
909 return VERR_REM_NO_MORE_BP_SLOTS;
910}
911
912
913/**
914 * Clears a breakpoint set by REMR3BreakpointSet().
915 *
916 * @returns VBox status code.
917 * @param pVM The VM handle.
918 * @param Address The breakpoint address.
919 * @thread The emulation thread.
920 */
921REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
922{
923 VM_ASSERT_EMT(pVM);
924 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
925 {
926 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
927 return VINF_SUCCESS;
928 }
929 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
930 return VERR_REM_BP_NOT_FOUND;
931}
932
933
934/**
935 * Emulate an instruction.
936 *
937 * This function executes one instruction without letting anyone
938 * interrupt it. This is intended to be called while in
939 * raw mode and thus will take care of all the state syncing between
940 * REM and the rest.
941 *
942 * @returns VBox status code.
943 * @param pVM VM handle.
944 * @param pVCpu VMCPU Handle.
945 */
946REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
947{
948 bool fFlushTBs;
949
950 int rc, rc2;
951 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
952
953 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
954 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
955 */
956 if (HWACCMIsEnabled(pVM))
957 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
958
959 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
960 fFlushTBs = pVM->rem.s.fFlushTBs;
961 pVM->rem.s.fFlushTBs = false;
962
963 /*
964 * Sync the state and enable single instruction / single stepping.
965 */
966 rc = REMR3State(pVM, pVCpu);
967 pVM->rem.s.fFlushTBs = fFlushTBs;
968 if (RT_SUCCESS(rc))
969 {
970 int interrupt_request = pVM->rem.s.Env.interrupt_request;
971 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
972#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
973 cpu_single_step(&pVM->rem.s.Env, 0);
974#endif
975 Assert(!pVM->rem.s.Env.singlestep_enabled);
976
977 /*
978 * Now we set the execute single instruction flag and enter the cpu_exec loop.
979 */
980 TMNotifyStartOfExecution(pVCpu);
981 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
982 rc = cpu_exec(&pVM->rem.s.Env);
983 TMNotifyEndOfExecution(pVCpu);
984 switch (rc)
985 {
986 /*
987 * Executed without anything out of the way happening.
988 */
989 case EXCP_SINGLE_INSTR:
990 rc = VINF_EM_RESCHEDULE;
991 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
992 break;
993
994 /*
995 * If we take a trap or start servicing a pending interrupt, we might end up here.
996 * (Timer thread or some other thread wishing EMT's attention.)
997 */
998 case EXCP_INTERRUPT:
999 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
1000 rc = VINF_EM_RESCHEDULE;
1001 break;
1002
1003 /*
1004 * Single step, we assume!
1005 * If there was a breakpoint there we're fucked now.
1006 */
1007 case EXCP_DEBUG:
1008 if (pVM->rem.s.Env.watchpoint_hit)
1009 {
1010 /** @todo deal with watchpoints */
1011 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1012 rc = VINF_EM_DBG_BREAKPOINT;
1013 }
1014 else
1015 {
1016 CPUBreakpoint *pBP;
1017 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1018 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1019 if (pBP->pc == GCPtrPC)
1020 break;
1021 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1022 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1023 }
1024 break;
1025
1026 /*
1027 * hlt instruction.
1028 */
1029 case EXCP_HLT:
1030 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1031 rc = VINF_EM_HALT;
1032 break;
1033
1034 /*
1035 * The VM has halted.
1036 */
1037 case EXCP_HALTED:
1038 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1039 rc = VINF_EM_HALT;
1040 break;
1041
1042 /*
1043 * Switch to RAW-mode.
1044 */
1045 case EXCP_EXECUTE_RAW:
1046 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1047 rc = VINF_EM_RESCHEDULE_RAW;
1048 break;
1049
1050 /*
1051 * Switch to hardware accelerated RAW-mode.
1052 */
1053 case EXCP_EXECUTE_HWACC:
1054 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1055 rc = VINF_EM_RESCHEDULE_HWACC;
1056 break;
1057
1058 /*
1059 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1060 */
1061 case EXCP_RC:
1062 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1063 rc = pVM->rem.s.rc;
1064 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1065 break;
1066
1067 /*
1068 * Figure out the rest when they arrive....
1069 */
1070 default:
1071 AssertMsgFailed(("rc=%d\n", rc));
1072 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1073 rc = VINF_EM_RESCHEDULE;
1074 break;
1075 }
1076
1077 /*
1078 * Switch back the state.
1079 */
1080 pVM->rem.s.Env.interrupt_request = interrupt_request;
1081 rc2 = REMR3StateBack(pVM, pVCpu);
1082 AssertRC(rc2);
1083 }
1084
1085 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1086 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1087 return rc;
1088}
1089
1090
1091/**
1092 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1093 *
1094 * @returns VBox status code.
1095 *
1096 * @param pVM The VM handle.
1097 * @param pVCpu The Virtual CPU handle.
1098 */
1099static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1100{
1101 int rc;
1102
1103 Assert(pVM->rem.s.fInREM);
1104#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1105 cpu_single_step(&pVM->rem.s.Env, 1);
1106#else
1107 Assert(!pVM->rem.s.Env.singlestep_enabled);
1108#endif
1109
1110 /*
1111 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1112 */
1113 for (;;)
1114 {
1115 char szBuf[256];
1116
1117 /*
1118 * Log the current registers state and instruction.
1119 */
1120 remR3StateUpdate(pVM, pVCpu);
1121 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1122 szBuf[0] = '\0';
1123 rc = DBGFR3DisasInstrEx(pVM,
1124 pVCpu->idCpu,
1125 0, /* Sel */
1126 0, /* GCPtr */
1127 DBGF_DISAS_FLAGS_CURRENT_GUEST
1128 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1129 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1130 szBuf,
1131 sizeof(szBuf),
1132 NULL);
1133 if (RT_FAILURE(rc))
1134 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1135 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1136
1137 /*
1138 * Execute the instruction.
1139 */
1140 TMNotifyStartOfExecution(pVCpu);
1141
1142 if ( pVM->rem.s.Env.exception_index < 0
1143 || pVM->rem.s.Env.exception_index > 256)
1144 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1145
1146#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1147 pVM->rem.s.Env.interrupt_request = 0;
1148#else
1149 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1150#endif
1151 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1152 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1153 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1154 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1155 pVM->rem.s.Env.interrupt_request,
1156 pVM->rem.s.Env.halted,
1157 pVM->rem.s.Env.exception_index
1158 );
1159
1160 rc = cpu_exec(&pVM->rem.s.Env);
1161
1162 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1163 pVM->rem.s.Env.interrupt_request,
1164 pVM->rem.s.Env.halted,
1165 pVM->rem.s.Env.exception_index
1166 );
1167
1168 TMNotifyEndOfExecution(pVCpu);
1169
1170 switch (rc)
1171 {
1172#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1173 /*
1174 * The normal exit.
1175 */
1176 case EXCP_SINGLE_INSTR:
1177 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1178 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1179 continue;
1180 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1181 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1182 rc = VINF_SUCCESS;
1183 break;
1184
1185#else
1186 /*
1187 * The normal exit, check for breakpoints at PC just to be sure.
1188 */
1189#endif
1190 case EXCP_DEBUG:
1191 if (pVM->rem.s.Env.watchpoint_hit)
1192 {
1193 /** @todo deal with watchpoints */
1194 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1195 rc = VINF_EM_DBG_BREAKPOINT;
1196 }
1197 else
1198 {
1199 CPUBreakpoint *pBP;
1200 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1201 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1202 if (pBP->pc == GCPtrPC)
1203 break;
1204 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1205 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1206 }
1207#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1208 if (rc == VINF_EM_DBG_STEPPED)
1209 {
1210 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1211 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1212 continue;
1213
1214 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1215 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1216 rc = VINF_SUCCESS;
1217 }
1218#endif
1219 break;
1220
1221 /*
1222 * If we take a trap or start servicing a pending interrupt, we might end up here.
1223 * (Timer thread or some other thread wishing EMT's attention.)
1224 */
1225 case EXCP_INTERRUPT:
1226 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1227 rc = VINF_SUCCESS;
1228 break;
1229
1230 /*
1231 * hlt instruction.
1232 */
1233 case EXCP_HLT:
1234 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1235 rc = VINF_EM_HALT;
1236 break;
1237
1238 /*
1239 * The VM has halted.
1240 */
1241 case EXCP_HALTED:
1242 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1243 rc = VINF_EM_HALT;
1244 break;
1245
1246 /*
1247 * Switch to RAW-mode.
1248 */
1249 case EXCP_EXECUTE_RAW:
1250 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1251 rc = VINF_EM_RESCHEDULE_RAW;
1252 break;
1253
1254 /*
1255 * Switch to hardware accelerated RAW-mode.
1256 */
1257 case EXCP_EXECUTE_HWACC:
1258 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1259 rc = VINF_EM_RESCHEDULE_HWACC;
1260 break;
1261
1262 /*
1263 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1264 */
1265 case EXCP_RC:
1266 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1267 rc = pVM->rem.s.rc;
1268 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1269 break;
1270
1271 /*
1272 * Figure out the rest when they arrive....
1273 */
1274 default:
1275 AssertMsgFailed(("rc=%d\n", rc));
1276 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1277 rc = VINF_EM_RESCHEDULE;
1278 break;
1279 }
1280 break;
1281 }
1282
1283#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1284// cpu_single_step(&pVM->rem.s.Env, 0);
1285#else
1286 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1287#endif
1288 return rc;
1289}
1290
1291
1292/**
1293 * Runs code in recompiled mode.
1294 *
1295 * Before calling this function the REM state needs to be in sync with
1296 * the VM. Call REMR3State() to perform the sync. It's only necessary
1297 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1298 * and after calling REMR3StateBack().
1299 *
1300 * @returns VBox status code.
1301 *
1302 * @param pVM VM Handle.
1303 * @param pVCpu VMCPU Handle.
1304 */
1305REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1306{
1307 int rc;
1308
1309 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1310 return remR3RunLoggingStep(pVM, pVCpu);
1311
1312 Assert(pVM->rem.s.fInREM);
1313 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1314
1315 TMNotifyStartOfExecution(pVCpu);
1316 rc = cpu_exec(&pVM->rem.s.Env);
1317 TMNotifyEndOfExecution(pVCpu);
1318 switch (rc)
1319 {
1320 /*
1321 * This happens when the execution was interrupted
1322 * by an external event, like pending timers.
1323 */
1324 case EXCP_INTERRUPT:
1325 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1326 rc = VINF_SUCCESS;
1327 break;
1328
1329 /*
1330 * hlt instruction.
1331 */
1332 case EXCP_HLT:
1333 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1334 rc = VINF_EM_HALT;
1335 break;
1336
1337 /*
1338 * The VM has halted.
1339 */
1340 case EXCP_HALTED:
1341 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1342 rc = VINF_EM_HALT;
1343 break;
1344
1345 /*
1346 * Breakpoint/single step.
1347 */
1348 case EXCP_DEBUG:
1349 if (pVM->rem.s.Env.watchpoint_hit)
1350 {
1351 /** @todo deal with watchpoints */
1352 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1353 rc = VINF_EM_DBG_BREAKPOINT;
1354 }
1355 else
1356 {
1357 CPUBreakpoint *pBP;
1358 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1359 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1360 if (pBP->pc == GCPtrPC)
1361 break;
1362 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1363 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1364 }
1365 break;
1366
1367 /*
1368 * Switch to RAW-mode.
1369 */
1370 case EXCP_EXECUTE_RAW:
1371 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1372 rc = VINF_EM_RESCHEDULE_RAW;
1373 break;
1374
1375 /*
1376 * Switch to hardware accelerated RAW-mode.
1377 */
1378 case EXCP_EXECUTE_HWACC:
1379 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1380 rc = VINF_EM_RESCHEDULE_HWACC;
1381 break;
1382
1383 /*
1384 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1385 */
1386 case EXCP_RC:
1387 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1388 rc = pVM->rem.s.rc;
1389 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1390 break;
1391
1392 /*
1393 * Figure out the rest when they arrive....
1394 */
1395 default:
1396 AssertMsgFailed(("rc=%d\n", rc));
1397 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1398 rc = VINF_SUCCESS;
1399 break;
1400 }
1401
1402 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1403 return rc;
1404}
1405
1406
1407/**
1408 * Check if the cpu state is suitable for Raw execution.
1409 *
1410 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1411 *
1412 * @param env The CPU env struct.
1413 * @param eip The EIP to check this for (might differ from env->eip).
1414 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1415 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1416 *
1417 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1418 */
1419bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1420{
1421 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1422 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1423 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1424 uint32_t u32CR0;
1425
1426#ifdef IEM_VERIFICATION_MODE
1427 return false;
1428#endif
1429
1430 /* Update counter. */
1431 env->pVM->rem.s.cCanExecuteRaw++;
1432
1433 /* Never when single stepping+logging guest code. */
1434 if (env->state & CPU_EMULATE_SINGLE_STEP)
1435 return false;
1436
1437 if (HWACCMIsEnabled(env->pVM))
1438 {
1439 CPUMCTX Ctx;
1440
1441 env->state |= CPU_RAW_HWACC;
1442
1443 /*
1444 * The simple check first...
1445 */
1446 if (!EMIsHwVirtExecutionEnabled(env->pVM))
1447 return false;
1448
1449 /*
1450 * Create partial context for HWACCMR3CanExecuteGuest
1451 */
1452 Ctx.cr0 = env->cr[0];
1453 Ctx.cr3 = env->cr[3];
1454 Ctx.cr4 = env->cr[4];
1455
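 /* Editorial note (not in the original): QEMU's segment cache keeps the raw
  * descriptor attribute bits in 'flags' shifted left by 8, so the
  * "(flags >> 8) & 0xF0FF" pattern below recovers the x86 attribute word
  * (type, S, DPL, P, AVL, L, D/B, G) while masking out the limit bits - the
  * layout that CPUMSELREG's Attr.u expects. */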
1456 Ctx.tr.Sel = env->tr.selector;
1457 Ctx.tr.ValidSel = env->tr.selector;
1458 Ctx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1459 Ctx.tr.u64Base = env->tr.base;
1460 Ctx.tr.u32Limit = env->tr.limit;
1461 Ctx.tr.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1462
1463 Ctx.ldtr.Sel = env->ldt.selector;
1464 Ctx.ldtr.ValidSel = env->ldt.selector;
1465 Ctx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1466 Ctx.ldtr.u64Base = env->ldt.base;
1467 Ctx.ldtr.u32Limit = env->ldt.limit;
1468 Ctx.ldtr.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1469
1470 Ctx.idtr.cbIdt = env->idt.limit;
1471 Ctx.idtr.pIdt = env->idt.base;
1472
1473 Ctx.gdtr.cbGdt = env->gdt.limit;
1474 Ctx.gdtr.pGdt = env->gdt.base;
1475
1476 Ctx.rsp = env->regs[R_ESP];
1477 Ctx.rip = env->eip;
1478
1479 Ctx.eflags.u32 = env->eflags;
1480
1481 Ctx.cs.Sel = env->segs[R_CS].selector;
1482 Ctx.cs.ValidSel = env->segs[R_CS].selector;
1483 Ctx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1484 Ctx.cs.u64Base = env->segs[R_CS].base;
1485 Ctx.cs.u32Limit = env->segs[R_CS].limit;
1486 Ctx.cs.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1487
1488 Ctx.ds.Sel = env->segs[R_DS].selector;
1489 Ctx.ds.ValidSel = env->segs[R_DS].selector;
1490 Ctx.ds.fFlags = CPUMSELREG_FLAGS_VALID;
1491 Ctx.ds.u64Base = env->segs[R_DS].base;
1492 Ctx.ds.u32Limit = env->segs[R_DS].limit;
1493 Ctx.ds.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1494
1495 Ctx.es.Sel = env->segs[R_ES].selector;
1496 Ctx.es.ValidSel = env->segs[R_ES].selector;
1497 Ctx.es.fFlags = CPUMSELREG_FLAGS_VALID;
1498 Ctx.es.u64Base = env->segs[R_ES].base;
1499 Ctx.es.u32Limit = env->segs[R_ES].limit;
1500 Ctx.es.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1501
1502 Ctx.fs.Sel = env->segs[R_FS].selector;
1503 Ctx.fs.ValidSel = env->segs[R_FS].selector;
1504 Ctx.fs.fFlags = CPUMSELREG_FLAGS_VALID;
1505 Ctx.fs.u64Base = env->segs[R_FS].base;
1506 Ctx.fs.u32Limit = env->segs[R_FS].limit;
1507 Ctx.fs.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1508
1509 Ctx.gs.Sel = env->segs[R_GS].selector;
1510 Ctx.gs.ValidSel = env->segs[R_GS].selector;
1511 Ctx.gs.fFlags = CPUMSELREG_FLAGS_VALID;
1512 Ctx.gs.u64Base = env->segs[R_GS].base;
1513 Ctx.gs.u32Limit = env->segs[R_GS].limit;
1514 Ctx.gs.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1515
1516 Ctx.ss.Sel = env->segs[R_SS].selector;
1517 Ctx.ss.ValidSel = env->segs[R_SS].selector;
1518 Ctx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1519 Ctx.ss.u64Base = env->segs[R_SS].base;
1520 Ctx.ss.u32Limit = env->segs[R_SS].limit;
1521 Ctx.ss.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1522
1523 Ctx.msrEFER = env->efer;
1524
1525 /* Hardware accelerated raw-mode:
1526 *
1527 * Typically only 32-bit protected-mode code, with paging enabled, is allowed here.
1528 */
1529 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx))
1530 {
1531 *piException = EXCP_EXECUTE_HWACC;
1532 return true;
1533 }
1534 return false;
1535 }
1536
1537 /*
1538 * Here we only support 16 & 32 bit protected mode ring 3 code that has no IO privileges,
1539 * or 32 bit protected mode ring 0 code.
1540 *
1541 * The tests are ordered by the likelihood of being true during normal execution.
1542 */
1543 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1544 {
1545 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1546 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1547 return false;
1548 }
1549
1550#ifndef VBOX_RAW_V86
1551 if (fFlags & VM_MASK) {
1552 STAM_COUNTER_INC(&gStatRefuseVM86);
1553 Log2(("raw mode refused: VM_MASK\n"));
1554 return false;
1555 }
1556#endif
1557
1558 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1559 {
1560#ifndef DEBUG_bird
1561 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1562#endif
1563 return false;
1564 }
1565
1566 if (env->singlestep_enabled)
1567 {
1568 //Log2(("raw mode refused: Single step\n"));
1569 return false;
1570 }
1571
1572 if (!QTAILQ_EMPTY(&env->breakpoints))
1573 {
1574 //Log2(("raw mode refused: Breakpoints\n"));
1575 return false;
1576 }
1577
1578 if (!QTAILQ_EMPTY(&env->watchpoints))
1579 {
1580 //Log2(("raw mode refused: Watchpoints\n"));
1581 return false;
1582 }
1583
1584 u32CR0 = env->cr[0];
1585 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1586 {
1587 STAM_COUNTER_INC(&gStatRefusePaging);
1588 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1589 return false;
1590 }
1591
1592 if (env->cr[4] & CR4_PAE_MASK)
1593 {
1594 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1595 {
1596 STAM_COUNTER_INC(&gStatRefusePAE);
1597 return false;
1598 }
1599 }
1600
1601 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1602 {
1603 if (!EMIsRawRing3Enabled(env->pVM))
1604 return false;
1605
1606 if (!(env->eflags & IF_MASK))
1607 {
1608 STAM_COUNTER_INC(&gStatRefuseIF0);
1609 Log2(("raw mode refused: IF (RawR3)\n"));
1610 return false;
1611 }
1612
1613 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1614 {
1615 STAM_COUNTER_INC(&gStatRefuseWP0);
1616 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1617 return false;
1618 }
1619 }
1620 else
1621 {
1622 if (!EMIsRawRing0Enabled(env->pVM))
1623 return false;
1624
1625 // Let's start with pure 32 bits ring 0 code first
1626 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1627 {
1628 STAM_COUNTER_INC(&gStatRefuseCode16);
1629 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1630 return false;
1631 }
1632
1633 // Only R0
1634 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1635 {
1636 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1637 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1638 return false;
1639 }
1640
1641 if (!(u32CR0 & CR0_WP_MASK))
1642 {
1643 STAM_COUNTER_INC(&gStatRefuseWP0);
1644 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1645 return false;
1646 }
1647
1648 if (PATMIsPatchGCAddr(env->pVM, eip))
1649 {
1650 Log2(("raw r0 mode forced: patch code\n"));
1651 *piException = EXCP_EXECUTE_RAW;
1652 return true;
1653 }
1654
1655#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1656 if (!(env->eflags & IF_MASK))
1657 {
1658 STAM_COUNTER_INC(&gStatRefuseIF0);
1659 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1660 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1661 return false;
1662 }
1663#endif
1664
1665 env->state |= CPU_RAW_RING0;
1666 }
1667
1668 /*
1669 * Don't reschedule the first time we're called, because there might be
1670 * special reasons why we're here that are not covered by the above checks.
1671 */
1672 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1673 {
1674 Log2(("raw mode refused: first scheduling\n"));
1675 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1676 return false;
1677 }
1678
1679 /*
1680 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1681 */
1682 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1683 {
1684 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1685 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1686 return false;
1687 }
1688 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1689 {
1690 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1691 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1692 return false;
1693 }
1694 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1695 {
1696 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1697 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1698 return false;
1699 }
1700 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1701 {
1702 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1703 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1704 return false;
1705 }
1706 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1707 {
1708 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1709 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1710 return false;
1711 }
1712 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1713 {
1714 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1715 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1716 return false;
1717 }
1718
1719/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1720 *piException = EXCP_EXECUTE_RAW;
1721 return true;
1722}
1723
1724
1725/**
1726 * Fetches a code byte.
1727 *
1728 * @returns Success indicator (bool) for ease of use.
1729 * @param env The CPU environment structure.
1730 * @param GCPtrInstr Where to fetch code.
1731 * @param pu8Byte Where to store the byte on success.
1732 */
1733bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1734{
1735 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1736 return RT_SUCCESS(rc);
1739}
1740
1741
1742/**
1743 * Flush (or invalidate if you like) page table/dir entry.
1744 *
1745 * (invlpg instruction; tlb_flush_page)
1746 *
1747 * @param env Pointer to cpu environment.
1748 * @param GCPtr The virtual address whose page table/dir entry should be invalidated.
1749 */
1750void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1751{
1752 PVM pVM = env->pVM;
1753 PCPUMCTX pCtx;
1754 int rc;
1755
1756 Assert(EMRemIsLockOwner(env->pVM));
1757
1758 /*
1759 * When we're replaying invlpg instructions or restoring a saved
1760 * state we disable this path.
1761 */
1762 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1763 return;
1764 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1765 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1766
1767 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1768
1769 /*
1770 * Update the control registers before calling PGMFlushPage.
1771 */
1772 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1773 Assert(pCtx);
1774 pCtx->cr0 = env->cr[0];
1775 pCtx->cr3 = env->cr[3];
1776 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1777 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1778 pCtx->cr4 = env->cr[4];
1779
1780 /*
1781 * Let PGM do the rest.
1782 */
1783 Assert(env->pVCpu);
1784 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1785 if (RT_FAILURE(rc))
1786 {
1787 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1788 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1789 }
1790 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1791}
1792
1793
1794#ifndef REM_PHYS_ADDR_IN_TLB
1795/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
1796void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
1797{
1798 void *pv;
1799 int rc;
1800
1801
1802 /* Address must be aligned enough to fiddle with lower bits */
1803 Assert((physAddr & 0x3) == 0);
1804 /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/
1805
1806 STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
1807 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1808 STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
1809 Assert( rc == VINF_SUCCESS
1810 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1811 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1812 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
1813 if (RT_FAILURE(rc))
1814 return (void *)1;
1815 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1816 return (void *)((uintptr_t)pv | 2);
1817 return pv;
1818}
1819#endif /* REM_PHYS_ADDR_IN_TLB */
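
/*
 * Illustrative sketch, not part of the original file: one way a caller could
 * decode the tagged pointer remR3TlbGCPhys2Ptr returns above. The low bits
 * carry status: the value 1 means no usable mapping, bit 1 (value 2) means
 * writes must be trapped. The helper name is made up for illustration.
 */
#if 0
DECLINLINE(void *) remExampleDecodeTlbPtr(void *pv, bool *pfCatchWrite)
{
    uintptr_t uPtr = (uintptr_t)pv;
    if (uPtr == 1) /* VERR_PGM_PHYS_TLB_* -> not directly accessible. */
        return NULL;
    *pfCatchWrite = RT_BOOL(uPtr & 2); /* VINF_PGM_PHYS_TLB_CATCH_WRITE. */
    return (void *)(uPtr & ~(uintptr_t)3);
}
#endif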
1820
1821
1822/**
1823 * Called from tlb_protect_code in order to write monitor a code page.
1824 *
1825 * @param env Pointer to the CPU environment.
1826 * @param GCPtr Code page to monitor
1827 */
1828void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1829{
1830#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1831 Assert(env->pVM->rem.s.fInREM);
1832 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1833 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1834 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1835 && !(env->eflags & VM_MASK) /* no V86 mode */
1836 && !HWACCMIsEnabled(env->pVM))
1837 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1838#endif
1839}
1840
1841
1842/**
1843 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1844 *
1845 * @param env Pointer to the CPU environment.
1846 * @param GCPtr Code page to monitor
1847 */
1848void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1849{
1850 Assert(env->pVM->rem.s.fInREM);
1851#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1852 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1853 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1854 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1855 && !(env->eflags & VM_MASK) /* no V86 mode */
1856 && !HWACCMIsEnabled(env->pVM))
1857 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1858#endif
1859}
1860
1861
1862/**
1863 * Called when the CPU is initialized, when any of the CRx registers is changed, or
1864 * when the A20 line is modified.
1865 *
1866 * @param env Pointer to the CPU environment.
1867 * @param fGlobal Set if the flush is global.
1868 */
1869void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1870{
1871 PVM pVM = env->pVM;
1872 PCPUMCTX pCtx;
1873 Assert(EMRemIsLockOwner(pVM));
1874
1875 /*
1876 * When we're replaying invlpg instructions or restoring a saved
1877 * state we disable this path.
1878 */
1879 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1880 return;
1881 Assert(pVM->rem.s.fInREM);
1882
1883 /*
1884 * The caller doesn't check cr4, so we have to do that for ourselves.
1885 */
1886 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1887 fGlobal = true;
1888 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1889
1890 /*
1891 * Update the control registers before calling PGMR3FlushTLB.
1892 */
1893 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1894 Assert(pCtx);
1895 pCtx->cr0 = env->cr[0];
1896 pCtx->cr3 = env->cr[3];
1897 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1898 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1899 pCtx->cr4 = env->cr[4];
1900
1901 /*
1902 * Let PGM do the rest.
1903 */
1904 Assert(env->pVCpu);
1905 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1906}
1907
1908
1909/**
1910 * Called when any of the cr0, cr4 or efer registers is updated.
1911 *
1912 * @param env Pointer to the CPU environment.
1913 */
1914void remR3ChangeCpuMode(CPUX86State *env)
1915{
1916 PVM pVM = env->pVM;
1917 uint64_t efer;
1918 PCPUMCTX pCtx;
1919 int rc;
1920
1921 /*
1922 * When we're replaying loads or restoring a saved
1923 * state this path is disabled.
1924 */
1925 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1926 return;
1927 Assert(pVM->rem.s.fInREM);
1928
1929 /*
1930 * Update the control registers before calling PGMChangeMode()
1931 * as it may need to map whatever cr3 is pointing to.
1932 */
1933 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1934 Assert(pCtx);
1935 pCtx->cr0 = env->cr[0];
1936 pCtx->cr3 = env->cr[3];
1937 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1938 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1939 pCtx->cr4 = env->cr[4];
1940#ifdef TARGET_X86_64
1941 efer = env->efer;
1942 pCtx->msrEFER = efer;
1943#else
1944 efer = 0;
1945#endif
1946 Assert(env->pVCpu);
1947 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1948 if (rc != VINF_SUCCESS)
1949 {
1950 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1951 {
1952 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1953 remR3RaiseRC(env->pVM, rc);
1954 }
1955 else
1956 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1957 }
1958}
1959
1960
1961/**
1962 * Called from compiled code to run dma.
1963 *
1964 * @param env Pointer to the CPU environment.
1965 */
1966void remR3DmaRun(CPUX86State *env)
1967{
1968 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1969 PDMR3DmaRun(env->pVM);
1970 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1971}
1972
1973
1974/**
1975 * Called from compiled code to schedule pending timers in VMM
1976 *
1977 * @param env Pointer to the CPU environment.
1978 */
1979void remR3TimersRun(CPUX86State *env)
1980{
1981 LogFlow(("remR3TimersRun:\n"));
1982 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1983 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1984 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1985 TMR3TimerQueuesDo(env->pVM);
1986 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1987 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1988}
1989
1990
1991/**
1992 * Record trap occurrence
1993 *
1994 * @returns VBox status code
1995 * @param env Pointer to the CPU environment.
1996 * @param uTrap Trap nr
1997 * @param uErrorCode Error code
1998 * @param pvNextEIP Next EIP
1999 */
2000int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
2001{
2002 PVM pVM = env->pVM;
2003#ifdef VBOX_WITH_STATISTICS
2004 static STAMCOUNTER s_aStatTrap[255];
2005 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
2006#endif
2007
2008#ifdef VBOX_WITH_STATISTICS
2009 if (uTrap < 255)
2010 {
2011 if (!s_aRegisters[uTrap])
2012 {
2013 char szStatName[64];
2014 s_aRegisters[uTrap] = true;
2015 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
2016 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
2017 }
2018 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
2019 }
2020#endif
2021 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2022 if( uTrap < 0x20
2023 && (env->cr[0] & X86_CR0_PE)
2024 && !(env->eflags & X86_EFL_VM))
2025 {
2026#ifdef DEBUG
2027 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
2028#endif
2029 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
2030 {
2031 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2032 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
2033 return VERR_REM_TOO_MANY_TRAPS;
2034 }
2035 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
2036 pVM->rem.s.cPendingExceptions = 1;
2037 pVM->rem.s.uPendingException = uTrap;
2038 pVM->rem.s.uPendingExcptEIP = env->eip;
2039 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2040 }
2041 else
2042 {
2043 pVM->rem.s.cPendingExceptions = 0;
2044 pVM->rem.s.uPendingException = uTrap;
2045 pVM->rem.s.uPendingExcptEIP = env->eip;
2046 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2047 }
2048 return VINF_SUCCESS;
2049}
2050
2051
2052/**
2053 * Clear the current active trap.
2054 *
2055 * @param pVM VM Handle.
2056 */
2057void remR3TrapClear(PVM pVM)
2058{
2059 pVM->rem.s.cPendingExceptions = 0;
2060 pVM->rem.s.uPendingException = 0;
2061 pVM->rem.s.uPendingExcptEIP = 0;
2062 pVM->rem.s.uPendingExcptCR2 = 0;
2063}
2064
2065
2066/**
2067 * Record previous call instruction addresses.
2068 *
2069 * @param env Pointer to the CPU environment.
2070 */
2071void remR3RecordCall(CPUX86State *env)
2072{
2073 CSAMR3RecordCallAddress(env->pVM, env->eip);
2074}
2075
2076
2077/**
2078 * Syncs the internal REM state with the VM.
2079 *
2080 * This must be called before REMR3Run() is invoked whenever the REM
2081 * state is not up to date. Calling it several times in a row is not
2082 * permitted.
2083 *
2084 * @returns VBox status code.
2085 *
2086 * @param pVM VM Handle.
2087 * @param pVCpu VMCPU Handle.
2088 *
2089 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2090 * not do this since the majority of the callers don't want any unnecessary events
2091 * pending that would immediately interrupt execution.
2092 */
2093REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2094{
2095 register const CPUMCTX *pCtx;
2096 register unsigned fFlags;
2097 unsigned i;
2098 TRPMEVENT enmType;
2099 uint8_t u8TrapNo;
2100 uint32_t uCpl;
2101 int rc;
2102
2103 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2104 Log2(("REMR3State:\n"));
2105
2106 pVM->rem.s.Env.pVCpu = pVCpu;
2107 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2108
2109 Assert(!pVM->rem.s.fInREM);
2110 pVM->rem.s.fInStateSync = true;
2111
2112 /*
2113 * If we have to flush TBs, do that immediately.
2114 */
2115 if (pVM->rem.s.fFlushTBs)
2116 {
2117 STAM_COUNTER_INC(&gStatFlushTBs);
2118 tb_flush(&pVM->rem.s.Env);
2119 pVM->rem.s.fFlushTBs = false;
2120 }
2121
2122 /*
2123 * Copy the registers which require no special handling.
2124 */
2125#ifdef TARGET_X86_64
2126 /* Note that the high dwords of the 64-bit registers are undefined in 32-bit mode and after a mode change. */
2127 Assert(R_EAX == 0);
2128 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2129 Assert(R_ECX == 1);
2130 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2131 Assert(R_EDX == 2);
2132 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2133 Assert(R_EBX == 3);
2134 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2135 Assert(R_ESP == 4);
2136 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2137 Assert(R_EBP == 5);
2138 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2139 Assert(R_ESI == 6);
2140 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2141 Assert(R_EDI == 7);
2142 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2143 pVM->rem.s.Env.regs[8] = pCtx->r8;
2144 pVM->rem.s.Env.regs[9] = pCtx->r9;
2145 pVM->rem.s.Env.regs[10] = pCtx->r10;
2146 pVM->rem.s.Env.regs[11] = pCtx->r11;
2147 pVM->rem.s.Env.regs[12] = pCtx->r12;
2148 pVM->rem.s.Env.regs[13] = pCtx->r13;
2149 pVM->rem.s.Env.regs[14] = pCtx->r14;
2150 pVM->rem.s.Env.regs[15] = pCtx->r15;
2151
2152 pVM->rem.s.Env.eip = pCtx->rip;
2153
2154 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2155#else
2156 Assert(R_EAX == 0);
2157 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2158 Assert(R_ECX == 1);
2159 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2160 Assert(R_EDX == 2);
2161 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2162 Assert(R_EBX == 3);
2163 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2164 Assert(R_ESP == 4);
2165 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2166 Assert(R_EBP == 5);
2167 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2168 Assert(R_ESI == 6);
2169 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2170 Assert(R_EDI == 7);
2171 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2172 pVM->rem.s.Env.eip = pCtx->eip;
2173
2174 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2175#endif
2176
2177 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2178
2179 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2180 for (i = 0; i < 8; i++)
2181 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2182
2183#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2184 /*
2185 * Clear the halted hidden flag (the interrupt waking up the CPU can
2186 * have been dispatched in raw mode).
2187 */
2188 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2189#endif
2190
2191 /*
2192 * Replay invlpg? Only if we're not flushing the TLB.
2193 */
2194 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2195 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2196 if (pVM->rem.s.cInvalidatedPages)
2197 {
2198 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2199 {
2200 RTUINT i;
2201
2202 pVM->rem.s.fIgnoreCR3Load = true;
2203 pVM->rem.s.fIgnoreInvlPg = true;
2204 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2205 {
2206 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2207 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2208 }
2209 pVM->rem.s.fIgnoreInvlPg = false;
2210 pVM->rem.s.fIgnoreCR3Load = false;
2211 }
2212 pVM->rem.s.cInvalidatedPages = 0;
2213 }
2214
2215 /* Replay notification changes. */
2216 REMR3ReplayHandlerNotifications(pVM);
2217
2218 /* Update MSRs; before CRx registers! */
2219 pVM->rem.s.Env.efer = pCtx->msrEFER;
2220 pVM->rem.s.Env.star = pCtx->msrSTAR;
2221 pVM->rem.s.Env.pat = pCtx->msrPAT;
2222#ifdef TARGET_X86_64
2223 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2224 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2225 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2226 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2227
2228 /* Update the internal long mode activate flag according to the new EFER value. */
2229 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2230 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2231 else
2232 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2233#endif
2234
2235 /* Update the inhibit IRQ mask. */
2236 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2237 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2238 {
2239 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2240 if (InhibitPC == pCtx->rip)
2241 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2242 else
2243 {
2244 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2245 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2246 }
2247 }
2248
2249 /*
2250 * Sync the A20 gate.
2251 */
2252 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2253 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2254 {
2255 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2256 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2257 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2258 }
2259
2260 /*
2261 * Registers which are rarely changed and require special handling / order when changed.
2262 */
2263 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2264 | CPUM_CHANGED_CR4
2265 | CPUM_CHANGED_CR0
2266 | CPUM_CHANGED_CR3
2267 | CPUM_CHANGED_GDTR
2268 | CPUM_CHANGED_IDTR
2269 | CPUM_CHANGED_SYSENTER_MSR
2270 | CPUM_CHANGED_LDTR
2271 | CPUM_CHANGED_CPUID
2272 | CPUM_CHANGED_FPU_REM
2273 )
2274 )
2275 {
2276 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2277 {
2278 pVM->rem.s.fIgnoreCR3Load = true;
2279 tlb_flush(&pVM->rem.s.Env, true);
2280 pVM->rem.s.fIgnoreCR3Load = false;
2281 }
2282
2283 /* CR4 before CR0! */
2284 if (fFlags & CPUM_CHANGED_CR4)
2285 {
2286 pVM->rem.s.fIgnoreCR3Load = true;
2287 pVM->rem.s.fIgnoreCpuMode = true;
2288 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2289 pVM->rem.s.fIgnoreCpuMode = false;
2290 pVM->rem.s.fIgnoreCR3Load = false;
2291 }
2292
2293 if (fFlags & CPUM_CHANGED_CR0)
2294 {
2295 pVM->rem.s.fIgnoreCR3Load = true;
2296 pVM->rem.s.fIgnoreCpuMode = true;
2297 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2298 pVM->rem.s.fIgnoreCpuMode = false;
2299 pVM->rem.s.fIgnoreCR3Load = false;
2300 }
2301
2302 if (fFlags & CPUM_CHANGED_CR3)
2303 {
2304 pVM->rem.s.fIgnoreCR3Load = true;
2305 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2306 pVM->rem.s.fIgnoreCR3Load = false;
2307 }
2308
2309 if (fFlags & CPUM_CHANGED_GDTR)
2310 {
2311 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2312 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2313 }
2314
2315 if (fFlags & CPUM_CHANGED_IDTR)
2316 {
2317 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2318 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2319 }
2320
2321 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2322 {
2323 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2324 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2325 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2326 }
2327
2328 if (fFlags & CPUM_CHANGED_LDTR)
2329 {
2330 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2331 {
2332 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2333 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2334 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2335 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u << 8) & 0xFFFFFF;
2336 }
2337 else
2338 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2339 }
2340
2341 if (fFlags & CPUM_CHANGED_CPUID)
2342 {
2343 uint32_t u32Dummy;
2344
2345 /*
2346 * Get the CPUID features.
2347 */
2348 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2349 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2350 }
2351
2352 /* Sync FPU state after CR4, CPUID and EFER (!). */
2353 if (fFlags & CPUM_CHANGED_FPU_REM)
2354 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2355 }
2356
2357 /*
2358 * Sync TR unconditionally to make life simpler.
2359 */
2360 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2361 pVM->rem.s.Env.tr.newselector = 0;
2362 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags;
2363 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2364 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2365 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u << 8) & 0xFFFFFF;
2366 /* Note! do_interrupt will fault if the busy flag is still set... */
2367 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2368
2369 /*
2370 * Update selector registers.
2371 *
2372 * This must be done *after* we've synced gdt, ldt and crX registers
2373 * since we're reading the GDT/LDT in sync_seg. This will happen for instance with
2374 * a saved state which takes a quick dip into raw mode.
2375 *
2376 * CPL/Stack; Note! Check this one first, as the CPL might have changed.
2377 * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
2378 */
2379 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2380 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2381#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
2382 do \
2383 { \
2384 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
2385 { \
2386 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
2387 (a_pVBoxSReg)->Sel, \
2388 (a_pVBoxSReg)->u64Base, \
2389 (a_pVBoxSReg)->u32Limit, \
2390 ((a_pVBoxSReg)->Attr.u << 8) & 0xFFFFFF); \
2391 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
2392 } \
2393 /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
2394 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
2395 { \
2396 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
2397 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
2398 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
2399 if ((a_pRemSReg)->newselector) \
2400 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
2401 } \
2402 else \
2403 (a_pRemSReg)->newselector = 0; \
2404 } while (0)
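    /*
     * Worked example (illustration only) of the attribute conversion used by
     * SYNC_IN_SREG above and by SYNC_BACK_SREG later in the file. For a
     * typical 32-bit ring-0 data segment the VBox attribute word is
     * Attr.u = 0xc093:
     *   VBox -> QEmu: (0xc093 << 8) & 0xFFFFFF = 0xc09300
     *   QEmu -> VBox: (0xc09300 >> 8) & 0xF0FF  = 0xc093
     * The 0xF0FF mask drops bits 8-11, which in the raw descriptor dword
     * hold limit bits 19:16 rather than attributes.
     */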
2405
2406 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
2407 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
2408 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
2409 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
2410 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
2411 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
2412 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2413 * be the same but not the base/limit. */
2414
2415 /*
2416 * Check for traps.
2417 */
2418 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2419 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2420 if (RT_SUCCESS(rc))
2421 {
2422#ifdef DEBUG
2423 if (u8TrapNo == 0x80)
2424 {
2425 remR3DumpLnxSyscall(pVCpu);
2426 remR3DumpOBsdSyscall(pVCpu);
2427 }
2428#endif
2429
2430 pVM->rem.s.Env.exception_index = u8TrapNo;
2431 if (enmType != TRPM_SOFTWARE_INT)
2432 {
2433 pVM->rem.s.Env.exception_is_int = 0;
2434 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2435 }
2436 else
2437 {
2438 /*
2439 * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2440 * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2441 * for int3 and into.
2442 */
2443 pVM->rem.s.Env.exception_is_int = 1;
2444 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2445 /* int 3 may be generated by one-byte 0xcc */
2446 if (u8TrapNo == 3)
2447 {
2448 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2449 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2450 }
2451 /* int 4 may be generated by one-byte 0xce */
2452 else if (u8TrapNo == 4)
2453 {
2454 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2455 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2456 }
2457 }
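        /*
         * Instruction lengths assumed by the fixups above (standard x86
         * encodings, noted here for reference):
         *   0xCC       int3    1 byte
         *   0xCD imm8  int n   2 bytes (the default used above)
         *   0xCE       into    1 byte
         */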
2458
2459 /* get error code and cr2 if needed. */
2460 if (enmType == TRPM_TRAP)
2461 {
2462 switch (u8TrapNo)
2463 {
2464 case 0x0e:
2465 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2466 /* fallthru */
2467 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2468 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2469 break;
2470
2471 case 0x11: case 0x08:
2472 default:
2473 pVM->rem.s.Env.error_code = 0;
2474 break;
2475 }
2476 }
2477 else
2478 pVM->rem.s.Env.error_code = 0;
2479
2480 /*
2481 * We can now reset the active trap since the recompiler is gonna have a go at it.
2482 */
2483 rc = TRPMResetTrap(pVCpu);
2484 AssertRC(rc);
2485 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2486 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2487 }
2488
2489 /*
2490 * Clear old interrupt request flags; Check for pending hardware interrupts.
2491 * (See @remark for why we don't check for other FFs.)
2492 */
2493 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2494 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2495 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2496 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2497
2498 /*
2499 * We're now in REM mode.
2500 */
2501 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2502 pVM->rem.s.fInREM = true;
2503 pVM->rem.s.fInStateSync = false;
2504 pVM->rem.s.cCanExecuteRaw = 0;
2505 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2506 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2507 return VINF_SUCCESS;
2508}
2509
2510
2511/**
2512 * Syncs back changes in the REM state to the VM state.
2513 *
2514 * This must be called after invoking REMR3Run().
2515 * Calling it several times in a row is not permitted.
2516 *
2517 * @returns VBox status code.
2518 *
2519 * @param pVM VM Handle.
2520 * @param pVCpu VMCPU Handle.
2521 */
2522REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2523{
2524 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2525 unsigned i;
2526 Assert(pCtx);
2527
2528 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2529 Log2(("REMR3StateBack:\n"));
2530 Assert(pVM->rem.s.fInREM);
2531
2532 /*
2533 * Copy back the registers.
2534 * This is done in the order they are declared in the CPUMCTX structure.
2535 */
2536
2537 /** @todo FOP */
2538 /** @todo FPUIP */
2539 /** @todo CS */
2540 /** @todo FPUDP */
2541 /** @todo DS */
2542
2543 /** @todo check if FPU/XMM was actually used in the recompiler */
2544 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2545//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2546
2547#ifdef TARGET_X86_64
2548 /* Note that the high dwords of the 64-bit registers are undefined in 32-bit mode and after a mode change. */
2549 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2550 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2551 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2552 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2553 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2554 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2555 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2556 pCtx->r8 = pVM->rem.s.Env.regs[8];
2557 pCtx->r9 = pVM->rem.s.Env.regs[9];
2558 pCtx->r10 = pVM->rem.s.Env.regs[10];
2559 pCtx->r11 = pVM->rem.s.Env.regs[11];
2560 pCtx->r12 = pVM->rem.s.Env.regs[12];
2561 pCtx->r13 = pVM->rem.s.Env.regs[13];
2562 pCtx->r14 = pVM->rem.s.Env.regs[14];
2563 pCtx->r15 = pVM->rem.s.Env.regs[15];
2564
2565 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2566
2567#else
2568 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2569 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2570 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2571 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2572 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2573 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2574 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2575
2576 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2577#endif
2578
2579#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2580 do \
2581 { \
2582 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2583 if (!pVM->rem.s.Env.segs[R_##a_SREG].newselector) \
2584 { \
2585 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2586 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2587 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2588 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2589 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */ \
2590 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> 8) & 0xF0FF; \
2591 } \
2592 else \
2593 { \
2594 pCtx->a_sreg.fFlags = 0; \
2595 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2596 } \
2597 } while (0)
2598
2599 SYNC_BACK_SREG(es, ES);
2600 SYNC_BACK_SREG(cs, CS);
2601 SYNC_BACK_SREG(ss, SS);
2602 SYNC_BACK_SREG(ds, DS);
2603 SYNC_BACK_SREG(fs, FS);
2604 SYNC_BACK_SREG(gs, GS);
2605
2606#ifdef TARGET_X86_64
2607 pCtx->rip = pVM->rem.s.Env.eip;
2608 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2609#else
2610 pCtx->eip = pVM->rem.s.Env.eip;
2611 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2612#endif
2613
2614 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2615 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2616 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2617 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2618 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2619 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2620
2621 for (i = 0; i < 8; i++)
2622 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2623
2624 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2625 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2626 {
2627 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2628 STAM_COUNTER_INC(&gStatREMGDTChange);
2629 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2630 }
2631
2632 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2633 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2634 {
2635 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2636 STAM_COUNTER_INC(&gStatREMIDTChange);
2637 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2638 }
2639
2640 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2641 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2642 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2643 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2644 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2645 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2646 )
2647 {
2648 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2649 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2650 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2651 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2652 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2653 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2654 STAM_COUNTER_INC(&gStatREMLDTRChange);
2655 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2656 }
2657
2658 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2659 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2660 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2661 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2662 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2663 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2664 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2665 : 0)
2666 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2667 )
2668 {
2669 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%#llx,%#x,%#x}\n",
2670 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2671 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2672 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2673 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2674 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2675 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2676 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2677 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2678 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2679 if (pCtx->tr.Attr.u)
2680 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2681 STAM_COUNTER_INC(&gStatREMTRChange);
2682 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2683 }
2684
2685 /* Sysenter MSR */
2686 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2687 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2688 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2689
2690 /* System MSRs. */
2691 pCtx->msrEFER = pVM->rem.s.Env.efer;
2692 pCtx->msrSTAR = pVM->rem.s.Env.star;
2693 pCtx->msrPAT = pVM->rem.s.Env.pat;
2694#ifdef TARGET_X86_64
2695 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2696 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2697 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2698 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2699#endif
2700
2701 /* Inhibit interrupt flag. */
2702 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2703 {
2704 Log(("Setting VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2705 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2706 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2707 }
2708 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2709 {
2710 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2711 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2712 }
2713
2714 remR3TrapClear(pVM);
2715
2716 /*
2717 * Check for traps.
2718 */
2719 if ( pVM->rem.s.Env.exception_index >= 0
2720 && pVM->rem.s.Env.exception_index < 256)
2721 {
2722 int rc;
2723
2724 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2725 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2726 AssertRC(rc);
2727 switch (pVM->rem.s.Env.exception_index)
2728 {
2729 case 0x0e:
2730 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2731 /* fallthru */
2732 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2733 case 0x11: case 0x08: /* 0 */
2734 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2735 break;
2736 }
2737
2738 }
2739
2740 /*
2741 * We're no longer in REM mode.
2742 */
2743 CPUMR3RemLeave(pVCpu,
2744 HWACCMIsEnabled(pVM)
2745 || ( pVM->rem.s.Env.segs[R_SS].newselector
2746 | pVM->rem.s.Env.segs[R_GS].newselector
2747 | pVM->rem.s.Env.segs[R_FS].newselector
2748 | pVM->rem.s.Env.segs[R_ES].newselector
2749 | pVM->rem.s.Env.segs[R_DS].newselector
2750 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2751 );
2752 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2753 pVM->rem.s.fInREM = false;
2754 pVM->rem.s.pCtx = NULL;
2755 pVM->rem.s.Env.pVCpu = NULL;
2756 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2757 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2758 return VINF_SUCCESS;
2759}
2760
2761
2762/**
2763 * This is called by the disassembler when it wants to update the cpu state
2764 * before, for instance, doing a register dump.
2765 */
2766static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2767{
2768 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2769 unsigned i;
2770
2771 Assert(pVM->rem.s.fInREM);
2772
2773 /*
2774 * Copy back the registers.
2775 * This is done in the order they are declared in the CPUMCTX structure.
2776 */
2777
2778 /** @todo FOP */
2779 /** @todo FPUIP */
2780 /** @todo CS */
2781 /** @todo FPUDP */
2782 /** @todo DS */
2783 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2784 pCtx->fpu.MXCSR = 0;
2785 pCtx->fpu.MXCSR_MASK = 0;
2786
2787 /** @todo check if FPU/XMM was actually used in the recompiler */
2788 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2789//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2790
2791#ifdef TARGET_X86_64
2792 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2793 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2794 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2795 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2796 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2797 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2798 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2799 pCtx->r8 = pVM->rem.s.Env.regs[8];
2800 pCtx->r9 = pVM->rem.s.Env.regs[9];
2801 pCtx->r10 = pVM->rem.s.Env.regs[10];
2802 pCtx->r11 = pVM->rem.s.Env.regs[11];
2803 pCtx->r12 = pVM->rem.s.Env.regs[12];
2804 pCtx->r13 = pVM->rem.s.Env.regs[13];
2805 pCtx->r14 = pVM->rem.s.Env.regs[14];
2806 pCtx->r15 = pVM->rem.s.Env.regs[15];
2807
2808 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2809#else
2810 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2811 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2812 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2813 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2814 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2815 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2816 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2817
2818 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2819#endif
2820
2821 SYNC_BACK_SREG(es, ES);
2822 SYNC_BACK_SREG(cs, CS);
2823 SYNC_BACK_SREG(ss, SS);
2824 SYNC_BACK_SREG(ds, DS);
2825 SYNC_BACK_SREG(fs, FS);
2826 SYNC_BACK_SREG(gs, GS);
2827
2828#ifdef TARGET_X86_64
2829 pCtx->rip = pVM->rem.s.Env.eip;
2830 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2831#else
2832 pCtx->eip = pVM->rem.s.Env.eip;
2833 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2834#endif
2835
2836 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2837 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2838 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2839 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2840 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2841 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2842
2843 for (i = 0; i < 8; i++)
2844 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2845
2846 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2847 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2848 {
2849 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2850 STAM_COUNTER_INC(&gStatREMGDTChange);
2851 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2852 }
2853
2854 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2855 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2856 {
2857 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2858 STAM_COUNTER_INC(&gStatREMIDTChange);
2859 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2860 }
2861
2862 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2863 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2864 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2865 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2866 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2867 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2868 )
2869 {
2870 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2871 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2872 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2873 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2874 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2875 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2876 STAM_COUNTER_INC(&gStatREMLDTRChange);
2877 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2878 }
2879
2880 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2881 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2882 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2883 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2884 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2885 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2886 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2887 : 0)
2888 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2889 )
2890 {
2891 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%#llx,%#x,%#x}\n",
2892 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2893 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2894 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2895 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2896 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2897 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2898 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2899 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2900 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2901 if (pCtx->tr.Attr.u)
2902 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2903 STAM_COUNTER_INC(&gStatREMTRChange);
2904 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2905 }
2906
2907 /* Sysenter MSR */
2908 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2909 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2910 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2911
2912 /* System MSRs. */
2913 pCtx->msrEFER = pVM->rem.s.Env.efer;
2914 pCtx->msrSTAR = pVM->rem.s.Env.star;
2915 pCtx->msrPAT = pVM->rem.s.Env.pat;
2916#ifdef TARGET_X86_64
2917 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2918 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2919 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2920 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2921#endif
2922
2923}
2924
2925
2926/**
2927 * Update the VMM state information if we're currently in REM.
2928 *
2929 * This method is used by DBGF and PDM devices when there is any uncertainty about whether
2930 * we're currently executing in REM and the VMM state is invalid. This method will of
2931 * course check that we're executing in REM before syncing any data over to the VMM.
2932 *
2933 * @param pVM The VM handle.
2934 * @param pVCpu The VMCPU handle.
2935 */
2936REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2937{
2938 if (pVM->rem.s.fInREM)
2939 remR3StateUpdate(pVM, pVCpu);
2940}
2941
2942
2943#undef LOG_GROUP
2944#define LOG_GROUP LOG_GROUP_REM
2945
2946
2947/**
2948 * Notify the recompiler about Address Gate 20 state change.
2949 *
2950 * This notification is required since A20 gate changes are
2951 * initiated from a device driver and the VM might just as
2952 * well be in REM mode as in RAW mode.
2953 *
2954 * @param pVM VM handle.
2955 * @param pVCpu VMCPU handle.
2956 * @param fEnable True if the gate should be enabled.
2957 * False if the gate should be disabled.
2958 */
2959REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2960{
2961 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2962 VM_ASSERT_EMT(pVM);
2963
2964 /** @todo SMP and the A20 gate... */
2965 if (pVM->rem.s.Env.pVCpu == pVCpu)
2966 {
2967 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2968 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2969 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2970 }
2971}
2972
2973
2974/**
2975 * Replays the handler notification changes.
2976 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2977 *
2978 * @param pVM VM handle.
2979 */
2980REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2981{
2982 /*
2983 * Replay the flushes.
2984 */
2985 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2986 VM_ASSERT_EMT(pVM);
2987
2988 /** @todo this isn't ensuring correct replay order. */
2989 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2990 {
2991 uint32_t idxNext;
2992 uint32_t idxRevHead;
2993 uint32_t idxHead;
2994#ifdef VBOX_STRICT
2995 int32_t c = 0;
2996#endif
2997
2998 /* Lockless purging of pending notifications. */
2999 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3000 if (idxHead == UINT32_MAX)
3001 return;
3002 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3003
3004 /*
3005 * Reverse the list to process it in FIFO order.
3006 */
3007 idxRevHead = UINT32_MAX;
3008 do
3009 {
3010 /* Save the index of the next rec. */
3011 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3012 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3013 /* Push the record onto the reversed list. */
3014 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3015 idxRevHead = idxHead;
3016 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3017 /* Advance. */
3018 idxHead = idxNext;
3019 } while (idxHead != UINT32_MAX);
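        /*
         * Example (illustration): if records were pushed in the order 7, 3, 9,
         * the pending list reads 9 -> 3 -> 7 (LIFO); after the loop above the
         * idxRevHead chain is 7 -> 3 -> 9, i.e. FIFO processing order.
         */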
3020
3021 /*
3022 * Loop thru the list, reinserting the records into the free list as they are
3023 * processed to avoid having other EMTs running out of entries while we're flushing.
3024 */
3025 idxHead = idxRevHead;
3026 do
3027 {
3028 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3029 uint32_t idxCur;
3030 Assert(--c >= 0);
3031
3032 switch (pCur->enmKind)
3033 {
3034 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3035 remR3NotifyHandlerPhysicalRegister(pVM,
3036 pCur->u.PhysicalRegister.enmType,
3037 pCur->u.PhysicalRegister.GCPhys,
3038 pCur->u.PhysicalRegister.cb,
3039 pCur->u.PhysicalRegister.fHasHCHandler);
3040 break;
3041
3042 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3043 remR3NotifyHandlerPhysicalDeregister(pVM,
3044 pCur->u.PhysicalDeregister.enmType,
3045 pCur->u.PhysicalDeregister.GCPhys,
3046 pCur->u.PhysicalDeregister.cb,
3047 pCur->u.PhysicalDeregister.fHasHCHandler,
3048 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3049 break;
3050
3051 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3052 remR3NotifyHandlerPhysicalModify(pVM,
3053 pCur->u.PhysicalModify.enmType,
3054 pCur->u.PhysicalModify.GCPhysOld,
3055 pCur->u.PhysicalModify.GCPhysNew,
3056 pCur->u.PhysicalModify.cb,
3057 pCur->u.PhysicalModify.fHasHCHandler,
3058 pCur->u.PhysicalModify.fRestoreAsRAM);
3059 break;
3060
3061 default:
3062 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3063 break;
3064 }
3065
3066 /*
3067 * Advance idxHead.
3068 */
3069 idxCur = idxHead;
3070 idxHead = pCur->idxNext;
3071 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3072
3073 /*
3074 * Put the record back into the free list.
3075 */
3076 do
3077 {
3078 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3079 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3080 ASMCompilerBarrier();
3081 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3082 } while (idxHead != UINT32_MAX);
3083
3084#ifdef VBOX_STRICT
3085 if (pVM->cCpus == 1)
3086 {
3087 unsigned c;
3088 /* Check that all records are now on the free list. */
3089 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3090 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3091 c++;
3092 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3093 }
3094#endif
3095 }
3096}
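
/*
 * Illustrative sketch, not part of the original file: the allocation side
 * that feeds the list drained by REMR3ReplayHandlerNotifications above.
 * A producer would pop a record off idxFreeList with the same cmpxchg
 * pattern and later push it onto idxPendingList. The helper name is made
 * up, and a production version would have to consider ABA hazards.
 */
#if 0
static PREMHANDLERNOTIFICATION remExampleAllocNotification(PVM pVM)
{
    uint32_t idx;
    uint32_t idxNext;
    do
    {
        idx = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
        if (idx == UINT32_MAX)
            return NULL; /* Free list exhausted; replay notifications first. */
        idxNext = pVM->rem.s.aHandlerNotifications[idx].idxNext;
    } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxNext, idx));
    return &pVM->rem.s.aHandlerNotifications[idx];
}
#endif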
3097
3098
3099/**
3100 * Notify REM about changed code page.
3101 *
3102 * @returns VBox status code.
3103 * @param pVM VM handle.
3104 * @param pVCpu VMCPU handle.
3105 * @param pvCodePage Code page address
3106 */
3107REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3108{
3109#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3110 int rc;
3111 RTGCPHYS PhysGC;
3112 uint64_t flags;
3113
3114 VM_ASSERT_EMT(pVM);
3115
3116 /*
3117 * Get the physical page address.
3118 */
3119 rc = PGMGstGetPage(pVCpu, pvCodePage, &flags, &PhysGC);
3120 if (rc == VINF_SUCCESS)
3121 {
3122 /*
3123 * Sync the required registers and flush the whole page.
3124 * (Easier to do the whole page than to notify it about each physical
3125 * byte that was changed.)
3126 */
3127 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3128 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3129 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3130 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3131
3132 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3133 }
3134#endif
3135 return VINF_SUCCESS;
3136}
3137
3138
3139/**
3140 * Notification about a successful MMR3PhysRegister() call.
3141 *
3142 * @param pVM VM handle.
3143 * @param GCPhys The physical address of the RAM.
3144 * @param cb Size of the memory.
3145 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3146 */
3147REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3148{
3149 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3150 VM_ASSERT_EMT(pVM);
3151
3152 /*
3153 * Validate input - we trust the caller.
3154 */
3155 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3156 Assert(cb);
3157 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3158 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3159
3160 /*
3161 * Base ram? Update GCPhysLastRam.
3162 */
3163 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3164 {
3165 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3166 {
3167 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3168 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3169 }
3170 }
3171
3172 /*
3173 * Register the ram.
3174 */
3175 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3176
3177 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3178 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3179 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3180
3181 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3182}
3183
3184
3185/**
3186 * Notification about a successful MMR3PhysRomRegister() call.
3187 *
3188 * @param pVM VM handle.
3189 * @param GCPhys The physical address of the ROM.
3190 * @param cb The size of the ROM.
3191 * @param pvCopy Pointer to the ROM copy.
3192 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3193 * This function will be called whenever the protection of the
3194 * shadow ROM changes (at reset and end of POST).
3195 */
3196REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3197{
3198 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3199 VM_ASSERT_EMT(pVM);
3200
3201 /*
3202 * Validate input - we trust the caller.
3203 */
3204 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3205 Assert(cb);
3206 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3207
3208 /*
3209 * Register the rom.
3210 */
3211 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3212
3213 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3214 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3215 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3216
3217 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3218}
3219
3220
3221/**
3222 * Notification about a successful memory deregistration or reservation.
3223 *
3224 * @param pVM VM Handle.
3225 * @param GCPhys Start physical address.
3226 * @param cb The size of the range.
3227 */
3228REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3229{
3230 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3231 VM_ASSERT_EMT(pVM);
3232
3233 /*
3234 * Validate input - we trust the caller.
3235 */
3236 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3237 Assert(cb);
3238 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3239
3240 /*
3241 * Unassign the memory.
3242 */
3243 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3244
3245 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3246 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3247 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3248
3249 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3250}
3251
3252
3253/**
3254 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3255 *
3256 * @param pVM VM Handle.
3257 * @param enmType Handler type.
3258 * @param GCPhys Handler range address.
3259 * @param cb Size of the handler range.
3260 * @param fHasHCHandler Set if the handler has a HC callback function.
3261 *
3262 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3263 * Handler memory type to memory which has no HC handler.
3264 */
3265static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3266{
3267 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3268 enmType, GCPhys, cb, fHasHCHandler));
3269
3270 VM_ASSERT_EMT(pVM);
3271 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3272 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3273
3274
3275 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3276
3277 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3278 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3279 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3280 else if (fHasHCHandler)
3281 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3282 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3283
3284 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3285}
3286
3287/**
3288 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3289 *
3290 * @param pVM VM Handle.
3291 * @param enmType Handler type.
3292 * @param GCPhys Handler range address.
3293 * @param cb Size of the handler range.
3294 * @param fHasHCHandler Set if the handler has a HC callback function.
3295 *
3296 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3297 * Handler memory type to memory which has no HC handler.
3298 */
3299REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3300{
3301 REMR3ReplayHandlerNotifications(pVM);
3302
3303 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3304}
3305
3306/**
3307 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3308 *
3309 * @param pVM VM Handle.
3310 * @param enmType Handler type.
3311 * @param GCPhys Handler range address.
3312 * @param cb Size of the handler range.
3313 * @param fHasHCHandler Set if the handler has a HC callback function.
3314 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3315 */
3316static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3317{
3318 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3319 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3320 VM_ASSERT_EMT(pVM);
3321
3322
3323 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3324
3325 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3326 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3327 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3328 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3329 else if (fHasHCHandler)
3330 {
3331 if (!fRestoreAsRAM)
3332 {
3333 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3334 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3335 }
3336 else
3337 {
3338 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3339 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3340 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3341 }
3342 }
3343 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3344
3345 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3346}
3347
3348/**
3349 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3350 *
3351 * @param pVM VM Handle.
3352 * @param enmType Handler type.
3353 * @param GCPhys Handler range address.
3354 * @param cb Size of the handler range.
3355 * @param fHasHCHandler Set if the handler has a HC callback function.
3356 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3357 */
3358REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3359{
3360 REMR3ReplayHandlerNotifications(pVM);
3361 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3362}
3363
3364
3365/**
3366 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3367 *
3368 * @param pVM VM Handle.
3369 * @param enmType Handler type.
3370 * @param GCPhysOld Old handler range address.
3371 * @param GCPhysNew New handler range address.
3372 * @param cb Size of the handler range.
3373 * @param fHasHCHandler Set if the handler has a HC callback function.
3374 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3375 */
3376static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3377{
3378 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3379 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3380 VM_ASSERT_EMT(pVM);
3381 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3382
3383 if (fHasHCHandler)
3384 {
3385 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3386
3387 /*
3388 * Reset the old page.
3389 */
3390 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3391 if (!fRestoreAsRAM)
3392 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3393 else
3394 {
3395 /* This is not perfect, but it'll do for PD monitoring... */
3396 Assert(cb == PAGE_SIZE);
3397 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3398 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3399 }
3400
3401 /*
3402 * Update the new page.
3403 */
3404 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3405 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3406 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3407 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3408
3409 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3410 }
3411}
3412
3413/**
3414 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3415 *
3416 * @param pVM VM Handle.
3417 * @param enmType Handler type.
3418 * @param GCPhysOld Old handler range address.
3419 * @param GCPhysNew New handler range address.
3420 * @param cb Size of the handler range.
3421 * @param fHasHCHandler Set if the handler has a HC callback function.
3422 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3423 */
3424REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3425{
3426 REMR3ReplayHandlerNotifications(pVM);
3427
3428 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3429}
3430
3431/**
3432 * Checks if we're handling access to this page or not.
3433 *
3434 * @returns true if we're trapping access.
3435 * @returns false if we aren't.
3436 * @param pVM The VM handle.
3437 * @param GCPhys The physical address.
3438 *
3439 * @remark This function will only work correctly in VBOX_STRICT builds!
3440 */
3441REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3442{
3443#ifdef VBOX_STRICT
3444 unsigned long off;
3445 REMR3ReplayHandlerNotifications(pVM);
3446
3447 off = get_phys_page_offset(GCPhys);
3448 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3449 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3450 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3451#else
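    /* Without VBOX_STRICT there is no page offset info available, so we simply
       report the page as not handled. */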
3452 return false;
3453#endif
3454}
3455
3456
3457/**
3458 * Deals with a rare case in get_phys_addr_code where the code
3459 * is being monitored.
3460 *
3461 * It could also be an MMIO page, in which case we will raise a fatal error.
3462 *
3463 * @returns The physical address corresponding to addr.
3464 * @param env The cpu environment.
3465 * @param addr The virtual address.
3466 * @param pTLBEntry The TLB entry.
3467 */
3468target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3469 target_ulong addr,
3470 CPUTLBEntry *pTLBEntry,
3471 target_phys_addr_t ioTLBEntry)
3472{
3473 PVM pVM = env->pVM;
3474
3475 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3476 {
3477 /* If code memory is being monitored, the corresponding IOTLB entry will have
3478 the handler IO type, and its addend will provide the real physical address,
3479 no matter whether we store the VA in the TLB, since handlers are always passed the PA. */
3480 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3481 return ret;
3482 }
3483 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3484 "*** handlers\n",
3485 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3486 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3487 LogRel(("*** mmio\n"));
3488 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3489 LogRel(("*** phys\n"));
3490 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3491 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3492 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3493 AssertFatalFailed();
3494}
3495
3496/**
3497 * Read guest RAM and ROM.
3498 *
3499 * @param SrcGCPhys The source address (guest physical).
3500 * @param pvDst The destination address.
3501 * @param cb Number of bytes to read.
3502 */
3503void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3504{
3505 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3506 VBOX_CHECK_ADDR(SrcGCPhys);
3507 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3508#ifdef VBOX_DEBUG_PHYS
3509 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3510#endif
3511 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3512}
3513
3514
3515/**
3516 * Read guest RAM and ROM, unsigned 8-bit.
3517 *
3518 * @param SrcGCPhys The source address (guest physical).
3519 */
3520RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3521{
3522 uint8_t val;
3523 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3524 VBOX_CHECK_ADDR(SrcGCPhys);
3525 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3526 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3527#ifdef VBOX_DEBUG_PHYS
3528 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3529#endif
3530 return val;
3531}
3532
3533
3534/**
3535 * Read guest RAM and ROM, signed 8-bit.
3536 *
3537 * @param SrcGCPhys The source address (guest physical).
3538 */
3539RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3540{
3541 int8_t val;
3542 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3543 VBOX_CHECK_ADDR(SrcGCPhys);
3544 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3545 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3546#ifdef VBOX_DEBUG_PHYS
3547 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3548#endif
3549 return val;
3550}
3551
3552
3553/**
3554 * Read guest RAM and ROM, unsigned 16-bit.
3555 *
3556 * @param SrcGCPhys The source address (guest physical).
3557 */
3558RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3559{
3560 uint16_t val;
3561 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3562 VBOX_CHECK_ADDR(SrcGCPhys);
3563 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3564 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3565#ifdef VBOX_DEBUG_PHYS
3566 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3567#endif
3568 return val;
3569}
3570
3571
3572/**
3573 * Read guest RAM and ROM, signed 16-bit.
3574 *
3575 * @param SrcGCPhys The source address (guest physical).
3576 */
3577RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3578{
3579 int16_t val;
3580 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3581 VBOX_CHECK_ADDR(SrcGCPhys);
3582 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3583 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3584#ifdef VBOX_DEBUG_PHYS
3585 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3586#endif
3587 return val;
3588}
3589
3590
3591/**
3592 * Read guest RAM and ROM, unsigned 32-bit.
3593 *
3594 * @param SrcGCPhys The source address (guest physical).
3595 */
3596RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3597{
3598 uint32_t val;
3599 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3600 VBOX_CHECK_ADDR(SrcGCPhys);
3601 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3602 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3603#ifdef VBOX_DEBUG_PHYS
3604 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3605#endif
3606 return val;
3607}
3608
3609
3610/**
3611 * Read guest RAM and ROM, signed 32-bit.
3612 *
3613 * @param SrcGCPhys The source address (guest physical).
3614 */
3615RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3616{
3617 int32_t val;
3618 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3619 VBOX_CHECK_ADDR(SrcGCPhys);
3620 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3621 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3622#ifdef VBOX_DEBUG_PHYS
3623 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3624#endif
3625 return val;
3626}
3627
3628
3629/**
3630 * Read guest RAM and ROM, unsigned 64-bit.
3631 *
3632 * @param SrcGCPhys The source address (guest physical).
3633 */
3634uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3635{
3636 uint64_t val;
3637 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3638 VBOX_CHECK_ADDR(SrcGCPhys);
3639 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3640 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3641#ifdef VBOX_DEBUG_PHYS
3642 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3643#endif
3644 return val;
3645}
3646
3647
3648/**
3649 * Read guest RAM and ROM, signed 64-bit.
3650 *
3651 * @param SrcGCPhys The source address (guest physical).
3652 */
3653int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3654{
3655 int64_t val;
3656 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3657 VBOX_CHECK_ADDR(SrcGCPhys);
3658 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3659 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3660#ifdef VBOX_DEBUG_PHYS
3661 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3662#endif
3663 return val;
3664}
3665
3666
3667/**
3668 * Write guest RAM.
3669 *
3670 * @param DstGCPhys The destination address (guest physical).
3671 * @param pvSrc The source address.
3672 * @param cb Number of bytes to write.
3673 */
3674void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3675{
3676 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3677 VBOX_CHECK_ADDR(DstGCPhys);
3678 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3679 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3680#ifdef VBOX_DEBUG_PHYS
3681 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3682#endif
3683}
3684
3685
3686/**
3687 * Write guest RAM, unsigned 8-bit.
3688 *
3689 * @param DstGCPhys The destination address (guest physical).
3690 * @param val Value
3691 */
3692void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3693{
3694 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3695 VBOX_CHECK_ADDR(DstGCPhys);
3696 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3697 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3698#ifdef VBOX_DEBUG_PHYS
3699 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3700#endif
3701}
3702
3703
3704/**
3705 * Write guest RAM, unsigned 16-bit.
3706 *
3707 * @param DstGCPhys The destination address (guest physical).
3708 * @param val Value
3709 */
3710void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3711{
3712 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3713 VBOX_CHECK_ADDR(DstGCPhys);
3714 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3715 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3716#ifdef VBOX_DEBUG_PHYS
3717 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3718#endif
3719}
3720
3721
3722/**
3723 * Write guest RAM, unsigned 32-bit.
3724 *
3725 * @param DstGCPhys The destination address (guest physical).
3726 * @param val Value
3727 */
3728void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3729{
3730 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3731 VBOX_CHECK_ADDR(DstGCPhys);
3732 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3733 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3734#ifdef VBOX_DEBUG_PHYS
3735 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3736#endif
3737}
3738
3739
3740/**
3741 * Write guest RAM, unsigned 64-bit.
3742 *
3743 * @param DstGCPhys The destination address (guest physical).
3744 * @param val Value
3745 */
3746void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3747{
3748 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3749 VBOX_CHECK_ADDR(DstGCPhys);
3750 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3751 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3752#ifdef VBOX_DEBUG_PHYS
3753 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3754#endif
3755}
3756
3757#undef LOG_GROUP
3758#define LOG_GROUP LOG_GROUP_REM_MMIO
3759
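/* The thunks below forward MMIO accesses to IOM; presumably they are the
   callbacks registered for the iMMIOMemType memory type. */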
3760/** Read MMIO memory. */
3761static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3762{
3763 uint32_t u32 = 0;
3764 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3765 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3766 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3767 return u32;
3768}
3769
3770/** Read MMIO memory. */
3771static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3772{
3773 uint32_t u32 = 0;
3774 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3775 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3776 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3777 return u32;
3778}
3779
3780/** Read MMIO memory. */
3781static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3782{
3783 uint32_t u32 = 0;
3784 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3785 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3786 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3787 return u32;
3788}
3789
3790/** Write to MMIO memory. */
3791static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3792{
3793 int rc;
3794 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3795 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3796 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3797}
3798
3799/** Write to MMIO memory. */
3800static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3801{
3802 int rc;
3803 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3804 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3805 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3806}
3807
3808/** Write to MMIO memory. */
3809static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3810{
3811 int rc;
3812 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3813 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3814 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3815}
3816
3817
3818#undef LOG_GROUP
3819#define LOG_GROUP LOG_GROUP_REM_HANDLER
3820
3821/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3822
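/* These thunks forward accesses to PGM so registered physical access handlers
   get invoked; presumably they back the iHandlerMemType memory type. */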
3823static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3824{
3825 uint8_t u8;
3826 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3827 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3828 return u8;
3829}
3830
3831static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3832{
3833 uint16_t u16;
3834 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3835 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3836 return u16;
3837}
3838
3839static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3840{
3841 uint32_t u32;
3842 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3843 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3844 return u32;
3845}
3846
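/* Note! The write thunks below pass &u32 with a smaller size, which only
   works on a little endian host. */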
3847static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3848{
3849 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3850 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3851}
3852
3853static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3854{
3855 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3856 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3857}
3858
3859static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3860{
3861 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3862 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3863}
3864
3865/* -+- disassembly -+- */
3866
3867#undef LOG_GROUP
3868#define LOG_GROUP LOG_GROUP_REM_DISAS
3869
3870
3871/**
3872 * Enables or disables singled stepped disassembly.
3873 *
3874 * @returns VBox status code.
3875 * @param pVM VM handle.
3876 * @param fEnable To enable set this flag, to disable clear it.
3877 */
3878static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3879{
3880 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3881 VM_ASSERT_EMT(pVM);
3882
3883 if (fEnable)
3884 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3885 else
3886 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3887#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3888 cpu_single_step(&pVM->rem.s.Env, fEnable);
3889#endif
3890 return VINF_SUCCESS;
3891}
3892
3893
3894/**
3895 * Enables or disables singled stepped disassembly.
3896 *
3897 * @returns VBox status code.
3898 * @param pVM VM handle.
3899 * @param fEnable To enable set this flag, to disable clear it.
3900 */
3901REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3902{
3903 int rc;
3904
3905 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3906 if (VM_IS_EMT(pVM))
3907 return remR3DisasEnableStepping(pVM, fEnable);
3908
3909 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3910 AssertRC(rc);
3911 return rc;
3912}
3913
3914
3915#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3916/**
3917 * External Debugger Command: .remstep [on|off|1|0]
3918 */
3919static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3920{
3921 int rc;
3922
3923 if (cArgs == 0)
3924 /*
3925 * Print the current status.
3926 */
3927 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3928 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3929 else
3930 {
3931 /*
3932 * Convert the argument and change the mode.
3933 */
3934 bool fEnable;
3935 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3936 if (RT_SUCCESS(rc))
3937 {
3938 rc = REMR3DisasEnableStepping(pVM, fEnable);
3939 if (RT_SUCCESS(rc))
3940 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3941 else
3942 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3943 }
3944 else
3945 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3946 }
3947 return rc;
3948}
3949#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3950
3951
3952/**
3953 * Disassembles one instruction and prints it to the log.
3954 *
3955 * @returns Success indicator.
3956 * @param env Pointer to the recompiler CPU structure.
3957 * @param f32BitCode Whether the code should be disassembled as 16 or
3958 * 32 bit code. If -1 the CS selector will be
3959 * inspected.
3960 * @param pszPrefix Optional prefix for each line of the log output.
3961 */
3962bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3963{
3964 PVM pVM = env->pVM;
3965 const bool fLog = LogIsEnabled();
3966 const bool fLog2 = LogIs2Enabled();
3967 int rc = VINF_SUCCESS;
3968
3969 /*
3970 * Don't bother if there ain't any log output to do.
3971 */
3972 if (!fLog && !fLog2)
3973 return true;
3974
3975 /*
3976 * Update the state so DBGF reads the correct register values.
3977 */
3978 remR3StateUpdate(pVM, env->pVCpu);
3979
3980 /*
3981 * Log registers if requested.
3982 */
3983 if (fLog2)
3984 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3985
3986 /*
3987 * Disassemble to log.
3988 */
3989 if (fLog)
3990 {
3991 PVMCPU pVCpu = VMMGetCpu(pVM);
3992 char szBuf[256];
3993 szBuf[0] = '\0';
3994 rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3995 pVCpu->idCpu,
3996 0, /* Sel */
3997 0, /* GCPtr */
3998 DBGF_DISAS_FLAGS_CURRENT_GUEST
3999 | DBGF_DISAS_FLAGS_DEFAULT_MODE
4000 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
4001 szBuf,
4002 sizeof(szBuf),
4003 NULL);
4004 if (RT_FAILURE(rc))
4005 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4006 if (pszPrefix && *pszPrefix)
4007 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4008 else
4009 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4010 }
4011
4012 return RT_SUCCESS(rc);
4013}
4014
4015
4016/**
4017 * Disassemble recompiled code.
4018 *
4019 * @param phFile Ignored, logfile usually.
4020 * @param pvCode Pointer to the code block.
4021 * @param cb Size of the code block.
4022 */
4023void disas(FILE *phFile, void *pvCode, unsigned long cb)
4024{
4025 if (LogIs2Enabled())
4026 {
4027 unsigned off = 0;
4028 char szOutput[256];
4029 DISCPUSTATE Cpu;
4030#ifdef RT_ARCH_X86
4031 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4032#else
4033 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4034#endif
4035
4036 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4037 while (off < cb)
4038 {
4039 uint32_t cbInstr;
4040 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4041 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4042 if (RT_SUCCESS(rc))
4043 RTLogPrintf("%s", szOutput);
4044 else
4045 {
4046 RTLogPrintf("disas error %Rrc\n", rc);
4047 cbInstr = 1;
4048 }
4049 off += cbInstr;
4050 }
4051 }
4052}
4053
4054
4055/**
4056 * Disassemble guest code.
4057 *
4058 * @param phFile Ignored, logfile usually.
4059 * @param uCode The guest address of the code to disassemble. (flat?)
4060 * @param cb Number of bytes to disassemble.
4061 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4062 */
4063void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4064{
4065 if (LogIs2Enabled())
4066 {
4067 PVM pVM = cpu_single_env->pVM;
4068 PVMCPU pVCpu = cpu_single_env->pVCpu;
4069 RTSEL cs;
4070 RTGCUINTPTR eip;
4071
4072 Assert(pVCpu);
4073
4074 /*
4075 * Update the state so DBGF reads the correct register values (flags).
4076 */
4077 remR3StateUpdate(pVM, pVCpu);
4078
4079 /*
4080 * Do the disassembling.
4081 */
4082 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4083 cs = cpu_single_env->segs[R_CS].selector;
4084 eip = uCode - cpu_single_env->segs[R_CS].base;
4085 for (;;)
4086 {
4087 char szBuf[256]; szBuf[0] = '\0'; /* Paranoia: not all failure paths are guaranteed to fill szBuf. */
4088 uint32_t cbInstr;
4089 int rc = DBGFR3DisasInstrEx(pVM,
4090 pVCpu->idCpu,
4091 cs,
4092 eip,
4093 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4094 szBuf, sizeof(szBuf),
4095 &cbInstr);
4096 if (RT_SUCCESS(rc))
4097 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4098 else
4099 {
4100 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4101 cbInstr = 1;
4102 }
4103
4104 /* next */
4105 if (cb <= cbInstr)
4106 break;
4107 cb -= cbInstr;
4108 uCode += cbInstr;
4109 eip += cbInstr;
4110 }
4111 }
4112}
4113
4114
4115/**
4116 * Looks up a guest symbol.
4117 *
4118 * @returns Pointer to symbol name. This is a static buffer.
4119 * @param orig_addr The address in question.
4120 */
4121const char *lookup_symbol(target_ulong orig_addr)
4122{
4123 PVM pVM = cpu_single_env->pVM;
4124 RTGCINTPTR off = 0;
4125 RTDBGSYMBOL Sym;
4126 DBGFADDRESS Addr;
4127
4128 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4129 if (RT_SUCCESS(rc))
4130 {
4131 static char szSym[sizeof(Sym.szName) + 48];
4132 if (!off)
4133 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4134 else if (off > 0)
4135 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4136 else
4137 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4138 return szSym;
4139 }
4140 return "<N/A>";
4141}
4142
4143
4144#undef LOG_GROUP
4145#define LOG_GROUP LOG_GROUP_REM
4146
4147
4148/* -+- FF notifications -+- */
4149
4150
4151/**
4152 * Notification about a pending interrupt.
4153 *
4154 * @param pVM VM Handle.
4155 * @param pVCpu VMCPU Handle.
4156 * @param u8Interrupt The pending interrupt number.
4157 * @thread The emulation thread.
4158 */
4159REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4160{
4161 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4162 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4163}
4164
4165/**
4166 * Queries the pending interrupt.
4167 *
4168 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4169 * @param pVM VM Handle.
4170 * @param pVCpu VMCPU Handle.
4171 * @thread The emulation thread.
4172 */
4173REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4174{
4175 return pVM->rem.s.u32PendingInterrupt;
4176}
4177
4178/**
4179 * Notification about the interrupt FF being set.
4180 *
4181 * @param pVM VM Handle.
4182 * @param pVCpu VMCPU Handle.
4183 * @thread The emulation thread.
4184 */
4185REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4186{
4187#ifndef IEM_VERIFICATION_MODE
4188 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4189 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4190 if (pVM->rem.s.fInREM)
4191 {
4192 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4193 CPU_INTERRUPT_EXTERNAL_HARD);
4194 }
4195#endif
4196}
4197
4198
4199/**
4200 * Notification about the interrupt FF being cleared.
4201 *
4202 * @param pVM VM Handle.
4203 * @param pVCpu VMCPU Handle.
4204 * @thread Any.
4205 */
4206REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4207{
4208 LogFlow(("REMR3NotifyInterruptClear:\n"));
4209 if (pVM->rem.s.fInREM)
4210 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4211}
4212
4213
4214/**
4215 * Notification about pending timer(s).
4216 *
4217 * @param pVM VM Handle.
4218 * @param pVCpuDst The target cpu for this notification.
4219 * TM will not broadcast pending timer events, but use
4220 * a dedicated EMT for them. So, only interrupt REM
4221 * execution if the given CPU is executing in REM.
4222 * @thread Any.
4223 */
4224REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4225{
4226#ifndef IEM_VERIFICATION_MODE
4227#ifndef DEBUG_bird
4228 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4229#endif
4230 if (pVM->rem.s.fInREM)
4231 {
4232 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4233 {
4234 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4235 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4236 CPU_INTERRUPT_EXTERNAL_TIMER);
4237 }
4238 else
4239 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4240 }
4241 else
4242 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4243#endif
4244}
4245
4246
4247/**
4248 * Notification about pending DMA transfers.
4249 *
4250 * @param pVM VM Handle.
4251 * @thread Any.
4252 */
4253REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4254{
4255#ifndef IEM_VERIFICATION_MODE
4256 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4257 if (pVM->rem.s.fInREM)
4258 {
4259 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4260 CPU_INTERRUPT_EXTERNAL_DMA);
4261 }
4262#endif
4263}
4264
4265
4266/**
4267 * Notification about pending timer(s).
4268 *
4269 * @param pVM VM Handle.
4270 * @thread Any.
4271 */
4272REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4273{
4274#ifndef IEM_VERIFICATION_MODE
4275 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4276 if (pVM->rem.s.fInREM)
4277 {
4278 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4279 CPU_INTERRUPT_EXTERNAL_EXIT);
4280 }
4281#endif
4282}
4283
4284
4285/**
4286 * Notification about pending FF set by an external thread.
4287 *
4288 * @param pVM VM handle.
4289 * @thread Any.
4290 */
4291REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4292{
4293#ifndef IEM_VERIFICATION_MODE
4294 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4295 if (pVM->rem.s.fInREM)
4296 {
4297 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4298 CPU_INTERRUPT_EXTERNAL_EXIT);
4299 }
4300#endif
4301}
4302
4303
4304#ifdef VBOX_WITH_STATISTICS
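/** Starts the STAM advanced-profile sample corresponding to @a statcode. */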
4305void remR3ProfileStart(int statcode)
4306{
4307 STAMPROFILEADV *pStat;
4308 switch(statcode)
4309 {
4310 case STATS_EMULATE_SINGLE_INSTR:
4311 pStat = &gStatExecuteSingleInstr;
4312 break;
4313 case STATS_QEMU_COMPILATION:
4314 pStat = &gStatCompilationQEmu;
4315 break;
4316 case STATS_QEMU_RUN_EMULATED_CODE:
4317 pStat = &gStatRunCodeQEmu;
4318 break;
4319 case STATS_QEMU_TOTAL:
4320 pStat = &gStatTotalTimeQEmu;
4321 break;
4322 case STATS_QEMU_RUN_TIMERS:
4323 pStat = &gStatTimers;
4324 break;
4325 case STATS_TLB_LOOKUP:
4326 pStat= &gStatTBLookup;
4327 break;
4328 case STATS_IRQ_HANDLING:
4329 pStat= &gStatIRQ;
4330 break;
4331 case STATS_RAW_CHECK:
4332 pStat = &gStatRawCheck;
4333 break;
4334
4335 default:
4336 AssertMsgFailed(("unknown stat %d\n", statcode));
4337 return;
4338 }
4339 STAM_PROFILE_ADV_START(pStat, a);
4340}
4341
4342
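/** Stops the STAM advanced-profile sample corresponding to @a statcode. */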
4343void remR3ProfileStop(int statcode)
4344{
4345 STAMPROFILEADV *pStat;
4346 switch(statcode)
4347 {
4348 case STATS_EMULATE_SINGLE_INSTR:
4349 pStat = &gStatExecuteSingleInstr;
4350 break;
4351 case STATS_QEMU_COMPILATION:
4352 pStat = &gStatCompilationQEmu;
4353 break;
4354 case STATS_QEMU_RUN_EMULATED_CODE:
4355 pStat = &gStatRunCodeQEmu;
4356 break;
4357 case STATS_QEMU_TOTAL:
4358 pStat = &gStatTotalTimeQEmu;
4359 break;
4360 case STATS_QEMU_RUN_TIMERS:
4361 pStat = &gStatTimers;
4362 break;
4363 case STATS_TLB_LOOKUP:
4364 pStat= &gStatTBLookup;
4365 break;
4366 case STATS_IRQ_HANDLING:
4367 pStat= &gStatIRQ;
4368 break;
4369 case STATS_RAW_CHECK:
4370 pStat = &gStatRawCheck;
4371 break;
4372 default:
4373 AssertMsgFailed(("unknown stat %d\n", statcode));
4374 return;
4375 }
4376 STAM_PROFILE_ADV_STOP(pStat, a);
4377}
4378#endif
4379
4380/**
4381 * Raise an RC, force rem exit.
4382 *
4383 * @param pVM VM handle.
4384 * @param rc The rc.
4385 */
4386void remR3RaiseRC(PVM pVM, int rc)
4387{
4388 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4389 Assert(pVM->rem.s.fInREM);
4390 VM_ASSERT_EMT(pVM);
4391 pVM->rem.s.rc = rc;
4392 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4393}
4394
4395
4396/* -+- timers -+- */
4397
4398uint64_t cpu_get_tsc(CPUX86State *env)
4399{
4400 STAM_COUNTER_INC(&gStatCpuGetTSC);
4401 return TMCpuTickGet(env->pVCpu);
4402}
4403
4404
4405/* -+- interrupts -+- */
4406
4407void cpu_set_ferr(CPUX86State *env)
4408{
4409 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4410 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4411}
4412
4413int cpu_get_pic_interrupt(CPUX86State *env)
4414{
4415 uint8_t u8Interrupt;
4416 int rc;
4417
4418 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4419 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4420 * with the (a)pic.
4421 */
4422 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4423 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4424 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4425 * remove this kludge. */
4426 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4427 {
4428 rc = VINF_SUCCESS;
4429 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4430 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4431 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4432 }
4433 else
4434 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4435
4436 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4437 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4438 if (RT_SUCCESS(rc))
4439 {
4440 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4441 env->interrupt_request |= CPU_INTERRUPT_HARD;
4442 return u8Interrupt;
4443 }
4444 return -1;
4445}
4446
4447
4448/* -+- local apic -+- */
4449
4450#if 0 /* CPUMSetGuestMsr does this now. */
4451void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4452{
4453 int rc = PDMApicSetBase(env->pVM, val);
4454 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4455}
4456#endif
4457
4458uint64_t cpu_get_apic_base(CPUX86State *env)
4459{
4460 uint64_t u64;
4461 int rc = PDMApicGetBase(env->pVM, &u64);
4462 if (RT_SUCCESS(rc))
4463 {
4464 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4465 return u64;
4466 }
4467 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4468 return 0;
4469}
4470
4471void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4472{
4473 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4474 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4475}
4476
4477uint8_t cpu_get_apic_tpr(CPUX86State *env)
4478{
4479 uint8_t u8;
4480 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4481 if (RT_SUCCESS(rc))
4482 {
4483 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4484 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4485 }
4486 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4487 return 0;
4488}
4489
4490/**
4491 * Read an MSR.
4492 *
4493 * @retval 0 success.
4494 * @retval -1 failure, raise \#GP(0).
4495 * @param env The cpu state.
4496 * @param idMsr The MSR to read.
4497 * @param puValue Where to return the value.
4498 */
4499int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4500{
4501 Assert(env->pVCpu);
4502 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4503}
4504
4505/**
4506 * Write to an MSR.
4507 *
4508 * @retval 0 success.
4509 * @retval -1 failure, raise \#GP(0).
4510 * @param env The cpu state.
4511 * @param idMsr The MSR to write.
4512 * @param uValue The value to write.
4513 */
4514int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4515{
4516 Assert(env->pVCpu);
4517 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4518}
4519
4520/* -+- I/O Ports -+- */
4521
4522#undef LOG_GROUP
4523#define LOG_GROUP LOG_GROUP_REM_IOPORT
4524
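/* Common pattern for the I/O port thunks below: VINF_SUCCESS is the fast path,
   informational VINF_EM_* statuses are propagated via remR3RaiseRC() to force
   an exit from the recompiler, and any other status is fatal (remAbort). */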
4525void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4526{
4527 int rc;
4528
4529 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4530 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4531
4532 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4533 if (RT_LIKELY(rc == VINF_SUCCESS))
4534 return;
4535 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4536 {
4537 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4538 remR3RaiseRC(env->pVM, rc);
4539 return;
4540 }
4541 remAbort(rc, __FUNCTION__);
4542}
4543
4544void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4545{
4546 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4547 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4548 if (RT_LIKELY(rc == VINF_SUCCESS))
4549 return;
4550 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4551 {
4552 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4553 remR3RaiseRC(env->pVM, rc);
4554 return;
4555 }
4556 remAbort(rc, __FUNCTION__);
4557}
4558
4559void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4560{
4561 int rc;
4562 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4563 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4564 if (RT_LIKELY(rc == VINF_SUCCESS))
4565 return;
4566 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4567 {
4568 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4569 remR3RaiseRC(env->pVM, rc);
4570 return;
4571 }
4572 remAbort(rc, __FUNCTION__);
4573}
4574
4575uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4576{
4577 uint32_t u32 = 0;
4578 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4579 if (RT_LIKELY(rc == VINF_SUCCESS))
4580 {
4581 if (/*addr != 0x61 && */addr != 0x71)
4582 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4583 return (uint8_t)u32;
4584 }
4585 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4586 {
4587 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4588 remR3RaiseRC(env->pVM, rc);
4589 return (uint8_t)u32;
4590 }
4591 remAbort(rc, __FUNCTION__);
4592 return UINT8_C(0xff);
4593}
4594
4595uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4596{
4597 uint32_t u32 = 0;
4598 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4599 if (RT_LIKELY(rc == VINF_SUCCESS))
4600 {
4601 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4602 return (uint16_t)u32;
4603 }
4604 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4605 {
4606 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4607 remR3RaiseRC(env->pVM, rc);
4608 return (uint16_t)u32;
4609 }
4610 remAbort(rc, __FUNCTION__);
4611 return UINT16_C(0xffff);
4612}
4613
4614uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4615{
4616 uint32_t u32 = 0;
4617 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4618 if (RT_LIKELY(rc == VINF_SUCCESS))
4619 {
4620//if (addr==0x01f0 && u32 == 0x6b6d)
4621// loglevel = ~0;
4622 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4623 return u32;
4624 }
4625 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4626 {
4627 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4628 remR3RaiseRC(env->pVM, rc);
4629 return u32;
4630 }
4631 remAbort(rc, __FUNCTION__);
4632 return UINT32_C(0xffffffff);
4633}
4634
4635#undef LOG_GROUP
4636#define LOG_GROUP LOG_GROUP_REM
4637
4638
4639/* -+- helpers and misc other interfaces -+- */
4640
4641/**
4642 * Perform the CPUID instruction.
4643 *
4644 * @param env Pointer to the recompiler CPU structure.
4645 * @param idx The CPUID leaf (eax).
4646 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4647 * @param pEAX Where to store eax.
4648 * @param pEBX Where to store ebx.
4649 * @param pECX Where to store ecx.
4650 * @param pEDX Where to store edx.
4651 */
4652void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4653 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4654{
4655 NOREF(idxSub);
4656 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4657}
4658
4659
4660#if 0 /* not used */
4661/**
4662 * Interface for qemu hardware to report back fatal errors.
4663 */
4664void hw_error(const char *pszFormat, ...)
4665{
4666 /*
4667 * Bitch about it.
4668 */
4669 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4670 * this in my Odin32 tree at home! */
4671 va_list args;
4672 va_start(args, pszFormat);
4673 RTLogPrintf("fatal error in virtual hardware:");
4674 RTLogPrintfV(pszFormat, args);
4675 va_end(args);
4676 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4677
4678 /*
4679 * If we're in REM context we'll sync back the state before 'jumping' to
4680 * the EMs failure handling.
4681 */
4682 PVM pVM = cpu_single_env->pVM;
4683 if (pVM->rem.s.fInREM)
4684 REMR3StateBack(pVM);
4685 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4686 AssertMsgFailed(("EMR3FatalError returned!\n"));
4687}
4688#endif
4689
4690/**
4691 * Interface for the qemu cpu to report unhandled situation
4692 * raising a fatal VM error.
4693 */
4694void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4695{
4696 va_list va;
4697 PVM pVM;
4698 PVMCPU pVCpu;
4699 char szMsg[256];
4700
4701 /*
4702 * Bitch about it.
4703 */
4704 RTLogFlags(NULL, "nodisabled nobuffered");
4705 RTLogFlush(NULL);
4706
4707 va_start(va, pszFormat);
4708#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4709 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4710 unsigned cArgs = 0;
4711 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4712 const char *psz = strchr(pszFormat, '%');
4713 while (psz && cArgs < 6)
4714 {
4715 auArgs[cArgs++] = va_arg(va, uintptr_t);
4716 psz = strchr(psz + 1, '%');
4717 }
4718 switch (cArgs)
4719 {
4720 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4721 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4722 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4723 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4724 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4725 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4726 default:
4727 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4728 }
4729#else
4730 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4731#endif
4732 va_end(va);
4733
4734 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4735 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4736
4737 /*
4738 * If we're in REM context we'll sync back the state before 'jumping' to
4739 * the EMs failure handling.
4740 */
4741 pVM = cpu_single_env->pVM;
4742 pVCpu = cpu_single_env->pVCpu;
4743 Assert(pVCpu);
4744
4745 if (pVM->rem.s.fInREM)
4746 REMR3StateBack(pVM, pVCpu);
4747 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4748 AssertMsgFailed(("EMR3FatalError returned!\n"));
4749}
4750
4751
4752/**
4753 * Aborts the VM.
4754 *
4755 * @param rc VBox error code.
4756 * @param pszTip Hint about why/when this happened.
4757 */
4758void remAbort(int rc, const char *pszTip)
4759{
4760 PVM pVM;
4761 PVMCPU pVCpu;
4762
4763 /*
4764 * Bitch about it.
4765 */
4766 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4767 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4768
4769 /*
4770 * Jump back to where we entered the recompiler.
4771 */
4772 pVM = cpu_single_env->pVM;
4773 pVCpu = cpu_single_env->pVCpu;
4774 Assert(pVCpu);
4775
4776 if (pVM->rem.s.fInREM)
4777 REMR3StateBack(pVM, pVCpu);
4778
4779 EMR3FatalError(pVCpu, rc);
4780 AssertMsgFailed(("EMR3FatalError returned!\n"));
4781}
4782
4783
4784/**
4785 * Dumps a linux system call.
4786 * @param pVCpu VMCPU handle.
4787 */
4788void remR3DumpLnxSyscall(PVMCPU pVCpu)
4789{
4790 static const char *apsz[] =
4791 {
4792 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4793 "sys_exit",
4794 "sys_fork",
4795 "sys_read",
4796 "sys_write",
4797 "sys_open", /* 5 */
4798 "sys_close",
4799 "sys_waitpid",
4800 "sys_creat",
4801 "sys_link",
4802 "sys_unlink", /* 10 */
4803 "sys_execve",
4804 "sys_chdir",
4805 "sys_time",
4806 "sys_mknod",
4807 "sys_chmod", /* 15 */
4808 "sys_lchown16",
4809 "sys_ni_syscall", /* old break syscall holder */
4810 "sys_stat",
4811 "sys_lseek",
4812 "sys_getpid", /* 20 */
4813 "sys_mount",
4814 "sys_oldumount",
4815 "sys_setuid16",
4816 "sys_getuid16",
4817 "sys_stime", /* 25 */
4818 "sys_ptrace",
4819 "sys_alarm",
4820 "sys_fstat",
4821 "sys_pause",
4822 "sys_utime", /* 30 */
4823 "sys_ni_syscall", /* old stty syscall holder */
4824 "sys_ni_syscall", /* old gtty syscall holder */
4825 "sys_access",
4826 "sys_nice",
4827 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4828 "sys_sync",
4829 "sys_kill",
4830 "sys_rename",
4831 "sys_mkdir",
4832 "sys_rmdir", /* 40 */
4833 "sys_dup",
4834 "sys_pipe",
4835 "sys_times",
4836 "sys_ni_syscall", /* old prof syscall holder */
4837 "sys_brk", /* 45 */
4838 "sys_setgid16",
4839 "sys_getgid16",
4840 "sys_signal",
4841 "sys_geteuid16",
4842 "sys_getegid16", /* 50 */
4843 "sys_acct",
4844 "sys_umount", /* recycled never used phys() */
4845 "sys_ni_syscall", /* old lock syscall holder */
4846 "sys_ioctl",
4847 "sys_fcntl", /* 55 */
4848 "sys_ni_syscall", /* old mpx syscall holder */
4849 "sys_setpgid",
4850 "sys_ni_syscall", /* old ulimit syscall holder */
4851 "sys_olduname",
4852 "sys_umask", /* 60 */
4853 "sys_chroot",
4854 "sys_ustat",
4855 "sys_dup2",
4856 "sys_getppid",
4857 "sys_getpgrp", /* 65 */
4858 "sys_setsid",
4859 "sys_sigaction",
4860 "sys_sgetmask",
4861 "sys_ssetmask",
4862 "sys_setreuid16", /* 70 */
4863 "sys_setregid16",
4864 "sys_sigsuspend",
4865 "sys_sigpending",
4866 "sys_sethostname",
4867 "sys_setrlimit", /* 75 */
4868 "sys_old_getrlimit",
4869 "sys_getrusage",
4870 "sys_gettimeofday",
4871 "sys_settimeofday",
4872 "sys_getgroups16", /* 80 */
4873 "sys_setgroups16",
4874 "old_select",
4875 "sys_symlink",
4876 "sys_lstat",
4877 "sys_readlink", /* 85 */
4878 "sys_uselib",
4879 "sys_swapon",
4880 "sys_reboot",
4881 "old_readdir",
4882 "old_mmap", /* 90 */
4883 "sys_munmap",
4884 "sys_truncate",
4885 "sys_ftruncate",
4886 "sys_fchmod",
4887 "sys_fchown16", /* 95 */
4888 "sys_getpriority",
4889 "sys_setpriority",
4890 "sys_ni_syscall", /* old profil syscall holder */
4891 "sys_statfs",
4892 "sys_fstatfs", /* 100 */
4893 "sys_ioperm",
4894 "sys_socketcall",
4895 "sys_syslog",
4896 "sys_setitimer",
4897 "sys_getitimer", /* 105 */
4898 "sys_newstat",
4899 "sys_newlstat",
4900 "sys_newfstat",
4901 "sys_uname",
4902 "sys_iopl", /* 110 */
4903 "sys_vhangup",
4904 "sys_ni_syscall", /* old "idle" system call */
4905 "sys_vm86old",
4906 "sys_wait4",
4907 "sys_swapoff", /* 115 */
4908 "sys_sysinfo",
4909 "sys_ipc",
4910 "sys_fsync",
4911 "sys_sigreturn",
4912 "sys_clone", /* 120 */
4913 "sys_setdomainname",
4914 "sys_newuname",
4915 "sys_modify_ldt",
4916 "sys_adjtimex",
4917 "sys_mprotect", /* 125 */
4918 "sys_sigprocmask",
4919 "sys_ni_syscall", /* old "create_module" */
4920 "sys_init_module",
4921 "sys_delete_module",
4922 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4923 "sys_quotactl",
4924 "sys_getpgid",
4925 "sys_fchdir",
4926 "sys_bdflush",
4927 "sys_sysfs", /* 135 */
4928 "sys_personality",
4929 "sys_ni_syscall", /* reserved for afs_syscall */
4930 "sys_setfsuid16",
4931 "sys_setfsgid16",
4932 "sys_llseek", /* 140 */
4933 "sys_getdents",
4934 "sys_select",
4935 "sys_flock",
4936 "sys_msync",
4937 "sys_readv", /* 145 */
4938 "sys_writev",
4939 "sys_getsid",
4940 "sys_fdatasync",
4941 "sys_sysctl",
4942 "sys_mlock", /* 150 */
4943 "sys_munlock",
4944 "sys_mlockall",
4945 "sys_munlockall",
4946 "sys_sched_setparam",
4947 "sys_sched_getparam", /* 155 */
4948 "sys_sched_setscheduler",
4949 "sys_sched_getscheduler",
4950 "sys_sched_yield",
4951 "sys_sched_get_priority_max",
4952 "sys_sched_get_priority_min", /* 160 */
4953 "sys_sched_rr_get_interval",
4954 "sys_nanosleep",
4955 "sys_mremap",
4956 "sys_setresuid16",
4957 "sys_getresuid16", /* 165 */
4958 "sys_vm86",
4959 "sys_ni_syscall", /* Old sys_query_module */
4960 "sys_poll",
4961 "sys_nfsservctl",
4962 "sys_setresgid16", /* 170 */
4963 "sys_getresgid16",
4964 "sys_prctl",
4965 "sys_rt_sigreturn",
4966 "sys_rt_sigaction",
4967 "sys_rt_sigprocmask", /* 175 */
4968 "sys_rt_sigpending",
4969 "sys_rt_sigtimedwait",
4970 "sys_rt_sigqueueinfo",
4971 "sys_rt_sigsuspend",
4972 "sys_pread64", /* 180 */
4973 "sys_pwrite64",
4974 "sys_chown16",
4975 "sys_getcwd",
4976 "sys_capget",
4977 "sys_capset", /* 185 */
4978 "sys_sigaltstack",
4979 "sys_sendfile",
4980 "sys_ni_syscall", /* reserved for streams1 */
4981 "sys_ni_syscall", /* reserved for streams2 */
4982 "sys_vfork", /* 190 */
4983 "sys_getrlimit",
4984 "sys_mmap2",
4985 "sys_truncate64",
4986 "sys_ftruncate64",
4987 "sys_stat64", /* 195 */
4988 "sys_lstat64",
4989 "sys_fstat64",
4990 "sys_lchown",
4991 "sys_getuid",
4992 "sys_getgid", /* 200 */
4993 "sys_geteuid",
4994 "sys_getegid",
4995 "sys_setreuid",
4996 "sys_setregid",
4997 "sys_getgroups", /* 205 */
4998 "sys_setgroups",
4999 "sys_fchown",
5000 "sys_setresuid",
5001 "sys_getresuid",
5002 "sys_setresgid", /* 210 */
5003 "sys_getresgid",
5004 "sys_chown",
5005 "sys_setuid",
5006 "sys_setgid",
5007 "sys_setfsuid", /* 215 */
5008 "sys_setfsgid",
5009 "sys_pivot_root",
5010 "sys_mincore",
5011 "sys_madvise",
5012 "sys_getdents64", /* 220 */
5013 "sys_fcntl64",
5014 "sys_ni_syscall", /* reserved for TUX */
5015 "sys_ni_syscall",
5016 "sys_gettid",
5017 "sys_readahead", /* 225 */
5018 "sys_setxattr",
5019 "sys_lsetxattr",
5020 "sys_fsetxattr",
5021 "sys_getxattr",
5022 "sys_lgetxattr", /* 230 */
5023 "sys_fgetxattr",
5024 "sys_listxattr",
5025 "sys_llistxattr",
5026 "sys_flistxattr",
5027 "sys_removexattr", /* 235 */
5028 "sys_lremovexattr",
5029 "sys_fremovexattr",
5030 "sys_tkill",
5031 "sys_sendfile64",
5032 "sys_futex", /* 240 */
5033 "sys_sched_setaffinity",
5034 "sys_sched_getaffinity",
5035 "sys_set_thread_area",
5036 "sys_get_thread_area",
5037 "sys_io_setup", /* 245 */
5038 "sys_io_destroy",
5039 "sys_io_getevents",
5040 "sys_io_submit",
5041 "sys_io_cancel",
5042 "sys_fadvise64", /* 250 */
5043 "sys_ni_syscall",
5044 "sys_exit_group",
5045 "sys_lookup_dcookie",
5046 "sys_epoll_create",
5047 "sys_epoll_ctl", /* 255 */
5048 "sys_epoll_wait",
5049 "sys_remap_file_pages",
5050 "sys_set_tid_address",
5051 "sys_timer_create",
5052 "sys_timer_settime", /* 260 */
5053 "sys_timer_gettime",
5054 "sys_timer_getoverrun",
5055 "sys_timer_delete",
5056 "sys_clock_settime",
5057 "sys_clock_gettime", /* 265 */
5058 "sys_clock_getres",
5059 "sys_clock_nanosleep",
5060 "sys_statfs64",
5061 "sys_fstatfs64",
5062 "sys_tgkill", /* 270 */
5063 "sys_utimes",
5064 "sys_fadvise64_64",
5065 "sys_ni_syscall" /* sys_vserver */
5066 };
5067
5068 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5069 switch (uEAX)
5070 {
5071 default:
5072 if (uEAX < RT_ELEMENTS(apsz))
5073 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5074 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5075 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5076 else
5077 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5078 break;
5079
5080 }
5081}
5082
5083
5084/**
5085 * Dumps an OpenBSD system call.
5086 * @param pVCpu VMCPU handle.
5087 */
5088void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5089{
5090 static const char *apsz[] =
5091 {
5092 "SYS_syscall", //0
5093 "SYS_exit", //1
5094 "SYS_fork", //2
5095 "SYS_read", //3
5096 "SYS_write", //4
5097 "SYS_open", //5
5098 "SYS_close", //6
5099 "SYS_wait4", //7
5100 "SYS_8",
5101 "SYS_link", //9
5102 "SYS_unlink", //10
5103 "SYS_11",
5104 "SYS_chdir", //12
5105 "SYS_fchdir", //13
5106 "SYS_mknod", //14
5107 "SYS_chmod", //15
5108 "SYS_chown", //16
5109 "SYS_break", //17
5110 "SYS_18",
5111 "SYS_19",
5112 "SYS_getpid", //20
5113 "SYS_mount", //21
5114 "SYS_unmount", //22
5115 "SYS_setuid", //23
5116 "SYS_getuid", //24
5117 "SYS_geteuid", //25
5118 "SYS_ptrace", //26
5119 "SYS_recvmsg", //27
5120 "SYS_sendmsg", //28
5121 "SYS_recvfrom", //29
5122 "SYS_accept", //30
5123 "SYS_getpeername", //31
5124 "SYS_getsockname", //32
5125 "SYS_access", //33
5126 "SYS_chflags", //34
5127 "SYS_fchflags", //35
5128 "SYS_sync", //36
5129 "SYS_kill", //37
5130 "SYS_38",
5131 "SYS_getppid", //39
5132 "SYS_40",
5133 "SYS_dup", //41
5134 "SYS_opipe", //42
5135 "SYS_getegid", //43
5136 "SYS_profil", //44
5137 "SYS_ktrace", //45
5138 "SYS_sigaction", //46
5139 "SYS_getgid", //47
5140 "SYS_sigprocmask", //48
5141 "SYS_getlogin", //49
5142 "SYS_setlogin", //50
5143 "SYS_acct", //51
5144 "SYS_sigpending", //52
5145 "SYS_osigaltstack", //53
5146 "SYS_ioctl", //54
5147 "SYS_reboot", //55
5148 "SYS_revoke", //56
5149 "SYS_symlink", //57
5150 "SYS_readlink", //58
5151 "SYS_execve", //59
5152 "SYS_umask", //60
5153 "SYS_chroot", //61
5154 "SYS_62",
5155 "SYS_63",
5156 "SYS_64",
5157 "SYS_65",
5158 "SYS_vfork", //66
5159 "SYS_67",
5160 "SYS_68",
5161 "SYS_sbrk", //69
5162 "SYS_sstk", //70
5163 "SYS_61",
5164 "SYS_vadvise", //72
5165 "SYS_munmap", //73
5166 "SYS_mprotect", //74
5167 "SYS_madvise", //75
5168 "SYS_76",
5169 "SYS_77",
5170 "SYS_mincore", //78
5171 "SYS_getgroups", //79
5172 "SYS_setgroups", //80
5173 "SYS_getpgrp", //81
5174 "SYS_setpgid", //82
5175 "SYS_setitimer", //83
5176 "SYS_84",
5177 "SYS_85",
5178 "SYS_getitimer", //86
5179 "SYS_87",
5180 "SYS_88",
5181 "SYS_89",
5182 "SYS_dup2", //90
5183 "SYS_91",
5184 "SYS_fcntl", //92
5185 "SYS_select", //93
5186 "SYS_94",
5187 "SYS_fsync", //95
5188 "SYS_setpriority", //96
5189 "SYS_socket", //97
5190 "SYS_connect", //98
5191 "SYS_99",
5192 "SYS_getpriority", //100
5193 "SYS_101",
5194 "SYS_102",
5195 "SYS_sigreturn", //103
5196 "SYS_bind", //104
5197 "SYS_setsockopt", //105
5198 "SYS_listen", //106
5199 "SYS_107",
5200 "SYS_108",
5201 "SYS_109",
5202 "SYS_110",
5203 "SYS_sigsuspend", //111
5204 "SYS_112",
5205 "SYS_113",
5206 "SYS_114",
5207 "SYS_115",
5208 "SYS_gettimeofday", //116
5209 "SYS_getrusage", //117
5210 "SYS_getsockopt", //118
5211 "SYS_119",
5212 "SYS_readv", //120
5213 "SYS_writev", //121
5214 "SYS_settimeofday", //122
5215 "SYS_fchown", //123
5216 "SYS_fchmod", //124
5217 "SYS_125",
5218 "SYS_setreuid", //126
5219 "SYS_setregid", //127
5220 "SYS_rename", //128
5221 "SYS_129",
5222 "SYS_130",
5223 "SYS_flock", //131
5224 "SYS_mkfifo", //132
5225 "SYS_sendto", //133
5226 "SYS_shutdown", //134
5227 "SYS_socketpair", //135
5228 "SYS_mkdir", //136
5229 "SYS_rmdir", //137
5230 "SYS_utimes", //138
5231 "SYS_139",
5232 "SYS_adjtime", //140
5233 "SYS_141",
5234 "SYS_142",
5235 "SYS_143",
5236 "SYS_144",
5237 "SYS_145",
5238 "SYS_146",
5239 "SYS_setsid", //147
5240 "SYS_quotactl", //148
5241 "SYS_149",
5242 "SYS_150",
5243 "SYS_151",
5244 "SYS_152",
5245 "SYS_153",
5246 "SYS_154",
5247 "SYS_nfssvc", //155
5248 "SYS_156",
5249 "SYS_157",
5250 "SYS_158",
5251 "SYS_159",
5252 "SYS_160",
5253 "SYS_getfh", //161
5254 "SYS_162",
5255 "SYS_163",
5256 "SYS_164",
5257 "SYS_sysarch", //165
5258 "SYS_166",
5259 "SYS_167",
5260 "SYS_168",
5261 "SYS_169",
5262 "SYS_170",
5263 "SYS_171",
5264 "SYS_172",
5265 "SYS_pread", //173
5266 "SYS_pwrite", //174
5267 "SYS_175",
5268 "SYS_176",
5269 "SYS_177",
5270 "SYS_178",
5271 "SYS_179",
5272 "SYS_180",
5273 "SYS_setgid", //181
5274 "SYS_setegid", //182
5275 "SYS_seteuid", //183
5276 "SYS_lfs_bmapv", //184
5277 "SYS_lfs_markv", //185
5278 "SYS_lfs_segclean", //186
5279 "SYS_lfs_segwait", //187
5280 "SYS_188",
5281 "SYS_189",
5282 "SYS_190",
5283 "SYS_pathconf", //191
5284 "SYS_fpathconf", //192
5285 "SYS_swapctl", //193
5286 "SYS_getrlimit", //194
5287 "SYS_setrlimit", //195
5288 "SYS_getdirentries", //196
5289 "SYS_mmap", //197
5290 "SYS___syscall", //198
5291 "SYS_lseek", //199
5292 "SYS_truncate", //200
5293 "SYS_ftruncate", //201
5294 "SYS___sysctl", //202
5295 "SYS_mlock", //203
5296 "SYS_munlock", //204
5297 "SYS_205",
5298 "SYS_futimes", //206
5299 "SYS_getpgid", //207
5300 "SYS_xfspioctl", //208
5301 "SYS_209",
5302 "SYS_210",
5303 "SYS_211",
5304 "SYS_212",
5305 "SYS_213",
5306 "SYS_214",
5307 "SYS_215",
5308 "SYS_216",
5309 "SYS_217",
5310 "SYS_218",
5311 "SYS_219",
5312 "SYS_220",
5313 "SYS_semget", //221
5314 "SYS_222",
5315 "SYS_223",
5316 "SYS_224",
5317 "SYS_msgget", //225
5318 "SYS_msgsnd", //226
5319 "SYS_msgrcv", //227
5320 "SYS_shmat", //228
5321 "SYS_229",
5322 "SYS_shmdt", //230
5323 "SYS_231",
5324 "SYS_clock_gettime", //232
5325 "SYS_clock_settime", //233
5326 "SYS_clock_getres", //234
5327 "SYS_235",
5328 "SYS_236",
5329 "SYS_237",
5330 "SYS_238",
5331 "SYS_239",
5332 "SYS_nanosleep", //240
5333 "SYS_241",
5334 "SYS_242",
5335 "SYS_243",
5336 "SYS_244",
5337 "SYS_245",
5338 "SYS_246",
5339 "SYS_247",
5340 "SYS_248",
5341 "SYS_249",
5342 "SYS_minherit", //250
5343 "SYS_rfork", //251
5344 "SYS_poll", //252
5345 "SYS_issetugid", //253
5346 "SYS_lchown", //254
5347 "SYS_getsid", //255
5348 "SYS_msync", //256
5349 "SYS_257",
5350 "SYS_258",
5351 "SYS_259",
5352 "SYS_getfsstat", //260
5353 "SYS_statfs", //261
5354 "SYS_fstatfs", //262
5355 "SYS_pipe", //263
5356 "SYS_fhopen", //264
5357 "SYS_265",
5358 "SYS_fhstatfs", //266
5359 "SYS_preadv", //267
5360 "SYS_pwritev", //268
5361 "SYS_kqueue", //269
5362 "SYS_kevent", //270
5363 "SYS_mlockall", //271
5364 "SYS_munlockall", //272
5365 "SYS_getpeereid", //273
5366 "SYS_274",
5367 "SYS_275",
5368 "SYS_276",
5369 "SYS_277",
5370 "SYS_278",
5371 "SYS_279",
5372 "SYS_280",
5373 "SYS_getresuid", //281
5374 "SYS_setresuid", //282
5375 "SYS_getresgid", //283
5376 "SYS_setresgid", //284
5377 "SYS_285",
5378 "SYS_mquery", //286
5379 "SYS_closefrom", //287
5380 "SYS_sigaltstack", //288
5381 "SYS_shmget", //289
5382 "SYS_semop", //290
5383 "SYS_stat", //291
5384 "SYS_fstat", //292
5385 "SYS_lstat", //293
5386 "SYS_fhstat", //294
5387 "SYS___semctl", //295
5388 "SYS_shmctl", //296
5389 "SYS_msgctl", //297
5390 "SYS_MAXSYSCALL", //298
5391 //299
5392 //300
5393 };
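    /*
     * OpenBSD/i386 passes the syscall number in EAX and the arguments on the
     * guest stack, so the code below dumps EAX plus the first eight dwords
     * read from the guest ESP.
     */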
    uint32_t uEAX;
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
            break;
    }
}
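/* A line logged by the code above would look roughly as follows; the values
 * shown here are illustrative, not captured output:
 *   REM: OpenBSD syscall   4: SYS_write (eip=1000152e 00000001 0805c000 00000012 00000000 00000000 00000000 00000000 00000000)
 */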


#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The DLL main entry point (stub).
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}

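/* Minimal byte-wise memcpy for the no-CRT build: even without the C runtime
 * the compiler may emit implicit memcpy calls (e.g. for struct assignments),
 * so a definition has to be present in the binary. */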
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}

#endif
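/* Deliberately left empty: this hook appears to be invoked by the QEMU CPU
 * core when the guest's SMM state changes, but REM does not model System
 * Management Mode, so there is nothing to update here. */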
void cpu_smm_update(CPUX86State *env)
{
}