VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@42101

Last change on this file since 42101 was 41906, checked in by vboxsync on 2012-06-24:

CPUM: Combined the visible and hidden selector register data into one structure. Preparing for lazily resolving+caching of hidden registers in raw-mode.

1/* $Id: VBoxRecompiler.c 41906 2012-06-24 15:44:03Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
76/** Copy 80-bit fpu register at pSrc to pDst.
77 * This is probably faster than *calling* memcpy.
78 */
79#define REM_COPY_FPU_REG(pDst, pSrc) \
80 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
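/* Illustrative only (hypothetical locals, not compiled): copying one 80-bit register. */
#if 0
    X86FPUMMX SrcReg, DstReg;
    REM_COPY_FPU_REG(&DstReg, &SrcReg);
#endif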
81
82/** How remR3RunLoggingStep operates. */
83#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day when we have nothing to do. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMCOUNTER gStatCpuGetTSC;
130static STAMCOUNTER gStatRefuseTFInhibit;
131static STAMCOUNTER gStatRefuseVM86;
132static STAMCOUNTER gStatRefusePaging;
133static STAMCOUNTER gStatRefusePAE;
134static STAMCOUNTER gStatRefuseIOPLNot0;
135static STAMCOUNTER gStatRefuseIF0;
136static STAMCOUNTER gStatRefuseCode16;
137static STAMCOUNTER gStatRefuseWP0;
138static STAMCOUNTER gStatRefuseRing1or2;
139static STAMCOUNTER gStatRefuseCanExecute;
140static STAMCOUNTER gStatREMGDTChange;
141static STAMCOUNTER gStatREMIDTChange;
142static STAMCOUNTER gStatREMLDTRChange;
143static STAMCOUNTER gStatREMTRChange;
144static STAMCOUNTER gStatSelOutOfSync[6];
145static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
146static STAMCOUNTER gStatFlushTBs;
147#endif
148/* in exec.c */
149extern uint32_t tlb_flush_count;
150extern uint32_t tb_flush_count;
151extern uint32_t tb_phys_invalidate_count;
152
153/*
154 * Global stuff.
155 */
156
157/** MMIO read callbacks. */
158CPUReadMemoryFunc *g_apfnMMIORead[3] =
159{
160 remR3MMIOReadU8,
161 remR3MMIOReadU16,
162 remR3MMIOReadU32
163};
164
165/** MMIO write callbacks. */
166CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
167{
168 remR3MMIOWriteU8,
169 remR3MMIOWriteU16,
170 remR3MMIOWriteU32
171};
172
173/** Handler read callbacks. */
174CPUReadMemoryFunc *g_apfnHandlerRead[3] =
175{
176 remR3HandlerReadU8,
177 remR3HandlerReadU16,
178 remR3HandlerReadU32
179};
180
181/** Handler write callbacks. */
182CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
183{
184 remR3HandlerWriteU8,
185 remR3HandlerWriteU16,
186 remR3HandlerWriteU32
187};
188
189
190#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
191/*
192 * Debugger commands.
193 */
194static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
195
196/** '.remstep' arguments. */
197static const DBGCVARDESC g_aArgRemStep[] =
198{
199 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
200 { 0, ~0U, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
201};
202
203/** Command descriptors. */
204static const DBGCCMD g_aCmds[] =
205{
206 {
207 .pszCmd = "remstep",
208 .cArgsMin = 0,
209 .cArgsMax = 1,
210 .paArgDescs = &g_aArgRemStep[0],
211 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
212 .fFlags = 0,
213 .pfnHandler = remR3CmdDisasEnableStepping,
214 .pszSyntax = "[on/off]",
215 .pszDescription = "Enables or disables single stepping with logged disassembly. "
216 "If no argument is given, the current state is shown."
217 }
218};
219#endif
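/* Usage sketch (illustrative, not compiled): once registered, the command above
   is invoked from the VM debugger console with the external-command prefix,
   e.g. ".remstep on" to enable logged single stepping, ".remstep off" to
   disable it, and plain ".remstep" to query the current state -- mirroring
   the pszSyntax/pszDescription strings in g_aCmds. */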
220
221/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
222 * @todo huh??? That cannot be the case on the mac... So, this
223 * point is probably not valid any longer. */
224uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
244/**
245 * Initializes the REM.
246 *
247 * @returns VBox status code.
248 * @param pVM The VM to operate on.
249 */
250REMR3DECL(int) REMR3Init(PVM pVM)
251{
252 PREMHANDLERNOTIFICATION pCur;
253 uint32_t u32Dummy;
254 int rc;
255 unsigned i;
256
257#ifdef VBOX_ENABLE_VBOXREM64
258 LogRel(("Using 64-bit aware REM\n"));
259#endif
260
261 /*
262 * Assert sanity.
263 */
264 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
265 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
266 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
267#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
268 Assert(!testmath());
269#endif
270
271 /*
272 * Init some internal data members.
273 */
274 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
275 pVM->rem.s.Env.pVM = pVM;
276#ifdef CPU_RAW_MODE_INIT
277 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
278#endif
279
280 /*
281 * Initialize the REM critical section.
282 *
283 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
284 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
285 * deadlocks. (mostly pgm vs rem locking)
286 */
287 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
288 AssertRCReturn(rc, rc);
289
290 /* ctx. */
291 pVM->rem.s.pCtx = NULL; /* set when executing code. */
292 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
293
294 /* ignore all notifications */
295 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
296
297 code_gen_prologue = RTMemExecAlloc(_1K);
298 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
299
300 cpu_exec_init_all(0);
301
302 /*
303 * Init the recompiler.
304 */
305 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
306 {
307 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
308 return VERR_GENERAL_FAILURE;
309 }
310 PVMCPU pVCpu = VMMGetCpu(pVM);
311 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
312 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
313
314 EMRemLock(pVM);
315 cpu_reset(&pVM->rem.s.Env);
316 EMRemUnlock(pVM);
317
318 /* allocate code buffer for single instruction emulation. */
319 pVM->rem.s.Env.cbCodeBuffer = 4096;
320 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
321 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
322
323 /* Finally, set the cpu_single_env global. */
324 cpu_single_env = &pVM->rem.s.Env;
325
326 /* Nothing is pending by default */
327 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
328
329 /*
330 * Register ram types.
331 */
332 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
333 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
334 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
335 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
336 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
337
338 /* stop ignoring. */
339 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
340
341 /*
342 * Register the saved state data unit.
343 */
344 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
345 NULL, NULL, NULL,
346 NULL, remR3Save, NULL,
347 NULL, remR3Load, NULL);
348 if (RT_FAILURE(rc))
349 return rc;
350
351#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
352 /*
353 * Debugger commands.
354 */
355 static bool fRegisteredCmds = false;
356 if (!fRegisteredCmds)
357 {
358 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
359 if (RT_SUCCESS(rc))
360 fRegisteredCmds = true;
361 }
362#endif
363
364#ifdef VBOX_WITH_STATISTICS
365 /*
366 * Statistics.
367 */
368 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
369 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
370 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
371 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
372 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
373 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
374 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
375 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
376 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
377 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
378 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");
379
380 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
381
382 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
383 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
384 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
385 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
386 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
387 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
388 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
389 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
390 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
391 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
392 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
393
394 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
395 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
396 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
397 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
398
399 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
400 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
401 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
402 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
403 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
405
406 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
407 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
408 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
409 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
410 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
411 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
412
413 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
414#endif /* VBOX_WITH_STATISTICS */
415 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
416 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);
417
418 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
419 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
420 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
421
422
423#ifdef DEBUG_ALL_LOGGING
424 loglevel = ~0;
425#endif
426
427 /*
428 * Init the handler notification lists.
429 */
430 pVM->rem.s.idxPendingList = UINT32_MAX;
431 pVM->rem.s.idxFreeList = 0;
432
433 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
434 {
435 pCur = &pVM->rem.s.aHandlerNotifications[i];
436 pCur->idxNext = i + 1;
437 pCur->idxSelf = i;
438 }
439 pCur->idxNext = UINT32_MAX; /* the last record. */
440
441 return rc;
442}
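/* Illustrative only: popping a record off the index-linked free list that the
   loop above builds.  The real allocator elsewhere in this file does this with
   atomic operations; this sketch shows just the data-structure idea. */
#if 0
static PREMHANDLERNOTIFICATION remR3ExampleAllocNotification(PVM pVM)
{
    uint32_t idx = pVM->rem.s.idxFreeList;
    if (idx == UINT32_MAX)                      /* UINT32_MAX terminates the list */
        return NULL;
    PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[idx];
    pVM->rem.s.idxFreeList = pRec->idxNext;     /* unlink the head record */
    return pRec;
}
#endif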
443
444
445/**
446 * Finalizes the REM initialization.
447 *
448 * This is called after all components, devices and drivers have
449 * been initialized. Its main purpose is to finish the RAM-related
450 * initialization.
451 *
452 * @returns VBox status code.
453 *
454 * @param pVM The VM handle.
455 */
456REMR3DECL(int) REMR3InitFinalize(PVM pVM)
457{
458 int rc;
459
460 /*
461 * Ram size & dirty bit map.
462 */
463 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
464 pVM->rem.s.fGCPhysLastRamFixed = true;
465#ifdef RT_STRICT
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
467#else
468 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
469#endif
470 return rc;
471}
472
473/**
474 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
475 *
476 * @returns VBox status code.
477 * @param pVM The VM handle.
478 * @param fGuarded Whether to guard the map.
479 */
480static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
481{
482 int rc = VINF_SUCCESS;
483 RTGCPHYS cb;
484
485 AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);
486
487 cb = pVM->rem.s.GCPhysLastRam + 1;
488 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
489 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
490 VERR_OUT_OF_RANGE);
491
492 ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
493 AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
494
495 if (!fGuarded)
496 {
497 ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
498 AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
499 }
500 else
501 {
502 /*
503 * Round the bitmap up to cover a whole multiple of 4GB of RAM, leaving at least 64KB of inaccessible guard after it.
504 */
505 uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
506 uint32_t cbBitmapFull = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
507 if (cbBitmapFull == cbBitmapAligned)
508 cbBitmapFull += _4G >> PAGE_SHIFT;
509 else if (cbBitmapFull - cbBitmapAligned < _64K)
510 cbBitmapFull += _64K;
511
512 ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
513 AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
514
515 rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
516 if (RT_FAILURE(rc))
517 {
518 RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
519 AssertLogRelRCReturn(rc, rc);
520 }
521
522 ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
523 }
524
525 /* initialize it. */
526 memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
527 return rc;
528}
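/* Illustrative only: the guarded-allocation pattern used above, reduced to its
   essentials -- over-allocate page-aligned memory and revoke access to the
   tail so that any overrun faults immediately. */
#if 0
static void *remR3ExampleAllocGuarded(size_t cbPayload)
{
    size_t   cbAligned = RT_ALIGN_Z(cbPayload, PAGE_SIZE);
    size_t   cbTotal   = cbAligned + PAGE_SIZE;         /* one guard page */
    uint8_t *pb        = (uint8_t *)RTMemPageAlloc(cbTotal);
    if (pb && RT_FAILURE(RTMemProtect(pb + cbAligned, PAGE_SIZE, RTMEM_PROT_NONE)))
    {
        RTMemPageFree(pb, cbTotal);
        pb = NULL;
    }
    /* like the code above, place the payload so it ends right at the guard */
    return pb ? pb + cbAligned - cbPayload : NULL;
}
#endif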
529
530
531/**
532 * Terminates the REM.
533 *
534 * Termination means cleaning up and freeing all resources,
535 * the VM itself is at this point powered off or suspended.
536 *
537 * @returns VBox status code.
538 * @param pVM The VM to operate on.
539 */
540REMR3DECL(int) REMR3Term(PVM pVM)
541{
542#ifdef VBOX_WITH_STATISTICS
543 /*
544 * Statistics.
545 */
546 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
547 STAM_DEREG(pVM, &gStatCompilationQEmu);
548 STAM_DEREG(pVM, &gStatRunCodeQEmu);
549 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
550 STAM_DEREG(pVM, &gStatTimers);
551 STAM_DEREG(pVM, &gStatTBLookup);
552 STAM_DEREG(pVM, &gStatIRQ);
553 STAM_DEREG(pVM, &gStatRawCheck);
554 STAM_DEREG(pVM, &gStatMemRead);
555 STAM_DEREG(pVM, &gStatMemWrite);
556 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
557
558 STAM_DEREG(pVM, &gStatCpuGetTSC);
559
560 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
561 STAM_DEREG(pVM, &gStatRefuseVM86);
562 STAM_DEREG(pVM, &gStatRefusePaging);
563 STAM_DEREG(pVM, &gStatRefusePAE);
564 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
565 STAM_DEREG(pVM, &gStatRefuseIF0);
566 STAM_DEREG(pVM, &gStatRefuseCode16);
567 STAM_DEREG(pVM, &gStatRefuseWP0);
568 STAM_DEREG(pVM, &gStatRefuseRing1or2);
569 STAM_DEREG(pVM, &gStatRefuseCanExecute);
570 STAM_DEREG(pVM, &gStatFlushTBs);
571
572 STAM_DEREG(pVM, &gStatREMGDTChange);
573 STAM_DEREG(pVM, &gStatREMLDTRChange);
574 STAM_DEREG(pVM, &gStatREMIDTChange);
575 STAM_DEREG(pVM, &gStatREMTRChange);
576
577 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
578 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
579 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
580 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
581 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
582 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
583
584 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
585 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
586 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
587 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
588 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
589 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
590
591 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
592#endif /* VBOX_WITH_STATISTICS */
593
594 STAM_REL_DEREG(pVM, &tb_flush_count);
595 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
596 STAM_REL_DEREG(pVM, &tlb_flush_count);
597
598 return VINF_SUCCESS;
599}
600
601
602/**
603 * The VM is being reset.
604 *
605 * For the REM component this means to call the cpu_reset() and
606 * reinitialize some state variables.
607 *
608 * @param pVM VM handle.
609 */
610REMR3DECL(void) REMR3Reset(PVM pVM)
611{
612 EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */
613
614 /*
615 * Reset the REM cpu.
616 */
617 Assert(pVM->rem.s.cIgnoreAll == 0);
618 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
619 cpu_reset(&pVM->rem.s.Env);
620 pVM->rem.s.cInvalidatedPages = 0;
621 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
622 Assert(pVM->rem.s.cIgnoreAll == 0);
623
624 /* Clear raw ring 0 init state */
625 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
626
627 /* Flush the TBs the next time we execute code here. */
628 pVM->rem.s.fFlushTBs = true;
629
630 EMRemUnlock(pVM);
631}
632
633
634/**
635 * Execute state save operation.
636 *
637 * @returns VBox status code.
638 * @param pVM VM Handle.
639 * @param pSSM SSM operation handle.
640 */
641static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
642{
643 PREM pRem = &pVM->rem.s;
644
645 /*
646 * Save the required CPU Env bits.
647 * (Not much because we're never in REM when doing the save.)
648 */
649 LogFlow(("remR3Save:\n"));
650 Assert(!pRem->fInREM);
651 SSMR3PutU32(pSSM, pRem->Env.hflags);
652 SSMR3PutU32(pSSM, ~0); /* separator */
653
654 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
655 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
656 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
657
658 return SSMR3PutU32(pSSM, ~0); /* terminator */
659}
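/* For reference: the unit written above is five u32 fields -- Env.hflags,
 * a ~0 separator, the CPU_RAW_RING0 indicator, u32PendingInterrupt, and a
 * ~0 terminator -- and remR3Load() below consumes them in the same order. */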
660
661
662/**
663 * Execute state load operation.
664 *
665 * @returns VBox status code.
666 * @param pVM VM Handle.
667 * @param pSSM SSM operation handle.
668 * @param uVersion Data layout version.
669 * @param uPass The data pass.
670 */
671static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
672{
673 uint32_t u32Dummy;
674 uint32_t fRawRing0 = false;
675 uint32_t u32Sep;
676 uint32_t i;
677 int rc;
678 PREM pRem;
679
680 LogFlow(("remR3Load:\n"));
681 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
682
683 /*
684 * Validate version.
685 */
686 if ( uVersion != REM_SAVED_STATE_VERSION
687 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
688 {
689 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
690 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
691 }
692
693 /*
694 * Do a reset to be on the safe side...
695 */
696 REMR3Reset(pVM);
697
698 /*
699 * Ignore all ignorable notifications.
700 * (Not doing this will cause serious trouble.)
701 */
702 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
703
704 /*
705 * Load the required CPU Env bits.
706 * (Not much because we're never in REM when doing the save.)
707 */
708 pRem = &pVM->rem.s;
709 Assert(!pRem->fInREM);
710 SSMR3GetU32(pSSM, &pRem->Env.hflags);
711 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
712 {
713 /* Redundant REM CPU state has to be loaded, but can be ignored. */
714 CPUX86State_Ver16 temp;
715 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
716 }
717
718 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
719 if (RT_FAILURE(rc))
720 return rc;
721 if (u32Sep != ~0U)
722 {
723 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
724 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
725 }
726
727 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
728 SSMR3GetUInt(pSSM, &fRawRing0);
729 if (fRawRing0)
730 pRem->Env.state |= CPU_RAW_RING0;
731
732 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
733 {
734 /*
735 * Load the REM stuff.
736 */
737 /** @todo r=bird: We should just drop all these items, restoring doesn't make
738 * sense. */
739 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
740 if (RT_FAILURE(rc))
741 return rc;
742 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
743 {
744 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
745 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
746 }
747 for (i = 0; i < pRem->cInvalidatedPages; i++)
748 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
749 }
750
751 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
752 if (RT_FAILURE(rc))
753 return rc;
754
755 /* check the terminator. */
756 rc = SSMR3GetU32(pSSM, &u32Sep);
757 if (RT_FAILURE(rc))
758 return rc;
759 if (u32Sep != ~0U)
760 {
761 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
762 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
763 }
764
765 /*
766 * Get the CPUID features.
767 */
768 PVMCPU pVCpu = VMMGetCpu(pVM);
769 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
770 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
771
772 /*
773 * Stop ignoring ignorable notifications.
774 */
775 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
776
777 /*
778 * Sync the whole CPU state when executing code in the recompiler.
779 */
780 for (i = 0; i < pVM->cCpus; i++)
781 {
782 PVMCPU pVCpu = &pVM->aCpus[i];
783 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
784 }
785 return VINF_SUCCESS;
786}
787
788
789
790#undef LOG_GROUP
791#define LOG_GROUP LOG_GROUP_REM_RUN
792
793/**
794 * Single steps an instruction in recompiled mode.
795 *
796 * Before calling this function the REM state needs to be in sync with
797 * the VM. Call REMR3State() to perform the sync. It's only necessary
798 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
799 * and after calling REMR3StateBack().
800 *
801 * @returns VBox status code.
802 *
803 * @param pVM VM Handle.
804 * @param pVCpu VMCPU Handle.
805 */
806REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
807{
808 int rc, interrupt_request;
809 RTGCPTR GCPtrPC;
810 bool fBp;
811
812 /*
813 * Lock the REM - we don't wanna have anyone interrupting us
814 * while stepping - and enable single stepping. We also ignore
815 * pending interrupts and suchlike.
816 */
817 interrupt_request = pVM->rem.s.Env.interrupt_request;
818 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
819 pVM->rem.s.Env.interrupt_request = 0;
820 cpu_single_step(&pVM->rem.s.Env, 1);
821
822 /*
823 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
824 */
825 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
826 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);
827
828 /*
829 * Execute and handle the return code.
830 * We execute without enabling the cpu tick, so on success we'll
831 * just flip it on and off to make sure it moves
832 */
833 rc = cpu_exec(&pVM->rem.s.Env);
834 if (rc == EXCP_DEBUG)
835 {
836 TMR3NotifyResume(pVM, pVCpu);
837 TMR3NotifySuspend(pVM, pVCpu);
838 rc = VINF_EM_DBG_STEPPED;
839 }
840 else
841 {
842 switch (rc)
843 {
844 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
845 case EXCP_HLT:
846 case EXCP_HALTED: rc = VINF_EM_HALT; break;
847 case EXCP_RC:
848 rc = pVM->rem.s.rc;
849 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
850 break;
851 case EXCP_EXECUTE_RAW:
852 case EXCP_EXECUTE_HWACC:
853 /** @todo: is it correct? No! */
854 rc = VINF_SUCCESS;
855 break;
856 default:
857 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
858 rc = VERR_INTERNAL_ERROR;
859 break;
860 }
861 }
862
863 /*
864 * Restore the stuff we changed to prevent interruption.
865 * Unlock the REM.
866 */
867 if (fBp)
868 {
869 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
870 Assert(rc2 == 0); NOREF(rc2);
871 }
872 cpu_single_step(&pVM->rem.s.Env, 0);
873 pVM->rem.s.Env.interrupt_request = interrupt_request;
874
875 return rc;
876}
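/* Illustrative only: the sync/step/sync-back sequence prescribed by the doc
   comment above.  Error handling is reduced to the bare minimum. */
#if 0
    int rc = REMR3State(pVM, pVCpu);            /* sync VM state into REM */
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);             /* single step one instruction */
        int rc2 = REMR3StateBack(pVM, pVCpu);   /* sync the result back */
        AssertRC(rc2);
    }
#endif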
877
878
879/**
880 * Set a breakpoint using the REM facilities.
881 *
882 * @returns VBox status code.
883 * @param pVM The VM handle.
884 * @param Address The breakpoint address.
885 * @thread The emulation thread.
886 */
887REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
888{
889 VM_ASSERT_EMT(pVM);
890 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
891 {
892 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
893 return VINF_SUCCESS;
894 }
895 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
896 return VERR_REM_NO_MORE_BP_SLOTS;
897}
898
899
900/**
901 * Clears a breakpoint set by REMR3BreakpointSet().
902 *
903 * @returns VBox status code.
904 * @param pVM The VM handle.
905 * @param Address The breakpoint address.
906 * @thread The emulation thread.
907 */
908REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
909{
910 VM_ASSERT_EMT(pVM);
911 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
912 {
913 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
914 return VINF_SUCCESS;
915 }
916 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
917 return VERR_REM_BP_NOT_FOUND;
918}
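/* Illustrative only: a matched set/clear pair, both on the EMT as the @thread
   notes above require; 'Address' is a hypothetical RTGCUINTPTR. */
#if 0
    if (RT_SUCCESS(REMR3BreakpointSet(pVM, Address)))
    {
        /* ... step/run until the breakpoint is reported ... */
        int rc = REMR3BreakpointClear(pVM, Address);
        AssertRC(rc);
    }
#endif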
919
920
921/**
922 * Emulate an instruction.
923 *
924 * This function executes one instruction without letting anyone
925 * interrupt it. This is intended for being called while being in
926 * raw mode and thus will take care of all the state syncing between
927 * REM and the rest.
928 *
929 * @returns VBox status code.
930 * @param pVM VM handle.
931 * @param pVCpu VMCPU Handle.
932 */
933REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
934{
935 bool fFlushTBs;
936
937 int rc, rc2;
938 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
939
940 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
941 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
942 */
943 if (HWACCMIsEnabled(pVM))
944 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
945
946 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
947 fFlushTBs = pVM->rem.s.fFlushTBs;
948 pVM->rem.s.fFlushTBs = false;
949
950 /*
951 * Sync the state and enable single instruction / single stepping.
952 */
953 rc = REMR3State(pVM, pVCpu);
954 pVM->rem.s.fFlushTBs = fFlushTBs;
955 if (RT_SUCCESS(rc))
956 {
957 int interrupt_request = pVM->rem.s.Env.interrupt_request;
958 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
959#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
960 cpu_single_step(&pVM->rem.s.Env, 0);
961#endif
962 Assert(!pVM->rem.s.Env.singlestep_enabled);
963
964 /*
965 * Now we set the execute single instruction flag and enter the cpu_exec loop.
966 */
967 TMNotifyStartOfExecution(pVCpu);
968 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
969 rc = cpu_exec(&pVM->rem.s.Env);
970 TMNotifyEndOfExecution(pVCpu);
971 switch (rc)
972 {
973 /*
974 * Executed without anything out of the way happening.
975 */
976 case EXCP_SINGLE_INSTR:
977 rc = VINF_EM_RESCHEDULE;
978 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
979 break;
980
981 /*
982 * If we take a trap or start servicing a pending interrupt, we might end up here.
983 * (Timer thread or some other thread wishing EMT's attention.)
984 */
985 case EXCP_INTERRUPT:
986 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
987 rc = VINF_EM_RESCHEDULE;
988 break;
989
990 /*
991 * Single step, we assume!
992 * If there was a breakpoint there we're fucked now.
993 */
994 case EXCP_DEBUG:
995 if (pVM->rem.s.Env.watchpoint_hit)
996 {
997 /** @todo deal with watchpoints */
998 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
999 rc = VINF_EM_DBG_BREAKPOINT;
1000 }
1001 else
1002 {
1003 CPUBreakpoint *pBP;
1004 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1005 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1006 if (pBP->pc == GCPtrPC)
1007 break;
1008 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1009 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1010 }
1011 break;
1012
1013 /*
1014 * hlt instruction.
1015 */
1016 case EXCP_HLT:
1017 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1018 rc = VINF_EM_HALT;
1019 break;
1020
1021 /*
1022 * The VM has halted.
1023 */
1024 case EXCP_HALTED:
1025 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1026 rc = VINF_EM_HALT;
1027 break;
1028
1029 /*
1030 * Switch to RAW-mode.
1031 */
1032 case EXCP_EXECUTE_RAW:
1033 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1034 rc = VINF_EM_RESCHEDULE_RAW;
1035 break;
1036
1037 /*
1038 * Switch to hardware accelerated RAW-mode.
1039 */
1040 case EXCP_EXECUTE_HWACC:
1041 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1042 rc = VINF_EM_RESCHEDULE_HWACC;
1043 break;
1044
1045 /*
1046 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1047 */
1048 case EXCP_RC:
1049 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1050 rc = pVM->rem.s.rc;
1051 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1052 break;
1053
1054 /*
1055 * Figure out the rest when they arrive....
1056 */
1057 default:
1058 AssertMsgFailed(("rc=%d\n", rc));
1059 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1060 rc = VINF_EM_RESCHEDULE;
1061 break;
1062 }
1063
1064 /*
1065 * Switch back the state.
1066 */
1067 pVM->rem.s.Env.interrupt_request = interrupt_request;
1068 rc2 = REMR3StateBack(pVM, pVCpu);
1069 AssertRC(rc2);
1070 }
1071
1072 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1073 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1074 return rc;
1075}
1076
1077
1078/**
1079 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1080 *
1081 * @returns VBox status code.
1082 *
1083 * @param pVM The VM handle.
1084 * @param pVCpu The Virtual CPU handle.
1085 */
1086static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1087{
1088 int rc;
1089
1090 Assert(pVM->rem.s.fInREM);
1091#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1092 cpu_single_step(&pVM->rem.s.Env, 1);
1093#else
1094 Assert(!pVM->rem.s.Env.singlestep_enabled);
1095#endif
1096
1097 /*
1098 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1099 */
1100 for (;;)
1101 {
1102 char szBuf[256];
1103
1104 /*
1105 * Log the current registers state and instruction.
1106 */
1107 remR3StateUpdate(pVM, pVCpu);
1108 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1109 szBuf[0] = '\0';
1110 rc = DBGFR3DisasInstrEx(pVM,
1111 pVCpu->idCpu,
1112 0, /* Sel */
1113 0, /* GCPtr */
1114 DBGF_DISAS_FLAGS_CURRENT_GUEST
1115 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1116 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1117 szBuf,
1118 sizeof(szBuf),
1119 NULL);
1120 if (RT_FAILURE(rc))
1121 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1122 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1123
1124 /*
1125 * Execute the instruction.
1126 */
1127 TMNotifyStartOfExecution(pVCpu);
1128
1129 if ( pVM->rem.s.Env.exception_index < 0
1130 || pVM->rem.s.Env.exception_index > 256)
1131 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1132
1133#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1134 pVM->rem.s.Env.interrupt_request = 0;
1135#else
1136 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1137#endif
1138 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1139 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1140 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1141 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1142 pVM->rem.s.Env.interrupt_request,
1143 pVM->rem.s.Env.halted,
1144 pVM->rem.s.Env.exception_index
1145 );
1146
1147 rc = cpu_exec(&pVM->rem.s.Env);
1148
1149 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1150 pVM->rem.s.Env.interrupt_request,
1151 pVM->rem.s.Env.halted,
1152 pVM->rem.s.Env.exception_index
1153 );
1154
1155 TMNotifyEndOfExecution(pVCpu);
1156
1157 switch (rc)
1158 {
1159#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1160 /*
1161 * The normal exit.
1162 */
1163 case EXCP_SINGLE_INSTR:
1164 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1165 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1166 continue;
1167 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1168 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1169 rc = VINF_SUCCESS;
1170 break;
1171
1172#else
1173 /*
1174 * The normal exit, check for breakpoints at PC just to be sure.
1175 */
1176#endif
1177 case EXCP_DEBUG:
1178 if (pVM->rem.s.Env.watchpoint_hit)
1179 {
1180 /** @todo deal with watchpoints */
1181 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1182 rc = VINF_EM_DBG_BREAKPOINT;
1183 }
1184 else
1185 {
1186 CPUBreakpoint *pBP;
1187 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1188 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1189 if (pBP->pc == GCPtrPC)
1190 break;
1191 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1192 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1193 }
1194#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1195 if (rc == VINF_EM_DBG_STEPPED)
1196 {
1197 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1198 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1199 continue;
1200
1201 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1202 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1203 rc = VINF_SUCCESS;
1204 }
1205#endif
1206 break;
1207
1208 /*
1209 * If we take a trap or start servicing a pending interrupt, we might end up here.
1210 * (Timer thread or some other thread wishing EMT's attention.)
1211 */
1212 case EXCP_INTERRUPT:
1213 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1214 rc = VINF_SUCCESS;
1215 break;
1216
1217 /*
1218 * hlt instruction.
1219 */
1220 case EXCP_HLT:
1221 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1222 rc = VINF_EM_HALT;
1223 break;
1224
1225 /*
1226 * The VM has halted.
1227 */
1228 case EXCP_HALTED:
1229 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1230 rc = VINF_EM_HALT;
1231 break;
1232
1233 /*
1234 * Switch to RAW-mode.
1235 */
1236 case EXCP_EXECUTE_RAW:
1237 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1238 rc = VINF_EM_RESCHEDULE_RAW;
1239 break;
1240
1241 /*
1242 * Switch to hardware accelerated RAW-mode.
1243 */
1244 case EXCP_EXECUTE_HWACC:
1245 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1246 rc = VINF_EM_RESCHEDULE_HWACC;
1247 break;
1248
1249 /*
1250 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1251 */
1252 case EXCP_RC:
1253 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1254 rc = pVM->rem.s.rc;
1255 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1256 break;
1257
1258 /*
1259 * Figure out the rest when they arrive....
1260 */
1261 default:
1262 AssertMsgFailed(("rc=%d\n", rc));
1263 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1264 rc = VINF_EM_RESCHEDULE;
1265 break;
1266 }
1267 break;
1268 }
1269
1270#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1271// cpu_single_step(&pVM->rem.s.Env, 0);
1272#else
1273 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1274#endif
1275 return rc;
1276}
1277
1278
1279/**
1280 * Runs code in recompiled mode.
1281 *
1282 * Before calling this function the REM state needs to be in sync with
1283 * the VM. Call REMR3State() to perform the sync. It's only necessary
1284 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1285 * and after calling REMR3StateBack().
1286 *
1287 * @returns VBox status code.
1288 *
1289 * @param pVM VM Handle.
1290 * @param pVCpu VMCPU Handle.
1291 */
1292REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1293{
1294 int rc;
1295
1296 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1297 return remR3RunLoggingStep(pVM, pVCpu);
1298
1299 Assert(pVM->rem.s.fInREM);
1300 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1301
1302 TMNotifyStartOfExecution(pVCpu);
1303 rc = cpu_exec(&pVM->rem.s.Env);
1304 TMNotifyEndOfExecution(pVCpu);
1305 switch (rc)
1306 {
1307 /*
1308 * This happens when the execution was interrupted
1309 * by an external event, like pending timers.
1310 */
1311 case EXCP_INTERRUPT:
1312 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1313 rc = VINF_SUCCESS;
1314 break;
1315
1316 /*
1317 * hlt instruction.
1318 */
1319 case EXCP_HLT:
1320 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1321 rc = VINF_EM_HALT;
1322 break;
1323
1324 /*
1325 * The VM has halted.
1326 */
1327 case EXCP_HALTED:
1328 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1329 rc = VINF_EM_HALT;
1330 break;
1331
1332 /*
1333 * Breakpoint/single step.
1334 */
1335 case EXCP_DEBUG:
1336 if (pVM->rem.s.Env.watchpoint_hit)
1337 {
1338 /** @todo deal with watchpoints */
1339 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1340 rc = VINF_EM_DBG_BREAKPOINT;
1341 }
1342 else
1343 {
1344 CPUBreakpoint *pBP;
1345 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1346 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1347 if (pBP->pc == GCPtrPC)
1348 break;
1349 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1350 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1351 }
1352 break;
1353
1354 /*
1355 * Switch to RAW-mode.
1356 */
1357 case EXCP_EXECUTE_RAW:
1358 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1359 rc = VINF_EM_RESCHEDULE_RAW;
1360 break;
1361
1362 /*
1363 * Switch to hardware accelerated RAW-mode.
1364 */
1365 case EXCP_EXECUTE_HWACC:
1366 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1367 rc = VINF_EM_RESCHEDULE_HWACC;
1368 break;
1369
1370 /*
1371 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1372 */
1373 case EXCP_RC:
1374 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1375 rc = pVM->rem.s.rc;
1376 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1377 break;
1378
1379 /*
1380 * Figure out the rest when they arrive....
1381 */
1382 default:
1383 AssertMsgFailed(("rc=%d\n", rc));
1384 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1385 rc = VINF_SUCCESS;
1386 break;
1387 }
1388
1389 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1390 return rc;
1391}
1392
1393
1394/**
1395 * Check if the cpu state is suitable for Raw execution.
1396 *
1397 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1398 *
1399 * @param env The CPU env struct.
1400 * @param eip The EIP to check this for (might differ from env->eip).
1401 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1402 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1403 *
1404 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1405 */
1406bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1407{
1408 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1409 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1410 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1411 uint32_t u32CR0;
1412
1413#ifdef IEM_VERIFICATION_MODE
1414 return false;
1415#endif
1416
1417 /* Update counter. */
1418 env->pVM->rem.s.cCanExecuteRaw++;
1419
1420 /* Never when single stepping+logging guest code. */
1421 if (env->state & CPU_EMULATE_SINGLE_STEP)
1422 return false;
1423
1424 if (HWACCMIsEnabled(env->pVM))
1425 {
1426 CPUMCTX Ctx;
1427
1428 env->state |= CPU_RAW_HWACC;
1429
1430 /*
1431 * The simple check first...
1432 */
1433 if (!EMIsHwVirtExecutionEnabled(env->pVM))
1434 return false;
1435
1436 /*
1437 * Create partial context for HWACCMR3CanExecuteGuest
1438 */
1439 Ctx.cr0 = env->cr[0];
1440 Ctx.cr3 = env->cr[3];
1441 Ctx.cr4 = env->cr[4];
1442
1443 Ctx.tr.Sel = env->tr.selector;
1444 Ctx.tr.ValidSel = env->tr.selector;
1445 Ctx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1446 Ctx.tr.u64Base = env->tr.base;
1447 Ctx.tr.u32Limit = env->tr.limit;
1448 Ctx.tr.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1449
1450 Ctx.ldtr.Sel = env->ldt.selector;
1451 Ctx.ldtr.ValidSel = env->ldt.selector;
1452 Ctx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1453 Ctx.ldtr.u64Base = env->ldt.base;
1454 Ctx.ldtr.u32Limit = env->ldt.limit;
1455 Ctx.ldtr.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1456
1457 Ctx.idtr.cbIdt = env->idt.limit;
1458 Ctx.idtr.pIdt = env->idt.base;
1459
1460 Ctx.gdtr.cbGdt = env->gdt.limit;
1461 Ctx.gdtr.pGdt = env->gdt.base;
1462
1463 Ctx.rsp = env->regs[R_ESP];
1464 Ctx.rip = env->eip;
1465
1466 Ctx.eflags.u32 = env->eflags;
1467
1468 Ctx.cs.Sel = env->segs[R_CS].selector;
1469 Ctx.cs.ValidSel = env->segs[R_CS].selector;
1470 Ctx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1471 Ctx.cs.u64Base = env->segs[R_CS].base;
1472 Ctx.cs.u32Limit = env->segs[R_CS].limit;
1473 Ctx.cs.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1474
1475 Ctx.ds.Sel = env->segs[R_DS].selector;
1476 Ctx.ds.ValidSel = env->segs[R_DS].selector;
1477 Ctx.ds.fFlags = CPUMSELREG_FLAGS_VALID;
1478 Ctx.ds.u64Base = env->segs[R_DS].base;
1479 Ctx.ds.u32Limit = env->segs[R_DS].limit;
1480 Ctx.ds.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1481
1482 Ctx.es.Sel = env->segs[R_ES].selector;
1483 Ctx.es.ValidSel = env->segs[R_ES].selector;
1484 Ctx.es.fFlags = CPUMSELREG_FLAGS_VALID;
1485 Ctx.es.u64Base = env->segs[R_ES].base;
1486 Ctx.es.u32Limit = env->segs[R_ES].limit;
1487 Ctx.es.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1488
1489 Ctx.fs.Sel = env->segs[R_FS].selector;
1490 Ctx.fs.ValidSel = env->segs[R_FS].selector;
1491 Ctx.fs.fFlags = CPUMSELREG_FLAGS_VALID;
1492 Ctx.fs.u64Base = env->segs[R_FS].base;
1493 Ctx.fs.u32Limit = env->segs[R_FS].limit;
1494 Ctx.fs.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1495
1496 Ctx.gs.Sel = env->segs[R_GS].selector;
1497 Ctx.gs.ValidSel = env->segs[R_GS].selector;
1498 Ctx.gs.fFlags = CPUMSELREG_FLAGS_VALID;
1499 Ctx.gs.u64Base = env->segs[R_GS].base;
1500 Ctx.gs.u32Limit = env->segs[R_GS].limit;
1501 Ctx.gs.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1502
1503 Ctx.ss.Sel = env->segs[R_SS].selector;
1504 Ctx.ss.ValidSel = env->segs[R_SS].selector;
1505 Ctx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1506 Ctx.ss.u64Base = env->segs[R_SS].base;
1507 Ctx.ss.u32Limit = env->segs[R_SS].limit;
1508 Ctx.ss.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1509
1510 Ctx.msrEFER = env->efer;
1511
1512 /* Hardware accelerated raw-mode:
1513 *
1514 * Typically only 32-bit protected mode code with paging enabled is allowed here.
1515 */
1516 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx))
1517 {
1518 *piException = EXCP_EXECUTE_HWACC;
1519 return true;
1520 }
1521 return false;
1522 }
1523
1524 /*
1525 * Here we only support 16-bit and 32-bit protected mode ring-3 code that has no I/O privileges,
1526 * or 32-bit protected mode ring-0 code.
1527 *
1528 * The tests are ordered by the likelihood of being true during normal execution.
1529 */
1530 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1531 {
1532 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1533 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1534 return false;
1535 }
1536
1537#ifndef VBOX_RAW_V86
1538 if (fFlags & VM_MASK) {
1539 STAM_COUNTER_INC(&gStatRefuseVM86);
1540 Log2(("raw mode refused: VM_MASK\n"));
1541 return false;
1542 }
1543#endif
1544
1545 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1546 {
1547#ifndef DEBUG_bird
1548 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1549#endif
1550 return false;
1551 }
1552
1553 if (env->singlestep_enabled)
1554 {
1555 //Log2(("raw mode refused: Single step\n"));
1556 return false;
1557 }
1558
1559 if (!QTAILQ_EMPTY(&env->breakpoints))
1560 {
1561 //Log2(("raw mode refused: Breakpoints\n"));
1562 return false;
1563 }
1564
1565 if (!QTAILQ_EMPTY(&env->watchpoints))
1566 {
1567 //Log2(("raw mode refused: Watchpoints\n"));
1568 return false;
1569 }
1570
1571 u32CR0 = env->cr[0];
1572 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1573 {
1574 STAM_COUNTER_INC(&gStatRefusePaging);
1575 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1576 return false;
1577 }
1578
1579 if (env->cr[4] & CR4_PAE_MASK)
1580 {
1581 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1582 {
1583 STAM_COUNTER_INC(&gStatRefusePAE);
1584 return false;
1585 }
1586 }
1587
1588 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1589 {
1590 if (!EMIsRawRing3Enabled(env->pVM))
1591 return false;
1592
1593 if (!(env->eflags & IF_MASK))
1594 {
1595 STAM_COUNTER_INC(&gStatRefuseIF0);
1596 Log2(("raw mode refused: IF (RawR3)\n"));
1597 return false;
1598 }
1599
1600 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1601 {
1602 STAM_COUNTER_INC(&gStatRefuseWP0);
1603 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1604 return false;
1605 }
1606 }
1607 else
1608 {
1609 if (!EMIsRawRing0Enabled(env->pVM))
1610 return false;
1611
1612 // Let's start with pure 32 bits ring 0 code first
1613 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1614 {
1615 STAM_COUNTER_INC(&gStatRefuseCode16);
1616 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1617 return false;
1618 }
1619
1620 // Only R0
1621 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1622 {
1623 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1624 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1625 return false;
1626 }
1627
1628 if (!(u32CR0 & CR0_WP_MASK))
1629 {
1630 STAM_COUNTER_INC(&gStatRefuseWP0);
1631 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1632 return false;
1633 }
1634
1635 if (PATMIsPatchGCAddr(env->pVM, eip))
1636 {
1637 Log2(("raw r0 mode forced: patch code\n"));
1638 *piException = EXCP_EXECUTE_RAW;
1639 return true;
1640 }
1641
1642#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1643 if (!(env->eflags & IF_MASK))
1644 {
1645 STAM_COUNTER_INC(&gStatRefuseIF0);
1646 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1647 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1648 return false;
1649 }
1650#endif
1651
1652 env->state |= CPU_RAW_RING0;
1653 }
1654
1655 /*
1656 * Don't reschedule the first time we're called, because there might be
1657 * special reasons why we're here that are not covered by the above checks.
1658 */
1659 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1660 {
1661 Log2(("raw mode refused: first scheduling\n"));
1662 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1663 return false;
1664 }
1665
1666/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1667 *piException = EXCP_EXECUTE_RAW;
1668 return true;
1669}
1670
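/*
 * Illustrative sketch (not part of the original source): the rough shape of
 * a caller consuming the remR3CanExecuteRaw() check ending above. The
 * wrapper name is invented, and the exact blend of hflags/eflags bits passed
 * as fFlags is an assumption (VM_MASK is an EFLAGS bit, so the caller has to
 * mix it in).
 */
#if 0 /* illustrative only */
static int remR3ExampleRun(CPUX86State *env)
{
    int      iExcp;
    unsigned fFlags = env->hflags | (env->eflags & VM_MASK); /* assumed blend */
    if (remR3CanExecuteRaw(env, env->eip, fFlags, &iExcp))
        return iExcp;           /* EXCP_EXECUTE_RAW: switch to raw-mode execution. */
    return cpu_exec(env);       /* Otherwise keep recompiling/emulating. */
}
#endif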
1671
1672/**
1673 * Fetches a code byte.
1674 *
1675 * @returns Success indicator (bool) for ease of use.
1676 * @param env The CPU environment structure.
1677 * @param GCPtrInstr Where to fetch code.
1678 * @param   pu8Byte         Where to store the byte on success.
1679 */
1680bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1681{
1682 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1683 if (RT_SUCCESS(rc))
1684 return true;
1685 return false;
1686}
1687
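/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller that prefers PATM's view of an opcode byte and falls back to a
 * plain guest memory read; remR3ExampleFetchByte is an invented name.
 */
#if 0 /* illustrative only */
static uint8_t remR3ExampleFetchByte(CPUX86State *env, RTGCPTR GCPtrPC)
{
    uint8_t u8;
    if (remR3GetOpcode(env, GCPtrPC, &u8))  /* patched byte, if PATM owns the address */
        return u8;
    return read_byte(env, GCPtrPC);         /* ordinary guest memory read */
}
#endif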
1688
1689/**
1690 * Flushes (or invalidates, if you like) a page table/dir entry.
1691 *
1692 * (invlpg instruction; tlb_flush_page)
1693 *
1694 * @param env Pointer to cpu environment.
1695 * @param   GCPtr       The virtual address whose page table/dir entry should be invalidated.
1696 */
1697void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1698{
1699 PVM pVM = env->pVM;
1700 PCPUMCTX pCtx;
1701 int rc;
1702
1703 Assert(EMRemIsLockOwner(env->pVM));
1704
1705 /*
1706 * When we're replaying invlpg instructions or restoring a saved
1707 * state we disable this path.
1708 */
1709 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1710 return;
1711 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1712 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1713
1714 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1715
1716 /*
1717 * Update the control registers before calling PGMFlushPage.
1718 */
1719 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1720 Assert(pCtx);
1721 pCtx->cr0 = env->cr[0];
1722 pCtx->cr3 = env->cr[3];
1723 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1724 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1725 pCtx->cr4 = env->cr[4];
1726
1727 /*
1728 * Let PGM do the rest.
1729 */
1730 Assert(env->pVCpu);
1731 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1732 if (RT_FAILURE(rc))
1733 {
1734 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1735 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1736 }
1737 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1738}
1739
1740
1741#ifndef REM_PHYS_ADDR_IN_TLB
1742/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
1743void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
1744{
1745 void *pv;
1746 int rc;
1747
1748
1749 /* Address must be aligned enough to fiddle with lower bits */
1750 Assert((physAddr & 0x3) == 0);
1751 /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/
1752
1753 STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
1754 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1755 STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
1756 Assert( rc == VINF_SUCCESS
1757 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1758 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1759 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
1760 if (RT_FAILURE(rc))
1761 return (void *)1;
1762 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1763 return (void *)((uintptr_t)pv | 2);
1764 return pv;
1765}
1766#endif /* REM_PHYS_ADDR_IN_TLB */
1767
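/*
 * Illustrative sketch (not part of the original source): how a consumer of
 * remR3TlbGCPhys2Ptr() could decode the status the function folds into the
 * low bits of the returned pointer. The helper name and out parameters are
 * invented; note the original does not distinguish the two failure causes.
 */
#if 0 /* illustrative only */
static int remR3ExampleDecodeTlbPtr(void *pvRet, void **ppv, bool *pfCatchWrite)
{
    uintptr_t uTag = (uintptr_t)pvRet & 3;
    if (uTag == 1)                          /* (void *)1 covers catch-all and unassigned. */
        return VERR_PGM_PHYS_TLB_UNASSIGNED;
    *pfCatchWrite = RT_BOOL(uTag & 2);      /* bit 1 set: writes must go through the handler. */
    *ppv          = (void *)((uintptr_t)pvRet & ~(uintptr_t)3);
    return VINF_SUCCESS;
}
#endif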
1768
1769/**
1770 * Called from tlb_protect_code in order to write-monitor a code page.
1771 *
1772 * @param env Pointer to the CPU environment.
1773 * @param GCPtr Code page to monitor
1774 */
1775void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1776{
1777#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1778 Assert(env->pVM->rem.s.fInREM);
1779 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1780 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1781 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1782 && !(env->eflags & VM_MASK) /* no V86 mode */
1783 && !HWACCMIsEnabled(env->pVM))
1784 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1785#endif
1786}
1787
1788
1789/**
1790 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1791 *
1792 * @param env Pointer to the CPU environment.
1793 * @param GCPtr Code page to monitor
1794 */
1795void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1796{
1797 Assert(env->pVM->rem.s.fInREM);
1798#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1799 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1800 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1801 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1802 && !(env->eflags & VM_MASK) /* no V86 mode */
1803 && !HWACCMIsEnabled(env->pVM))
1804 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1805#endif
1806}
1807
1808
1809/**
1810 * Called when the CPU is initialized, any of the CRx registers are changed or
1811 * when the A20 line is modified.
1812 *
1813 * @param env Pointer to the CPU environment.
1814 * @param fGlobal Set if the flush is global.
1815 */
1816void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1817{
1818 PVM pVM = env->pVM;
1819 PCPUMCTX pCtx;
1820 Assert(EMRemIsLockOwner(pVM));
1821
1822 /*
1823 * When we're replaying invlpg instructions or restoring a saved
1824 * state we disable this path.
1825 */
1826 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1827 return;
1828 Assert(pVM->rem.s.fInREM);
1829
1830 /*
1831     * The caller doesn't check cr4, so we have to do that ourselves.
1832 */
1833 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1834 fGlobal = true;
1835 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1836
1837 /*
1838 * Update the control registers before calling PGMR3FlushTLB.
1839 */
1840 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1841 Assert(pCtx);
1842 pCtx->cr0 = env->cr[0];
1843 pCtx->cr3 = env->cr[3];
1844 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1845 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1846 pCtx->cr4 = env->cr[4];
1847
1848 /*
1849 * Let PGM do the rest.
1850 */
1851 Assert(env->pVCpu);
1852 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1853}
1854
1855
1856/**
1857 * Called when any of the cr0, cr4 or efer registers is updated.
1858 *
1859 * @param env Pointer to the CPU environment.
1860 */
1861void remR3ChangeCpuMode(CPUX86State *env)
1862{
1863 PVM pVM = env->pVM;
1864 uint64_t efer;
1865 PCPUMCTX pCtx;
1866 int rc;
1867
1868 /*
1869 * When we're replaying loads or restoring a saved
1870 * state this path is disabled.
1871 */
1872 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1873 return;
1874 Assert(pVM->rem.s.fInREM);
1875
1876 /*
1877 * Update the control registers before calling PGMChangeMode()
1878 * as it may need to map whatever cr3 is pointing to.
1879 */
1880 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1881 Assert(pCtx);
1882 pCtx->cr0 = env->cr[0];
1883 pCtx->cr3 = env->cr[3];
1884 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1885 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1886 pCtx->cr4 = env->cr[4];
1887#ifdef TARGET_X86_64
1888 efer = env->efer;
1889 pCtx->msrEFER = efer;
1890#else
1891 efer = 0;
1892#endif
1893 Assert(env->pVCpu);
1894 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1895 if (rc != VINF_SUCCESS)
1896 {
1897 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1898 {
1899 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1900 remR3RaiseRC(env->pVM, rc);
1901 }
1902 else
1903 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1904 }
1905}
1906
1907
1908/**
1909 * Called from compiled code to run DMA.
1910 *
1911 * @param env Pointer to the CPU environment.
1912 */
1913void remR3DmaRun(CPUX86State *env)
1914{
1915 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1916 PDMR3DmaRun(env->pVM);
1917 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1918}
1919
1920
1921/**
1922 * Called from compiled code to schedule pending timers in the VMM.
1923 *
1924 * @param env Pointer to the CPU environment.
1925 */
1926void remR3TimersRun(CPUX86State *env)
1927{
1928 LogFlow(("remR3TimersRun:\n"));
1929 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1930 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1931 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1932 TMR3TimerQueuesDo(env->pVM);
1933 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1934 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1935}
1936
1937
1938/**
1939 * Records a trap occurrence.
1940 *
1941 * @returns VBox status code.
1942 * @param   env         Pointer to the CPU environment.
1943 * @param   uTrap       Trap number.
1944 * @param   uErrorCode  Error code.
1945 * @param   pvNextEIP   Next EIP.
1946 */
1947int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1948{
1949 PVM pVM = env->pVM;
1950#ifdef VBOX_WITH_STATISTICS
1951 static STAMCOUNTER s_aStatTrap[255];
1952 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1953#endif
1954
1955#ifdef VBOX_WITH_STATISTICS
1956 if (uTrap < 255)
1957 {
1958 if (!s_aRegisters[uTrap])
1959 {
1960 char szStatName[64];
1961 s_aRegisters[uTrap] = true;
1962 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1963 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1964 }
1965 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1966 }
1967#endif
1968 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1969    if (   uTrap < 0x20
1970 && (env->cr[0] & X86_CR0_PE)
1971 && !(env->eflags & X86_EFL_VM))
1972 {
1973#ifdef DEBUG
1974 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1975#endif
1976        if (pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1977 {
1978 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1979 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1980 return VERR_REM_TOO_MANY_TRAPS;
1981 }
1982        if (pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1983 pVM->rem.s.cPendingExceptions = 1;
1984 pVM->rem.s.uPendingException = uTrap;
1985 pVM->rem.s.uPendingExcptEIP = env->eip;
1986 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1987 }
1988 else
1989 {
1990 pVM->rem.s.cPendingExceptions = 0;
1991 pVM->rem.s.uPendingException = uTrap;
1992 pVM->rem.s.uPendingExcptEIP = env->eip;
1993 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1994 }
1995 return VINF_SUCCESS;
1996}
1997
1998
1999/**
2000 * Clears the current active trap.
2001 *
2002 * @param pVM VM Handle.
2003 */
2004void remR3TrapClear(PVM pVM)
2005{
2006 pVM->rem.s.cPendingExceptions = 0;
2007 pVM->rem.s.uPendingException = 0;
2008 pVM->rem.s.uPendingExcptEIP = 0;
2009 pVM->rem.s.uPendingExcptCR2 = 0;
2010}
2011
2012
2013/**
2014 * Records previous call instruction addresses.
2015 *
2016 * @param env Pointer to the CPU environment.
2017 */
2018void remR3RecordCall(CPUX86State *env)
2019{
2020 CSAMR3RecordCallAddress(env->pVM, env->eip);
2021}
2022
2023
2024/**
2025 * Syncs the internal REM state with the VM.
2026 *
2027 * This must be called before REMR3Run() is invoked whenever the REM
2028 * state is not up to date. Calling it several times in a row is not
2029 * permitted.
2030 *
2031 * @returns VBox status code.
2032 *
2033 * @param pVM VM Handle.
2034 * @param pVCpu VMCPU Handle.
2035 *
2036 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2037 *      not do this since the majority of the callers don't want any unnecessary events
2038 * pending that would immediately interrupt execution.
2039 */
2040REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2041{
2042 register const CPUMCTX *pCtx;
2043 register unsigned fFlags;
2044 bool fHiddenSelRegsValid;
2045 unsigned i;
2046 TRPMEVENT enmType;
2047 uint8_t u8TrapNo;
2048 uint32_t uCpl;
2049 int rc;
2050
2051 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2052 Log2(("REMR3State:\n"));
2053
2054 pVM->rem.s.Env.pVCpu = pVCpu;
2055 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2056 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
2057
2058 Assert(!pVM->rem.s.fInREM);
2059 pVM->rem.s.fInStateSync = true;
2060
2061 /*
2062 * If we have to flush TBs, do that immediately.
2063 */
2064 if (pVM->rem.s.fFlushTBs)
2065 {
2066 STAM_COUNTER_INC(&gStatFlushTBs);
2067 tb_flush(&pVM->rem.s.Env);
2068 pVM->rem.s.fFlushTBs = false;
2069 }
2070
2071 /*
2072 * Copy the registers which require no special handling.
2073 */
2074#ifdef TARGET_X86_64
2075    /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and are undefined after a mode change. */
2076 Assert(R_EAX == 0);
2077 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2078 Assert(R_ECX == 1);
2079 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2080 Assert(R_EDX == 2);
2081 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2082 Assert(R_EBX == 3);
2083 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2084 Assert(R_ESP == 4);
2085 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2086 Assert(R_EBP == 5);
2087 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2088 Assert(R_ESI == 6);
2089 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2090 Assert(R_EDI == 7);
2091 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2092 pVM->rem.s.Env.regs[8] = pCtx->r8;
2093 pVM->rem.s.Env.regs[9] = pCtx->r9;
2094 pVM->rem.s.Env.regs[10] = pCtx->r10;
2095 pVM->rem.s.Env.regs[11] = pCtx->r11;
2096 pVM->rem.s.Env.regs[12] = pCtx->r12;
2097 pVM->rem.s.Env.regs[13] = pCtx->r13;
2098 pVM->rem.s.Env.regs[14] = pCtx->r14;
2099 pVM->rem.s.Env.regs[15] = pCtx->r15;
2100
2101 pVM->rem.s.Env.eip = pCtx->rip;
2102
2103 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2104#else
2105 Assert(R_EAX == 0);
2106 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2107 Assert(R_ECX == 1);
2108 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2109 Assert(R_EDX == 2);
2110 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2111 Assert(R_EBX == 3);
2112 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2113 Assert(R_ESP == 4);
2114 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2115 Assert(R_EBP == 5);
2116 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2117 Assert(R_ESI == 6);
2118 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2119 Assert(R_EDI == 7);
2120 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2121 pVM->rem.s.Env.eip = pCtx->eip;
2122
2123 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2124#endif
2125
2126 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2127
2128 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2129    for (i = 0; i < 8; i++)
2130 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2131
2132#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2133 /*
2134     * Clear the halted hidden flag (the interrupt waking up the CPU may
2135 * have been dispatched in raw mode).
2136 */
2137 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2138#endif
2139
2140 /*
2141 * Replay invlpg? Only if we're not flushing the TLB.
2142 */
2143 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2144 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2145 if (pVM->rem.s.cInvalidatedPages)
2146 {
2147 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2148 {
2149 RTUINT i;
2150
2151 pVM->rem.s.fIgnoreCR3Load = true;
2152 pVM->rem.s.fIgnoreInvlPg = true;
2153 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2154 {
2155 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2156 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2157 }
2158 pVM->rem.s.fIgnoreInvlPg = false;
2159 pVM->rem.s.fIgnoreCR3Load = false;
2160 }
2161 pVM->rem.s.cInvalidatedPages = 0;
2162 }
2163
2164 /* Replay notification changes. */
2165 REMR3ReplayHandlerNotifications(pVM);
2166
2167 /* Update MSRs; before CRx registers! */
2168 pVM->rem.s.Env.efer = pCtx->msrEFER;
2169 pVM->rem.s.Env.star = pCtx->msrSTAR;
2170 pVM->rem.s.Env.pat = pCtx->msrPAT;
2171#ifdef TARGET_X86_64
2172 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2173 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2174 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2175 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2176
2177 /* Update the internal long mode activate flag according to the new EFER value. */
2178 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2179 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2180 else
2181 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2182#endif
2183
2184 /* Update the inhibit IRQ mask. */
2185 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2186 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2187 {
2188 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2189 if (InhibitPC == pCtx->rip)
2190 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2191 else
2192 {
2193 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2194 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2195 }
2196 }
2197
2198 /*
2199 * Sync the A20 gate.
2200 */
2201 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2202 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2203 {
2204 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2205 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2206 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2207 }
2208
2209 /*
2210 * Registers which are rarely changed and require special handling / order when changed.
2211 */
2212 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2213 | CPUM_CHANGED_CR4
2214 | CPUM_CHANGED_CR0
2215 | CPUM_CHANGED_CR3
2216 | CPUM_CHANGED_GDTR
2217 | CPUM_CHANGED_IDTR
2218 | CPUM_CHANGED_SYSENTER_MSR
2219 | CPUM_CHANGED_LDTR
2220 | CPUM_CHANGED_CPUID
2221 | CPUM_CHANGED_FPU_REM
2222 )
2223 )
2224 {
2225 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2226 {
2227 pVM->rem.s.fIgnoreCR3Load = true;
2228 tlb_flush(&pVM->rem.s.Env, true);
2229 pVM->rem.s.fIgnoreCR3Load = false;
2230 }
2231
2232 /* CR4 before CR0! */
2233 if (fFlags & CPUM_CHANGED_CR4)
2234 {
2235 pVM->rem.s.fIgnoreCR3Load = true;
2236 pVM->rem.s.fIgnoreCpuMode = true;
2237 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2238 pVM->rem.s.fIgnoreCpuMode = false;
2239 pVM->rem.s.fIgnoreCR3Load = false;
2240 }
2241
2242 if (fFlags & CPUM_CHANGED_CR0)
2243 {
2244 pVM->rem.s.fIgnoreCR3Load = true;
2245 pVM->rem.s.fIgnoreCpuMode = true;
2246 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2247 pVM->rem.s.fIgnoreCpuMode = false;
2248 pVM->rem.s.fIgnoreCR3Load = false;
2249 }
2250
2251 if (fFlags & CPUM_CHANGED_CR3)
2252 {
2253 pVM->rem.s.fIgnoreCR3Load = true;
2254 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2255 pVM->rem.s.fIgnoreCR3Load = false;
2256 }
2257
2258 if (fFlags & CPUM_CHANGED_GDTR)
2259 {
2260 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2261 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2262 }
2263
2264 if (fFlags & CPUM_CHANGED_IDTR)
2265 {
2266 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2267 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2268 }
2269
2270 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2271 {
2272 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2273 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2274 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2275 }
2276
2277 if (fFlags & CPUM_CHANGED_LDTR)
2278 {
2279 if (fHiddenSelRegsValid || (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID))
2280 {
2281 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2282 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2283 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2284 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u << 8) & 0xFFFFFF;
2285 }
2286 else
2287 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2288 }
2289
2290 if (fFlags & CPUM_CHANGED_CPUID)
2291 {
2292 uint32_t u32Dummy;
2293
2294 /*
2295 * Get the CPUID features.
2296 */
2297 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2298 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2299 }
2300
2301 /* Sync FPU state after CR4, CPUID and EFER (!). */
2302 if (fFlags & CPUM_CHANGED_FPU_REM)
2303 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2304 }
2305
2306 /*
2307 * Sync TR unconditionally to make life simpler.
2308 */
2309 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2310 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2311 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2312 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u << 8) & 0xFFFFFF;
2313 /* Note! do_interrupt will fault if the busy flag is still set... */
2314 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2315
2316 /*
2317 * Update selector registers.
2318 * This must be done *after* we've synced gdt, ldt and crX registers
2319     * since we're reading the GDT/LDT in sync_seg. This will happen with a
2320     * saved state which takes a quick dip into raw mode, for instance.
2321 */
2322 /*
2323     * Stack: check this one first, as the CPL might have changed. The
2324 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2325 */
2326
2327 if (fHiddenSelRegsValid)
2328 {
2329 /* The hidden selector registers are valid in the CPU context. */
2330 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2331
2332 /* Set current CPL */
2333 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2334
2335 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, (pCtx->cs.Attr.u << 8) & 0xFFFFFF);
2336 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, (pCtx->ss.Attr.u << 8) & 0xFFFFFF);
2337 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, (pCtx->ds.Attr.u << 8) & 0xFFFFFF);
2338 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, (pCtx->es.Attr.u << 8) & 0xFFFFFF);
2339 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, (pCtx->fs.Attr.u << 8) & 0xFFFFFF);
2340 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, (pCtx->gs.Attr.u << 8) & 0xFFFFFF);
2341 }
2342 else
2343 {
2344 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2345 /** @todo use hidden registers when possible and make CPUM/someone do the
2346 * reading of lazily maintained hidden registers. */
2347 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss.Sel)
2348 {
2349 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss.Sel));
2350
2351 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2352 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss.Sel);
2353#ifdef VBOX_WITH_STATISTICS
2354 if (pVM->rem.s.Env.segs[R_SS].newselector)
2355 {
2356 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2357 }
2358#endif
2359 }
2360 else
2361 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2362
2363 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es.Sel)
2364 {
2365 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es.Sel));
2366 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es.Sel);
2367#ifdef VBOX_WITH_STATISTICS
2368 if (pVM->rem.s.Env.segs[R_ES].newselector)
2369 {
2370 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2371 }
2372#endif
2373 }
2374 else
2375 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2376
2377 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs.Sel)
2378 {
2379 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs.Sel));
2380 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs.Sel);
2381#ifdef VBOX_WITH_STATISTICS
2382 if (pVM->rem.s.Env.segs[R_CS].newselector)
2383 {
2384 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2385 }
2386#endif
2387 }
2388 else
2389 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2390
2391 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds.Sel)
2392 {
2393 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds.Sel));
2394 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds.Sel);
2395#ifdef VBOX_WITH_STATISTICS
2396 if (pVM->rem.s.Env.segs[R_DS].newselector)
2397 {
2398 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2399 }
2400#endif
2401 }
2402 else
2403 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2404
2405 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2406 * be the same but not the base/limit. */
2407 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs.Sel)
2408 {
2409 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs.Sel));
2410 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs.Sel);
2411#ifdef VBOX_WITH_STATISTICS
2412 if (pVM->rem.s.Env.segs[R_FS].newselector)
2413 {
2414 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2415 }
2416#endif
2417 }
2418 else
2419 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2420
2421 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs.Sel)
2422 {
2423 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs.Sel));
2424 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs.Sel);
2425#ifdef VBOX_WITH_STATISTICS
2426 if (pVM->rem.s.Env.segs[R_GS].newselector)
2427 {
2428 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2429 }
2430#endif
2431 }
2432 else
2433 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2434 }
2435
2436 /*
2437 * Check for traps.
2438 */
2439 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2440 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2441 if (RT_SUCCESS(rc))
2442 {
2443#ifdef DEBUG
2444 if (u8TrapNo == 0x80)
2445 {
2446 remR3DumpLnxSyscall(pVCpu);
2447 remR3DumpOBsdSyscall(pVCpu);
2448 }
2449#endif
2450
2451 pVM->rem.s.Env.exception_index = u8TrapNo;
2452 if (enmType != TRPM_SOFTWARE_INT)
2453 {
2454 pVM->rem.s.Env.exception_is_int = 0;
2455 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2456 }
2457 else
2458 {
2459 /*
2460             * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2461             * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2462             * for int3 and into.
2463 */
2464 pVM->rem.s.Env.exception_is_int = 1;
2465 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2466 /* int 3 may be generated by one-byte 0xcc */
2467 if (u8TrapNo == 3)
2468 {
2469 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2470 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2471 }
2472 /* int 4 may be generated by one-byte 0xce */
2473 else if (u8TrapNo == 4)
2474 {
2475 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2476 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2477 }
2478 }
2479
2480 /* get error code and cr2 if needed. */
2481 if (enmType == TRPM_TRAP)
2482 {
2483 switch (u8TrapNo)
2484 {
2485 case 0x0e:
2486 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2487 /* fallthru */
2488 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2489 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2490 break;
2491
2492 case 0x11: case 0x08:
2493 default:
2494 pVM->rem.s.Env.error_code = 0;
2495 break;
2496 }
2497 }
2498 else
2499 pVM->rem.s.Env.error_code = 0;
2500
2501 /*
2502 * We can now reset the active trap since the recompiler is gonna have a go at it.
2503 */
2504 rc = TRPMResetTrap(pVCpu);
2505 AssertRC(rc);
2506 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2507 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2508 }
2509
2510 /*
2511 * Clear old interrupt request flags; Check for pending hardware interrupts.
2512 * (See @remark for why we don't check for other FFs.)
2513 */
2514 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2515 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2516 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2517 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2518
2519 /*
2520 * We're now in REM mode.
2521 */
2522 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2523 pVM->rem.s.fInREM = true;
2524 pVM->rem.s.fInStateSync = false;
2525 pVM->rem.s.cCanExecuteRaw = 0;
2526 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2527 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2528 return VINF_SUCCESS;
2529}
2530
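/*
 * Illustrative sketch (not part of the original source): the two attribute
 * conversions used throughout the state sync above and below. QEmu keeps the
 * second dword of the descriptor in 'flags' while CPUM keeps only the
 * attribute word; the helper names are invented.
 */
#if 0 /* illustrative only */
static inline unsigned remR3ExampleAttrToQemuFlags(unsigned uAttr)
{
    return (uAttr << 8) & 0xFFFFFF;     /* attribute word -> descriptor dword 2, base bits zeroed */
}

static inline unsigned remR3ExampleQemuFlagsToAttr(unsigned fQemuFlags)
{
    return (fQemuFlags >> 8) & 0xF0FF;  /* descriptor dword 2 -> attribute word, limit bits masked */
}
#endif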
2531
2532/**
2533 * Syncs back changes in the REM state to the VM state.
2534 *
2535 * This must be called after invoking REMR3Run().
2536 * Calling it several times in a row is not permitted.
2537 *
2538 * @returns VBox status code.
2539 *
2540 * @param pVM VM Handle.
2541 * @param pVCpu VMCPU Handle.
2542 */
2543REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2544{
2545 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2546 Assert(pCtx);
2547 unsigned i;
2548
2549 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2550 Log2(("REMR3StateBack:\n"));
2551 Assert(pVM->rem.s.fInREM);
2552
2553 /*
2554 * Copy back the registers.
2555 * This is done in the order they are declared in the CPUMCTX structure.
2556 */
2557
2558 /** @todo FOP */
2559 /** @todo FPUIP */
2560 /** @todo CS */
2561 /** @todo FPUDP */
2562 /** @todo DS */
2563
2564 /** @todo check if FPU/XMM was actually used in the recompiler */
2565 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2566//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2567
2568#ifdef TARGET_X86_64
2569    /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and are undefined after a mode change. */
2570 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2571 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2572 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2573 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2574 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2575 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2576 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2577 pCtx->r8 = pVM->rem.s.Env.regs[8];
2578 pCtx->r9 = pVM->rem.s.Env.regs[9];
2579 pCtx->r10 = pVM->rem.s.Env.regs[10];
2580 pCtx->r11 = pVM->rem.s.Env.regs[11];
2581 pCtx->r12 = pVM->rem.s.Env.regs[12];
2582 pCtx->r13 = pVM->rem.s.Env.regs[13];
2583 pCtx->r14 = pVM->rem.s.Env.regs[14];
2584 pCtx->r15 = pVM->rem.s.Env.regs[15];
2585
2586 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2587
2588#else
2589 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2590 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2591 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2592 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2593 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2594 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2595 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2596
2597 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2598#endif
2599
2600#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2601 do \
2602 { \
2603 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2604        if (!pVM->rem.s.Env.segs[R_##a_SREG].newselector) \
2605 { \
2606 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2607 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2608 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2609 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2610 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */ \
2611 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> 8) & 0xF0FF; \
2612 } \
2613 else \
2614 { \
2615 pCtx->a_sreg.fFlags = 0; \
2616 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2617 } \
2618 } while (0)
2619
2620 SYNC_BACK_SREG(es, ES);
2621 SYNC_BACK_SREG(cs, CS);
2622 SYNC_BACK_SREG(ss, SS);
2623 SYNC_BACK_SREG(ds, DS);
2624 SYNC_BACK_SREG(fs, FS);
2625 SYNC_BACK_SREG(gs, GS);
2626
2627#ifdef TARGET_X86_64
2628 pCtx->rip = pVM->rem.s.Env.eip;
2629 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2630#else
2631 pCtx->eip = pVM->rem.s.Env.eip;
2632 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2633#endif
2634
2635 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2636 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2637 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2638 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2639 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2640 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2641
2642 for (i = 0; i < 8; i++)
2643 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2644
2645 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2646 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2647 {
2648 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2649 STAM_COUNTER_INC(&gStatREMGDTChange);
2650 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2651 }
2652
2653 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2654 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2655 {
2656 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2657 STAM_COUNTER_INC(&gStatREMIDTChange);
2658 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2659 }
2660
2661 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2662 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2663 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2664 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2665 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2666 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2667 )
2668 {
2669 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2670 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2671 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2672 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2673 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2674 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2675 STAM_COUNTER_INC(&gStatREMLDTRChange);
2676 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2677 }
2678
2679 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2680 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2681 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2682 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2683 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2684 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2685 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2686 : 0)
2687 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2688 )
2689 {
2690 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2691 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2692 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2693 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2694 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2695 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2696 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2697 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2698 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2699 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2700 if (pCtx->tr.Attr.u)
2701 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2702 STAM_COUNTER_INC(&gStatREMTRChange);
2703 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2704 }
2705
2706 /* Sysenter MSR */
2707 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2708 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2709 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2710
2711 /* System MSRs. */
2712 pCtx->msrEFER = pVM->rem.s.Env.efer;
2713 pCtx->msrSTAR = pVM->rem.s.Env.star;
2714 pCtx->msrPAT = pVM->rem.s.Env.pat;
2715#ifdef TARGET_X86_64
2716 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2717 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2718 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2719 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2720#endif
2721
2722 /* Inhibit interrupt flag. */
2723 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2724 {
2725        Log(("Setting VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2726 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2727 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2728 }
2729 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2730 {
2731 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2732 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2733 }
2734
2735 remR3TrapClear(pVM);
2736
2737 /*
2738 * Check for traps.
2739 */
2740 if ( pVM->rem.s.Env.exception_index >= 0
2741 && pVM->rem.s.Env.exception_index < 256)
2742 {
2743 int rc;
2744
2745 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2746 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2747 AssertRC(rc);
2748 switch (pVM->rem.s.Env.exception_index)
2749 {
2750 case 0x0e:
2751 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2752 /* fallthru */
2753 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2754 case 0x11: case 0x08: /* 0 */
2755 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2756 break;
2757 }
2758
2759 }
2760
2761 /*
2762     * We're no longer in REM mode.
2763 */
2764 CPUMR3RemLeave(pVCpu,
2765 HWACCMIsEnabled(pVM)
2766 || ( pVM->rem.s.Env.segs[R_SS].newselector
2767 | pVM->rem.s.Env.segs[R_GS].newselector
2768 | pVM->rem.s.Env.segs[R_FS].newselector
2769 | pVM->rem.s.Env.segs[R_ES].newselector
2770 | pVM->rem.s.Env.segs[R_DS].newselector
2771 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2772 );
2773 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2774 pVM->rem.s.fInREM = false;
2775 pVM->rem.s.pCtx = NULL;
2776 pVM->rem.s.Env.pVCpu = NULL;
2777 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2778 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2779 return VINF_SUCCESS;
2780}
2781
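/*
 * Illustrative sketch (not part of the original source) of the calling
 * sequence prescribed by the REMR3State/REMR3StateBack contracts above;
 * the wrapper name is invented and scheduling/FF checks are omitted.
 */
#if 0 /* illustrative only */
static int remR3ExampleExecute(PVM pVM, PVMCPU pVCpu)
{
    int rc = REMR3State(pVM, pVCpu);            /* sync VMM -> REM, enter REM mode */
    if (RT_SUCCESS(rc))
    {
        int rc2;
        rc  = REMR3Run(pVM, pVCpu);             /* let the recompiler execute */
        rc2 = REMR3StateBack(pVM, pVCpu);       /* sync REM -> VMM, leave REM mode */
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
#endif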
2782
2783/**
2784 * This is called by the disassembler when it wants to update the CPU state
2785 * before, for instance, doing a register dump.
2786 */
2787static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2788{
2789 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2790 unsigned i;
2791
2792 Assert(pVM->rem.s.fInREM);
2793
2794 /*
2795 * Copy back the registers.
2796 * This is done in the order they are declared in the CPUMCTX structure.
2797 */
2798
2799 /** @todo FOP */
2800 /** @todo FPUIP */
2801 /** @todo CS */
2802 /** @todo FPUDP */
2803 /** @todo DS */
2804 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2805 pCtx->fpu.MXCSR = 0;
2806 pCtx->fpu.MXCSR_MASK = 0;
2807
2808 /** @todo check if FPU/XMM was actually used in the recompiler */
2809 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2810//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2811
2812#ifdef TARGET_X86_64
2813 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2814 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2815 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2816 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2817 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2818 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2819 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2820 pCtx->r8 = pVM->rem.s.Env.regs[8];
2821 pCtx->r9 = pVM->rem.s.Env.regs[9];
2822 pCtx->r10 = pVM->rem.s.Env.regs[10];
2823 pCtx->r11 = pVM->rem.s.Env.regs[11];
2824 pCtx->r12 = pVM->rem.s.Env.regs[12];
2825 pCtx->r13 = pVM->rem.s.Env.regs[13];
2826 pCtx->r14 = pVM->rem.s.Env.regs[14];
2827 pCtx->r15 = pVM->rem.s.Env.regs[15];
2828
2829 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2830#else
2831 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2832 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2833 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2834 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2835 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2836 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2837 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2838
2839 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2840#endif
2841
2842 SYNC_BACK_SREG(es, ES);
2843 SYNC_BACK_SREG(cs, CS);
2844 SYNC_BACK_SREG(ss, SS);
2845 SYNC_BACK_SREG(ds, DS);
2846 SYNC_BACK_SREG(fs, FS);
2847 SYNC_BACK_SREG(gs, GS);
2848
2849#ifdef TARGET_X86_64
2850 pCtx->rip = pVM->rem.s.Env.eip;
2851 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2852#else
2853 pCtx->eip = pVM->rem.s.Env.eip;
2854 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2855#endif
2856
2857 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2858 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2859 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2860 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2861 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2862 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2863
2864 for (i = 0; i < 8; i++)
2865 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2866
2867 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2868 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2869 {
2870 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2871 STAM_COUNTER_INC(&gStatREMGDTChange);
2872 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2873 }
2874
2875 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2876 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2877 {
2878 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2879 STAM_COUNTER_INC(&gStatREMIDTChange);
2880 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2881 }
2882
2883 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2884 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2885 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2886 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2887 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2888 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2889 )
2890 {
2891 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2892 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2893 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2894 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2895 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2896 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2897 STAM_COUNTER_INC(&gStatREMLDTRChange);
2898 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2899 }
2900
2901 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2902 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2903 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2904 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2905 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2906 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2907 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2908 : 0)
2909 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2910 )
2911 {
2912 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2913 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2914 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2915 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2916 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2917 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2918 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2919 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2920 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2921 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2922 if (pCtx->tr.Attr.u)
2923 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2924 STAM_COUNTER_INC(&gStatREMTRChange);
2925 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2926 }
2927
2928 /* Sysenter MSR */
2929 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2930 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2931 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2932
2933 /* System MSRs. */
2934 pCtx->msrEFER = pVM->rem.s.Env.efer;
2935 pCtx->msrSTAR = pVM->rem.s.Env.star;
2936 pCtx->msrPAT = pVM->rem.s.Env.pat;
2937#ifdef TARGET_X86_64
2938 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2939 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2940 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2941 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2942#endif
2943
2944}
2945
2946
2947/**
2948 * Update the VMM state information if we're currently in REM.
2949 *
2950 * This method is used by DBGF and PDM devices when there is any uncertainty whether
2951 * we're currently executing in REM and the VMM state is invalid. This method will of
2952 * course check that we're executing in REM before syncing any data over to the VMM.
2953 *
2954 * @param pVM The VM handle.
2955 * @param pVCpu The VMCPU handle.
2956 */
2957REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2958{
2959 if (pVM->rem.s.fInREM)
2960 remR3StateUpdate(pVM, pVCpu);
2961}
2962
2963
2964#undef LOG_GROUP
2965#define LOG_GROUP LOG_GROUP_REM
2966
2967
2968/**
2969 * Notify the recompiler about Address Gate 20 state change.
2970 *
2971 * This notification is required since A20 gate changes are
2972 * initiated from a device driver and the VM might just as
2973 * well be in REM mode as in RAW mode.
2974 *
2975 * @param pVM VM handle.
2976 * @param pVCpu VMCPU handle.
2977 * @param fEnable True if the gate should be enabled.
2978 * False if the gate should be disabled.
2979 */
2980REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2981{
2982 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2983 VM_ASSERT_EMT(pVM);
2984
2985 /** @todo SMP and the A20 gate... */
2986 if (pVM->rem.s.Env.pVCpu == pVCpu)
2987 {
2988 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2989 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2990 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2991 }
2992}
2993
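/*
 * Illustrative note (not part of the original source): inside the recompiler
 * the A20 gate boils down to masking guest physical addresses with
 * Env.a20_mask; with the gate disabled, bit 20 is cleared and addresses wrap
 * at 1MB like on an 8086. The helper name is invented.
 */
#if 0 /* illustrative only */
static inline target_ulong remR3ExampleApplyA20(CPUX86State *env, target_ulong GCPhys)
{
    return GCPhys & env->a20_mask;  /* RT_BIT(20) is clear in the mask when the gate is off */
}
#endif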
2994
2995/**
2996 * Replays the handler notification changes.
2997 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2998 *
2999 * @param pVM VM handle.
3000 */
3001REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
3002{
3003 /*
3004 * Replay the flushes.
3005 */
3006 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3007 VM_ASSERT_EMT(pVM);
3008
3009 /** @todo this isn't ensuring correct replay order. */
3010 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3011 {
3012 uint32_t idxNext;
3013 uint32_t idxRevHead;
3014 uint32_t idxHead;
3015#ifdef VBOX_STRICT
3016 int32_t c = 0;
3017#endif
3018
3019 /* Lockless purging of pending notifications. */
3020 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3021 if (idxHead == UINT32_MAX)
3022 return;
3023 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3024
3025 /*
3026 * Reverse the list to process it in FIFO order.
3027 */
3028 idxRevHead = UINT32_MAX;
3029 do
3030 {
3031 /* Save the index of the next rec. */
3032 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3033 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3034 /* Push the record onto the reversed list. */
3035 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3036 idxRevHead = idxHead;
3037 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3038 /* Advance. */
3039 idxHead = idxNext;
3040 } while (idxHead != UINT32_MAX);
3041
3042 /*
3043     * Loop through the list, reinserting the records into the free list as they are
3044     * processed, to avoid having other EMTs run out of entries while we're flushing.
3045 */
3046 idxHead = idxRevHead;
3047 do
3048 {
3049 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3050 uint32_t idxCur;
3051 Assert(--c >= 0);
3052
3053 switch (pCur->enmKind)
3054 {
3055 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3056 remR3NotifyHandlerPhysicalRegister(pVM,
3057 pCur->u.PhysicalRegister.enmType,
3058 pCur->u.PhysicalRegister.GCPhys,
3059 pCur->u.PhysicalRegister.cb,
3060 pCur->u.PhysicalRegister.fHasHCHandler);
3061 break;
3062
3063 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3064 remR3NotifyHandlerPhysicalDeregister(pVM,
3065 pCur->u.PhysicalDeregister.enmType,
3066 pCur->u.PhysicalDeregister.GCPhys,
3067 pCur->u.PhysicalDeregister.cb,
3068 pCur->u.PhysicalDeregister.fHasHCHandler,
3069 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3070 break;
3071
3072 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3073 remR3NotifyHandlerPhysicalModify(pVM,
3074 pCur->u.PhysicalModify.enmType,
3075 pCur->u.PhysicalModify.GCPhysOld,
3076 pCur->u.PhysicalModify.GCPhysNew,
3077 pCur->u.PhysicalModify.cb,
3078 pCur->u.PhysicalModify.fHasHCHandler,
3079 pCur->u.PhysicalModify.fRestoreAsRAM);
3080 break;
3081
3082 default:
3083 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3084 break;
3085 }
3086
3087 /*
3088 * Advance idxHead.
3089 */
3090 idxCur = idxHead;
3091 idxHead = pCur->idxNext;
3092 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3093
3094 /*
3095 * Put the record back into the free list.
3096 */
3097 do
3098 {
3099 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3100 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3101 ASMCompilerBarrier();
3102 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3103 } while (idxHead != UINT32_MAX);
3104
3105#ifdef VBOX_STRICT
3106 if (pVM->cCpus == 1)
3107 {
3108 unsigned c;
3109 /* Check that all records are now on the free list. */
3110 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3111 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3112 c++;
3113 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3114 }
3115#endif
3116 }
3117}
3118
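/*
 * Illustrative sketch (not part of the original source): the producer side of
 * the lockless queue drained above -- pop a record off the free list so it can
 * be filled in and pushed onto the pending LIFO. The function name is
 * invented, and the real producer also raises VM_FF_REM_HANDLER_NOTIFY.
 */
#if 0 /* illustrative only */
static PREMHANDLERNOTIFICATION remR3ExampleAllocNotification(PVM pVM)
{
    uint32_t idx, idxNext;
    do
    {
        idx = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
        if (idx == UINT32_MAX)
            return NULL;                        /* free list exhausted */
        idxNext = pVM->rem.s.aHandlerNotifications[idx].idxNext;
    } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxNext, idx));
    return &pVM->rem.s.aHandlerNotifications[idx];
}
#endif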
3119
3120/**
3121 * Notify REM about changed code page.
3122 *
3123 * @returns VBox status code.
3124 * @param pVM VM handle.
3125 * @param pVCpu VMCPU handle.
3126 * @param pvCodePage Code page address
3127 */
3128REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3129{
3130#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3131 int rc;
3132 RTGCPHYS PhysGC;
3133 uint64_t flags;
3134
3135 VM_ASSERT_EMT(pVM);
3136
3137 /*
3138 * Get the physical page address.
3139 */
3140 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3141 if (rc == VINF_SUCCESS)
3142 {
3143 /*
3144 * Sync the required registers and flush the whole page.
3145         * (It's easier to do the whole page than to notify it about each physical
3146         * byte that was changed.)
3147 */
3148 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3149 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3150 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3151 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3152
3153 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3154 }
3155#endif
3156 return VINF_SUCCESS;
3157}
3158
3159
3160/**
3161 * Notification about a successful MMR3PhysRegister() call.
3162 *
3163 * @param pVM VM handle.
3164 * @param   GCPhys      The physical address of the RAM.
3165 * @param cb Size of the memory.
3166 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3167 */
3168REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3169{
3170 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3171 VM_ASSERT_EMT(pVM);
3172
3173 /*
3174 * Validate input - we trust the caller.
3175 */
3176 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3177 Assert(cb);
3178 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3179    AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3180
3181 /*
3182 * Base ram? Update GCPhysLastRam.
3183 */
3184 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3185 {
3186 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3187 {
3188 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3189 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3190 }
3191 }
3192
3193 /*
3194     * Register the RAM.
3195 */
3196 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3197
3198 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3199 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3200 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3201
3202 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3203}
3204
3205
3206/**
3207 * Notification about a successful MMR3PhysRomRegister() call.
3208 *
3209 * @param pVM VM handle.
3210 * @param GCPhys The physical address of the ROM.
3211 * @param cb The size of the ROM.
3212 * @param pvCopy Pointer to the ROM copy.
3213 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3214 *                      This function will be called whenever the protection of the
3215 * shadow ROM changes (at reset and end of POST).
3216 */
3217REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3218{
3219 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3220 VM_ASSERT_EMT(pVM);
3221
3222 /*
3223 * Validate input - we trust the caller.
3224 */
3225 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3226 Assert(cb);
3227 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3228
3229 /*
3230     * Register the ROM.
3231 */
3232 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3233
3234 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3235 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3236 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3237
3238 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3239}
3240
3241
3242/**
3243 * Notification about a successful memory deregistration or reservation.
3244 *
3245 * @param pVM VM Handle.
3246 * @param GCPhys Start physical address.
3247 * @param cb The size of the range.
3248 */
3249REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3250{
3251 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3252 VM_ASSERT_EMT(pVM);
3253
3254 /*
3255 * Validate input - we trust the caller.
3256 */
3257 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3258 Assert(cb);
3259 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3260
3261 /*
3262     * Unassign the memory.
3263 */
3264 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3265
3266 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3267 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3268 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3269
3270 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3271}
3272
3273
3274/**
3275 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3276 *
3277 * @param pVM VM Handle.
3278 * @param enmType Handler type.
3279 * @param GCPhys Handler range address.
3280 * @param cb Size of the handler range.
3281 * @param fHasHCHandler Set if the handler has a HC callback function.
3282 *
3283 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3284 * Handler memory type to memory which has no HC handler.
3285 */
3286static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3287{
3288 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3289 enmType, GCPhys, cb, fHasHCHandler));
3290
3291 VM_ASSERT_EMT(pVM);
3292 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3293 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3294
3295
3296 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3297
3298 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3299 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3300 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3301 else if (fHasHCHandler)
3302 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3303 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3304
3305 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3306}
3307
3308/**
3309 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3310 *
3311 * @param pVM VM Handle.
3312 * @param enmType Handler type.
3313 * @param GCPhys Handler range address.
3314 * @param cb Size of the handler range.
3315 * @param fHasHCHandler Set if the handler has a HC callback function.
3316 *
3317 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3318 * Handler memory type to memory which has no HC handler.
3319 */
3320REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3321{
3322 REMR3ReplayHandlerNotifications(pVM);
3323
3324 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3325}
3326
3327/**
3328 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3329 *
3330 * @param pVM VM Handle.
3331 * @param enmType Handler type.
3332 * @param GCPhys Handler range address.
3333 * @param cb Size of the handler range.
3334 * @param fHasHCHandler Set if the handler has a HC callback function.
3335 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3336 */
3337static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3338{
3339 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3340 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3341 VM_ASSERT_EMT(pVM);
3342
3343
3344 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3345
3346 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3347 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3348 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3349 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3350 else if (fHasHCHandler)
3351 {
3352 if (!fRestoreAsRAM)
3353 {
3354 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3355 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3356 }
3357 else
3358 {
3359 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3360 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3361 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3362 }
3363 }
3364 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3365
3366 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3367}
3368
3369/**
3370 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3371 *
3372 * @param pVM VM Handle.
3373 * @param enmType Handler type.
3374 * @param GCPhys Handler range address.
3375 * @param cb Size of the handler range.
3376 * @param fHasHCHandler Set if the handler has a HC callback function.
3377 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3378 */
3379REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3380{
3381 REMR3ReplayHandlerNotifications(pVM);
3382 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3383}
3384
3385
3386/**
3387 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3388 *
3389 * @param pVM VM Handle.
3390 * @param enmType Handler type.
3391 * @param GCPhysOld Old handler range address.
3392 * @param GCPhysNew New handler range address.
3393 * @param cb Size of the handler range.
3394 * @param fHasHCHandler Set if the handler has a HC callback function.
3395 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3396 */
3397static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3398{
3399 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3400 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3401 VM_ASSERT_EMT(pVM);
3402 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3403
3404 if (fHasHCHandler)
3405 {
3406 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3407
3408 /*
3409 * Reset the old page.
3410 */
3411 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3412 if (!fRestoreAsRAM)
3413 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3414 else
3415 {
3416 /* This is not perfect, but it'll do for PD monitoring... */
3417 Assert(cb == PAGE_SIZE);
3418 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3419 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3420 }
3421
3422 /*
3423 * Update the new page.
3424 */
3425 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3426 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3427 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3428 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3429
3430 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3431 }
3432}
3433
3434/**
3435 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3436 *
3437 * @param pVM VM Handle.
3438 * @param enmType Handler type.
3439 * @param GCPhysOld Old handler range address.
3440 * @param GCPhysNew New handler range address.
3441 * @param cb Size of the handler range.
3442 * @param fHasHCHandler Set if the handler has a HC callback function.
3443 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3444 */
3445REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3446{
3447 REMR3ReplayHandlerNotifications(pVM);
3448
3449 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3450}
3451
3452/**
3453 * Checks if we're handling access to this page or not.
3454 *
3455 * @returns true if we're trapping access.
3456 * @returns false if we aren't.
3457 * @param pVM The VM handle.
3458 * @param GCPhys The physical address.
3459 *
3460 * @remark This function will only work correctly in VBOX_STRICT builds!
3461 */
3462REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3463{
3464#ifdef VBOX_STRICT
3465 unsigned long off;
3466 REMR3ReplayHandlerNotifications(pVM);
3467
3468 off = get_phys_page_offset(GCPhys);
3469 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3470 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3471 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3472#else
3473 return false;
3474#endif
3475}
3476
3477
3478/**
3479 * Deals with a rare case in get_phys_addr_code where the code
3480 * is being monitored.
3481 *
3482 * It could also be an MMIO page, in which case we will raise a fatal error.
3483 *
3484 * @returns The physical address corresponding to addr.
3485 * @param env The cpu environment.
3486 * @param addr The virtual address.
3487 * @param pTLBEntry The TLB entry.
3488 */
3489target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3490 target_ulong addr,
3491 CPUTLBEntry *pTLBEntry,
3492 target_phys_addr_t ioTLBEntry)
3493{
3494 PVM pVM = env->pVM;
3495
3496 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3497 {
3498        /* If code memory is being monitored, the appropriate IOTLB entry will have
3499           the handler IO type and the addend will provide the real physical address,
3500           no matter whether we store the VA in the TLB or not, as handlers are always passed the PA. */
3501 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3502 return ret;
3503 }
3504 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3505 "*** handlers\n",
3506 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3507 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3508 LogRel(("*** mmio\n"));
3509 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3510 LogRel(("*** phys\n"));
3511 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3512 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3513 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3514 AssertFatalFailed();
3515}
3516
3517/**
3518 * Read guest RAM and ROM.
3519 *
3520 * @param SrcGCPhys The source address (guest physical).
3521 * @param pvDst The destination address.
3522 * @param cb Number of bytes to read.
3523 */
3524void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3525{
3526 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3527 VBOX_CHECK_ADDR(SrcGCPhys);
3528 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3529#ifdef VBOX_DEBUG_PHYS
3530 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3531#endif
3532 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3533}
3534
3535
3536/**
3537 * Read guest RAM and ROM, unsigned 8-bit.
3538 *
3539 * @param SrcGCPhys The source address (guest physical).
3540 */
3541RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3542{
3543 uint8_t val;
3544 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3545 VBOX_CHECK_ADDR(SrcGCPhys);
3546 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3547 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3548#ifdef VBOX_DEBUG_PHYS
3549 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3550#endif
3551 return val;
3552}
3553
3554
3555/**
3556 * Read guest RAM and ROM, signed 8-bit.
3557 *
3558 * @param SrcGCPhys The source address (guest physical).
3559 */
3560RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3561{
3562 int8_t val;
3563 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3564 VBOX_CHECK_ADDR(SrcGCPhys);
3565 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3566 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3567#ifdef VBOX_DEBUG_PHYS
3568 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3569#endif
3570 return val;
3571}
3572
3573
3574/**
3575 * Read guest RAM and ROM, unsigned 16-bit.
3576 *
3577 * @param SrcGCPhys The source address (guest physical).
3578 */
3579RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3580{
3581 uint16_t val;
3582 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3583 VBOX_CHECK_ADDR(SrcGCPhys);
3584 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3585 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3586#ifdef VBOX_DEBUG_PHYS
3587 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3588#endif
3589 return val;
3590}
3591
3592
3593/**
3594 * Read guest RAM and ROM, signed 16-bit.
3595 *
3596 * @param SrcGCPhys The source address (guest physical).
3597 */
3598RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3599{
3600 int16_t val;
3601 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3602 VBOX_CHECK_ADDR(SrcGCPhys);
3603 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3604 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3605#ifdef VBOX_DEBUG_PHYS
3606 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3607#endif
3608 return val;
3609}
3610
3611
3612/**
3613 * Read guest RAM and ROM, unsigned 32-bit.
3614 *
3615 * @param SrcGCPhys The source address (guest physical).
3616 */
3617RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3618{
3619 uint32_t val;
3620 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3621 VBOX_CHECK_ADDR(SrcGCPhys);
3622 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3623 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3624#ifdef VBOX_DEBUG_PHYS
3625 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3626#endif
3627 return val;
3628}
3629
3630
3631/**
3632 * Read guest RAM and ROM, signed 32-bit.
3633 *
3634 * @param SrcGCPhys The source address (guest physical).
3635 */
3636RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3637{
3638 int32_t val;
3639 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3640 VBOX_CHECK_ADDR(SrcGCPhys);
3641 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3642 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3643#ifdef VBOX_DEBUG_PHYS
3644 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3645#endif
3646 return val;
3647}
3648
3649
3650/**
3651 * Read guest RAM and ROM, unsigned 64-bit.
3652 *
3653 * @param SrcGCPhys The source address (guest physical).
3654 */
3655uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3656{
3657 uint64_t val;
3658 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3659 VBOX_CHECK_ADDR(SrcGCPhys);
3660 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3661 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3662#ifdef VBOX_DEBUG_PHYS
3663 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3664#endif
3665 return val;
3666}
3667
3668
3669/**
3670 * Read guest RAM and ROM, signed 64-bit.
3671 *
3672 * @param SrcGCPhys The source address (guest physical).
3673 */
3674int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3675{
3676 int64_t val;
3677 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3678 VBOX_CHECK_ADDR(SrcGCPhys);
3679 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3680 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3681#ifdef VBOX_DEBUG_PHYS
3682 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3683#endif
3684 return val;
3685}
3686
3687
3688/**
3689 * Write guest RAM.
3690 *
3691 * @param DstGCPhys The destination address (guest physical).
3692 * @param pvSrc The source address.
3693 * @param cb Number of bytes to write.
3694 */
3695void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3696{
3697 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3698 VBOX_CHECK_ADDR(DstGCPhys);
3699 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3700 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3701#ifdef VBOX_DEBUG_PHYS
3702 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3703#endif
3704}
3705
3706
3707/**
3708 * Write guest RAM, unsigned 8-bit.
3709 *
3710 * @param DstGCPhys The destination address (guest physical).
3711 * @param val Value
3712 */
3713void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3714{
3715 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3716 VBOX_CHECK_ADDR(DstGCPhys);
3717 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3718 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3719#ifdef VBOX_DEBUG_PHYS
3720 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3721#endif
3722}
3723
3724
3725/**
3726 * Write guest RAM, unsigned 8-bit.
3727 *
3728 * @param DstGCPhys The destination address (guest physical).
3729 * @param val Value
3730 */
3731void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3732{
3733 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3734 VBOX_CHECK_ADDR(DstGCPhys);
3735 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3736 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3737#ifdef VBOX_DEBUG_PHYS
3738 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3739#endif
3740}
3741
3742
3743/**
3744 * Write guest RAM, unsigned 32-bit.
3745 *
3746 * @param DstGCPhys The destination address (guest physical).
3747 * @param val Value
3748 */
3749void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3750{
3751 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3752 VBOX_CHECK_ADDR(DstGCPhys);
3753 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3754 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3755#ifdef VBOX_DEBUG_PHYS
3756 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3757#endif
3758}
3759
3760
3761/**
3762 * Write guest RAM, unsigned 64-bit.
3763 *
3764 * @param DstGCPhys The destination address (guest physical).
3765 * @param val Value
3766 */
3767void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3768{
3769 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3770 VBOX_CHECK_ADDR(DstGCPhys);
3771 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3772 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3773#ifdef VBOX_DEBUG_PHYS
3774 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3775#endif
3776}
3777
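/* All of the accessors above follow one pattern: validate the address with
 * VBOX_CHECK_ADDR, bracket the PGM call in the read/write STAM profile, and
 * log the access when VBOX_DEBUG_PHYS is defined. Only the PGM entry point
 * and the operand width differ between them. */
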
3778#undef LOG_GROUP
3779#define LOG_GROUP LOG_GROUP_REM_MMIO
3780
3781/** Read MMIO memory. */
3782static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3783{
3784 uint32_t u32 = 0;
3785 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3786 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3787 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3788 return u32;
3789}
3790
3791/** Read MMIO memory. */
3792static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3793{
3794 uint32_t u32 = 0;
3795 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3796 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3797 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3798 return u32;
3799}
3800
3801/** Read MMIO memory. */
3802static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3803{
3804 uint32_t u32 = 0;
3805 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3806 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3807 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3808 return u32;
3809}
3810
3811/** Write to MMIO memory. */
3812static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3813{
3814 int rc;
3815 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3816 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3817 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3818}
3819
3820/** Write to MMIO memory. */
3821static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3822{
3823 int rc;
3824 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3825 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3826 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3827}
3828
3829/** Write to MMIO memory. */
3830static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3831{
3832 int rc;
3833 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3834 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3835 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3836}
3837
3838
3839#undef LOG_GROUP
3840#define LOG_GROUP LOG_GROUP_REM_HANDLER
3841
3842/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3843
3844static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3845{
3846 uint8_t u8;
3847 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3848 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3849 return u8;
3850}
3851
3852static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3853{
3854 uint16_t u16;
3855 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3856 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3857 return u16;
3858}
3859
3860static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3861{
3862 uint32_t u32;
3863 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3864 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3865 return u32;
3866}
3867
3868static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3869{
3870 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3871 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3872}
3873
3874static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3875{
3876 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3877 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3878}
3879
3880static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3881{
3882 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3883 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3884}
3885
3886/* -+- disassembly -+- */
3887
3888#undef LOG_GROUP
3889#define LOG_GROUP LOG_GROUP_REM_DISAS
3890
3891
3892/**
3893 * Enables or disables single stepped disassembly.
3894 *
3895 * @returns VBox status code.
3896 * @param pVM VM handle.
3897 * @param fEnable To enable set this flag, to disable clear it.
3898 */
3899static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3900{
3901 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3902 VM_ASSERT_EMT(pVM);
3903
3904 if (fEnable)
3905 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3906 else
3907 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3908#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3909 cpu_single_step(&pVM->rem.s.Env, fEnable);
3910#endif
3911 return VINF_SUCCESS;
3912}
3913
3914
3915/**
3916 * Enables or disables single stepped disassembly.
3917 *
3918 * @returns VBox status code.
3919 * @param pVM VM handle.
3920 * @param fEnable To enable set this flag, to disable clear it.
3921 */
3922REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3923{
3924 int rc;
3925
3926 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3927 if (VM_IS_EMT(pVM))
3928 return remR3DisasEnableStepping(pVM, fEnable);
3929
3930 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3931 AssertRC(rc);
3932 return rc;
3933}
3934
3935
3936#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3937/**
3938 * External Debugger Command: .remstep [on|off|1|0]
3939 */
3940static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3941{
3942 int rc;
3943
3944 if (cArgs == 0)
3945 /*
3946 * Print the current status.
3947 */
3948 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3949 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3950 else
3951 {
3952 /*
3953 * Convert the argument and change the mode.
3954 */
3955 bool fEnable;
3956 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3957 if (RT_SUCCESS(rc))
3958 {
3959 rc = REMR3DisasEnableStepping(pVM, fEnable);
3960 if (RT_SUCCESS(rc))
3961 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3962 else
3963 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3964 }
3965 else
3966 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3967 }
3968 return rc;
3969}
3970#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
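
/* Example debugger session for the .remstep command above; the responses are
 * the literal DBGCCmdHlpPrintf strings, the prompt is illustrative:
 *   VBoxDbg> .remstep on
 *   DisasStepping was enabled
 *   VBoxDbg> .remstep
 *   DisasStepping is enabled
 */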
3971
3972
3973/**
3974 * Disassembles one instruction and prints it to the log.
3975 *
3976 * @returns Success indicator.
3977 * @param env Pointer to the recompiler CPU structure.
3978 * @param f32BitCode Whether the code should be disassembled as 16 or
3979 * 32 bit code. If -1 the CS selector will be
3980 * inspected.
3981 * @param pszPrefix String to put in front of the log output.
3982 */
3983bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3984{
3985 PVM pVM = env->pVM;
3986 const bool fLog = LogIsEnabled();
3987 const bool fLog2 = LogIs2Enabled();
3988 int rc = VINF_SUCCESS;
3989
3990 /*
3991 * Don't bother if there ain't any log output to do.
3992 */
3993 if (!fLog && !fLog2)
3994 return true;
3995
3996 /*
3997 * Update the state so DBGF reads the correct register values.
3998 */
3999 remR3StateUpdate(pVM, env->pVCpu);
4000
4001 /*
4002 * Log registers if requested.
4003 */
4004 if (fLog2)
4005 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
4006
4007 /*
4008 * Disassemble to log.
4009 */
4010 if (fLog)
4011 {
4012 PVMCPU pVCpu = VMMGetCpu(pVM);
4013 char szBuf[256];
4014 szBuf[0] = '\0';
4015        rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
4016 pVCpu->idCpu,
4017 0, /* Sel */
4018 0, /* GCPtr */
4019 DBGF_DISAS_FLAGS_CURRENT_GUEST
4020 | DBGF_DISAS_FLAGS_DEFAULT_MODE
4021 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
4022 szBuf,
4023 sizeof(szBuf),
4024 NULL);
4025 if (RT_FAILURE(rc))
4026 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4027 if (pszPrefix && *pszPrefix)
4028 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4029 else
4030 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4031 }
4032
4033 return RT_SUCCESS(rc);
4034}
4035
4036
4037/**
4038 * Disassemble recompiled code.
4039 *
4040 * @param phFile Ignored, logfile usually.
4041 * @param pvCode Pointer to the code block.
4042 * @param cb Size of the code block.
4043 */
4044void disas(FILE *phFile, void *pvCode, unsigned long cb)
4045{
4046 if (LogIs2Enabled())
4047 {
4048 unsigned off = 0;
4049 char szOutput[256];
4050 DISCPUSTATE Cpu;
4051#ifdef RT_ARCH_X86
4052 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4053#else
4054 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4055#endif
4056
4057 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4058 while (off < cb)
4059 {
4060 uint32_t cbInstr;
4061 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4062 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4063 if (RT_SUCCESS(rc))
4064 RTLogPrintf("%s", szOutput);
4065 else
4066 {
4067 RTLogPrintf("disas error %Rrc\n", rc);
4068 cbInstr = 1;
4069 }
4070 off += cbInstr;
4071 }
4072 }
4073}
4074
4075
4076/**
4077 * Disassemble guest code.
4078 *
4079 * @param phFile Ignored, logfile usually.
4080 * @param uCode The guest address of the code to disassemble. (flat?)
4081 * @param cb Number of bytes to disassemble.
4082 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4083 */
4084void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4085{
4086 if (LogIs2Enabled())
4087 {
4088 PVM pVM = cpu_single_env->pVM;
4089 PVMCPU pVCpu = cpu_single_env->pVCpu;
4090 RTSEL cs;
4091 RTGCUINTPTR eip;
4092
4093 Assert(pVCpu);
4094
4095 /*
4096 * Update the state so DBGF reads the correct register values (flags).
4097 */
4098 remR3StateUpdate(pVM, pVCpu);
4099
4100 /*
4101 * Do the disassembling.
4102 */
4103 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4104 cs = cpu_single_env->segs[R_CS].selector;
4105 eip = uCode - cpu_single_env->segs[R_CS].base;
4106 for (;;)
4107 {
4108 char szBuf[256];
4109 uint32_t cbInstr;
4110 int rc = DBGFR3DisasInstrEx(pVM,
4111 pVCpu->idCpu,
4112 cs,
4113 eip,
4114 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4115 szBuf, sizeof(szBuf),
4116 &cbInstr);
4117 if (RT_SUCCESS(rc))
4118 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4119 else
4120 {
4121 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4122 cbInstr = 1;
4123 }
4124
4125 /* next */
4126 if (cb <= cbInstr)
4127 break;
4128 cb -= cbInstr;
4129 uCode += cbInstr;
4130 eip += cbInstr;
4131 }
4132 }
4133}
4134
4135
4136/**
4137 * Looks up a guest symbol.
4138 *
4139 * @returns Pointer to symbol name. This is a static buffer.
4140 * @param orig_addr The address in question.
4141 */
4142const char *lookup_symbol(target_ulong orig_addr)
4143{
4144 PVM pVM = cpu_single_env->pVM;
4145 RTGCINTPTR off = 0;
4146 RTDBGSYMBOL Sym;
4147 DBGFADDRESS Addr;
4148
4149 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4150 if (RT_SUCCESS(rc))
4151 {
4152 static char szSym[sizeof(Sym.szName) + 48];
4153 if (!off)
4154 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4155 else if (off > 0)
4156 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4157 else
4158 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4159 return szSym;
4160 }
4161 return "<N/A>";
4162}
4163
4164
4165#undef LOG_GROUP
4166#define LOG_GROUP LOG_GROUP_REM
4167
4168
4169/* -+- FF notifications -+- */
4170
4171
4172/**
4173 * Notification about a pending interrupt.
4174 *
4175 * @param pVM VM Handle.
4176 * @param pVCpu VMCPU Handle.
4177 * @param u8Interrupt Interrupt
4178 * @thread The emulation thread.
4179 */
4180REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4181{
4182 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4183 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4184}
4185
4186/**
4187 * Notification about a pending interrupt.
4188 *
4189 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4190 * @param pVM VM Handle.
4191 * @param pVCpu VMCPU Handle.
4192 * @thread The emulation thread.
4193 */
4194REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4195{
4196 return pVM->rem.s.u32PendingInterrupt;
4197}
4198
4199/**
4200 * Notification about the interrupt FF being set.
4201 *
4202 * @param pVM VM Handle.
4203 * @param pVCpu VMCPU Handle.
4204 * @thread The emulation thread.
4205 */
4206REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4207{
4208#ifndef IEM_VERIFICATION_MODE
4209 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4210 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4211 if (pVM->rem.s.fInREM)
4212 {
4213 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4214 CPU_INTERRUPT_EXTERNAL_HARD);
4215 }
4216#endif
4217}
4218
4219
4220/**
4221 * Notification about the interrupt FF being cleared.
4222 *
4223 * @param pVM VM Handle.
4224 * @param pVCpu VMCPU Handle.
4225 * @thread Any.
4226 */
4227REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4228{
4229 LogFlow(("REMR3NotifyInterruptClear:\n"));
4230 if (pVM->rem.s.fInREM)
4231 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4232}
4233
4234
4235/**
4236 * Notification about pending timer(s).
4237 *
4238 * @param pVM VM Handle.
4239 * @param pVCpuDst The target cpu for this notification.
4240 * TM will not broadcast pending timer events, but use
4241 * a dedicated EMT for them. So, only interrupt REM
4242 * execution if the given CPU is executing in REM.
4243 * @thread Any.
4244 */
4245REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4246{
4247#ifndef IEM_VERIFICATION_MODE
4248#ifndef DEBUG_bird
4249 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4250#endif
4251 if (pVM->rem.s.fInREM)
4252 {
4253 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4254 {
4255 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4256 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4257 CPU_INTERRUPT_EXTERNAL_TIMER);
4258 }
4259 else
4260 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4261 }
4262 else
4263 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4264#endif
4265}
4266
4267
4268/**
4269 * Notification about pending DMA transfers.
4270 *
4271 * @param pVM VM Handle.
4272 * @thread Any.
4273 */
4274REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4275{
4276#ifndef IEM_VERIFICATION_MODE
4277 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4278 if (pVM->rem.s.fInREM)
4279 {
4280 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4281 CPU_INTERRUPT_EXTERNAL_DMA);
4282 }
4283#endif
4284}
4285
4286
4287/**
4288 * Notification about pending queue items.
4289 *
4290 * @param pVM VM Handle.
4291 * @thread Any.
4292 */
4293REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4294{
4295#ifndef IEM_VERIFICATION_MODE
4296 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4297 if (pVM->rem.s.fInREM)
4298 {
4299 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4300 CPU_INTERRUPT_EXTERNAL_EXIT);
4301 }
4302#endif
4303}
4304
4305
4306/**
4307 * Notification about pending FF set by an external thread.
4308 *
4309 * @param pVM VM handle.
4310 * @thread Any.
4311 */
4312REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4313{
4314#ifndef IEM_VERIFICATION_MODE
4315 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4316 if (pVM->rem.s.fInREM)
4317 {
4318 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4319 CPU_INTERRUPT_EXTERNAL_EXIT);
4320 }
4321#endif
4322}
4323
4324
4325#ifdef VBOX_WITH_STATISTICS
4326void remR3ProfileStart(int statcode)
4327{
4328 STAMPROFILEADV *pStat;
4329    switch (statcode)
4330 {
4331 case STATS_EMULATE_SINGLE_INSTR:
4332 pStat = &gStatExecuteSingleInstr;
4333 break;
4334 case STATS_QEMU_COMPILATION:
4335 pStat = &gStatCompilationQEmu;
4336 break;
4337 case STATS_QEMU_RUN_EMULATED_CODE:
4338 pStat = &gStatRunCodeQEmu;
4339 break;
4340 case STATS_QEMU_TOTAL:
4341 pStat = &gStatTotalTimeQEmu;
4342 break;
4343 case STATS_QEMU_RUN_TIMERS:
4344 pStat = &gStatTimers;
4345 break;
4346 case STATS_TLB_LOOKUP:
4347            pStat = &gStatTBLookup;
4348            break;
4349        case STATS_IRQ_HANDLING:
4350            pStat = &gStatIRQ;
4351 break;
4352 case STATS_RAW_CHECK:
4353 pStat = &gStatRawCheck;
4354 break;
4355
4356 default:
4357 AssertMsgFailed(("unknown stat %d\n", statcode));
4358 return;
4359 }
4360 STAM_PROFILE_ADV_START(pStat, a);
4361}
4362
4363
4364void remR3ProfileStop(int statcode)
4365{
4366 STAMPROFILEADV *pStat;
4367    switch (statcode)
4368 {
4369 case STATS_EMULATE_SINGLE_INSTR:
4370 pStat = &gStatExecuteSingleInstr;
4371 break;
4372 case STATS_QEMU_COMPILATION:
4373 pStat = &gStatCompilationQEmu;
4374 break;
4375 case STATS_QEMU_RUN_EMULATED_CODE:
4376 pStat = &gStatRunCodeQEmu;
4377 break;
4378 case STATS_QEMU_TOTAL:
4379 pStat = &gStatTotalTimeQEmu;
4380 break;
4381 case STATS_QEMU_RUN_TIMERS:
4382 pStat = &gStatTimers;
4383 break;
4384 case STATS_TLB_LOOKUP:
4385            pStat = &gStatTBLookup;
4386            break;
4387        case STATS_IRQ_HANDLING:
4388            pStat = &gStatIRQ;
4389 break;
4390 case STATS_RAW_CHECK:
4391 pStat = &gStatRawCheck;
4392 break;
4393 default:
4394 AssertMsgFailed(("unknown stat %d\n", statcode));
4395 return;
4396 }
4397 STAM_PROFILE_ADV_STOP(pStat, a);
4398}
4399#endif
4400
4401/**
4402 * Raise an RC, force rem exit.
4403 *
4404 * @param pVM VM handle.
4405 * @param rc The rc.
4406 */
4407void remR3RaiseRC(PVM pVM, int rc)
4408{
4409 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4410 Assert(pVM->rem.s.fInREM);
4411 VM_ASSERT_EMT(pVM);
4412 pVM->rem.s.rc = rc;
4413 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4414}
4415
4416
4417/* -+- timers -+- */
4418
4419uint64_t cpu_get_tsc(CPUX86State *env)
4420{
4421 STAM_COUNTER_INC(&gStatCpuGetTSC);
4422 return TMCpuTickGet(env->pVCpu);
4423}
4424
4425
4426/* -+- interrupts -+- */
4427
4428void cpu_set_ferr(CPUX86State *env)
4429{
4430 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4431 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4432}
4433
4434int cpu_get_pic_interrupt(CPUX86State *env)
4435{
4436 uint8_t u8Interrupt;
4437 int rc;
4438
4439 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4440 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4441 * with the (a)pic.
4442 */
4443 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4444 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4445 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4446 * remove this kludge. */
4447 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4448 {
4449 rc = VINF_SUCCESS;
4450 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4451 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4452 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4453 }
4454 else
4455 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4456
4457 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4458 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4459 if (RT_SUCCESS(rc))
4460 {
4461 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4462 env->interrupt_request |= CPU_INTERRUPT_HARD;
4463 return u8Interrupt;
4464 }
4465 return -1;
4466}
4467
4468
4469/* -+- local apic -+- */
4470
4471#if 0 /* CPUMSetGuestMsr does this now. */
4472void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4473{
4474 int rc = PDMApicSetBase(env->pVM, val);
4475 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4476}
4477#endif
4478
4479uint64_t cpu_get_apic_base(CPUX86State *env)
4480{
4481 uint64_t u64;
4482 int rc = PDMApicGetBase(env->pVM, &u64);
4483 if (RT_SUCCESS(rc))
4484 {
4485 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4486 return u64;
4487 }
4488 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4489 return 0;
4490}
4491
4492void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4493{
4494 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4495 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4496}
4497
4498uint8_t cpu_get_apic_tpr(CPUX86State *env)
4499{
4500 uint8_t u8;
4501 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4502 if (RT_SUCCESS(rc))
4503 {
4504 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4505 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4506 }
4507 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4508 return 0;
4509}
4510
4511/**
4512 * Read an MSR.
4513 *
4514 * @retval 0 success.
4515 * @retval -1 failure, raise \#GP(0).
4516 * @param env The cpu state.
4517 * @param idMsr The MSR to read.
4518 * @param puValue Where to return the value.
4519 */
4520int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4521{
4522 Assert(env->pVCpu);
4523 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4524}
4525
4526/**
4527 * Write to an MSR.
4528 *
4529 * @retval 0 success.
4530 * @retval -1 failure, raise \#GP(0).
4531 * @param env The cpu state.
4532 * @param idMsr The MSR to write.
4533 * @param uValue The value to write.
4534 */
4535int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4536{
4537 Assert(env->pVCpu);
4538 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4539}
4540
4541/* -+- I/O Ports -+- */
4542
4543#undef LOG_GROUP
4544#define LOG_GROUP LOG_GROUP_REM_IOPORT
4545
4546void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4547{
4548 int rc;
4549
4550 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4551 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4552
4553 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4554 if (RT_LIKELY(rc == VINF_SUCCESS))
4555 return;
4556 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4557 {
4558 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4559 remR3RaiseRC(env->pVM, rc);
4560 return;
4561 }
4562 remAbort(rc, __FUNCTION__);
4563}
4564
4565void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4566{
4567 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4568 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4569 if (RT_LIKELY(rc == VINF_SUCCESS))
4570 return;
4571 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4572 {
4573 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4574 remR3RaiseRC(env->pVM, rc);
4575 return;
4576 }
4577 remAbort(rc, __FUNCTION__);
4578}
4579
4580void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4581{
4582 int rc;
4583 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4584 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4585 if (RT_LIKELY(rc == VINF_SUCCESS))
4586 return;
4587 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4588 {
4589 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4590 remR3RaiseRC(env->pVM, rc);
4591 return;
4592 }
4593 remAbort(rc, __FUNCTION__);
4594}
4595
4596uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4597{
4598 uint32_t u32 = 0;
4599 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4600 if (RT_LIKELY(rc == VINF_SUCCESS))
4601 {
4602 if (/*addr != 0x61 && */addr != 0x71)
4603 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4604 return (uint8_t)u32;
4605 }
4606 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4607 {
4608 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4609 remR3RaiseRC(env->pVM, rc);
4610 return (uint8_t)u32;
4611 }
4612 remAbort(rc, __FUNCTION__);
4613 return UINT8_C(0xff);
4614}
4615
4616uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4617{
4618 uint32_t u32 = 0;
4619 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4620 if (RT_LIKELY(rc == VINF_SUCCESS))
4621 {
4622 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4623 return (uint16_t)u32;
4624 }
4625 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4626 {
4627 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4628 remR3RaiseRC(env->pVM, rc);
4629 return (uint16_t)u32;
4630 }
4631 remAbort(rc, __FUNCTION__);
4632 return UINT16_C(0xffff);
4633}
4634
4635uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4636{
4637 uint32_t u32 = 0;
4638 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4639 if (RT_LIKELY(rc == VINF_SUCCESS))
4640 {
4641//if (addr==0x01f0 && u32 == 0x6b6d)
4642// loglevel = ~0;
4643 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4644 return u32;
4645 }
4646 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4647 {
4648 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4649 remR3RaiseRC(env->pVM, rc);
4650 return u32;
4651 }
4652 remAbort(rc, __FUNCTION__);
4653 return UINT32_C(0xffffffff);
4654}
4655
4656#undef LOG_GROUP
4657#define LOG_GROUP LOG_GROUP_REM
4658
4659
4660/* -+- helpers and misc other interfaces -+- */
4661
4662/**
4663 * Perform the CPUID instruction.
4664 *
4665 * @param env Pointer to the recompiler CPU structure.
4666 * @param idx The CPUID leaf (eax).
4667 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4668 * @param pvEAX Where to store eax.
4669 * @param pvEBX Where to store ebx.
4670 * @param pvECX Where to store ecx.
4671 * @param pvEDX Where to store edx.
4672 */
4673void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4674 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4675{
4676 NOREF(idxSub);
4677 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4678}
4679
4680
4681#if 0 /* not used */
4682/**
4683 * Interface for qemu hardware to report back fatal errors.
4684 */
4685void hw_error(const char *pszFormat, ...)
4686{
4687 /*
4688 * Bitch about it.
4689 */
4690 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4691 * this in my Odin32 tree at home! */
4692 va_list args;
4693 va_start(args, pszFormat);
4694 RTLogPrintf("fatal error in virtual hardware:");
4695 RTLogPrintfV(pszFormat, args);
4696 va_end(args);
4697 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4698
4699 /*
4700 * If we're in REM context we'll sync back the state before 'jumping' to
4701 * the EMs failure handling.
4702 */
4703 PVM pVM = cpu_single_env->pVM;
4704 if (pVM->rem.s.fInREM)
4705 REMR3StateBack(pVM);
4706 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4707 AssertMsgFailed(("EMR3FatalError returned!\n"));
4708}
4709#endif
4710
4711/**
4712 * Interface for the qemu cpu to report unhandled situation
4713 * raising a fatal VM error.
4714 */
4715void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4716{
4717 va_list va;
4718 PVM pVM;
4719 PVMCPU pVCpu;
4720 char szMsg[256];
4721
4722 /*
4723 * Bitch about it.
4724 */
4725 RTLogFlags(NULL, "nodisabled nobuffered");
4726 RTLogFlush(NULL);
4727
4728 va_start(va, pszFormat);
4729#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4730 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4731 unsigned cArgs = 0;
4732 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4733 const char *psz = strchr(pszFormat, '%');
4734 while (psz && cArgs < 6)
4735 {
4736 auArgs[cArgs++] = va_arg(va, uintptr_t);
4737 psz = strchr(psz + 1, '%');
4738 }
4739 switch (cArgs)
4740 {
4741 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4742 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4743 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4744 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4745 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4746 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4747 default:
4748 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4749 }
4750#else
4751 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4752#endif
4753 va_end(va);
4754
4755 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4756 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4757
4758 /*
4759 * If we're in REM context we'll sync back the state before 'jumping' to
4760 * the EMs failure handling.
4761 */
4762 pVM = cpu_single_env->pVM;
4763 pVCpu = cpu_single_env->pVCpu;
4764 Assert(pVCpu);
4765
4766 if (pVM->rem.s.fInREM)
4767 REMR3StateBack(pVM, pVCpu);
4768 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4769 AssertMsgFailed(("EMR3FatalError returned!\n"));
4770}
4771
4772
4773/**
4774 * Aborts the VM.
4775 *
4776 * @param rc VBox error code.
4777 * @param pszTip Hint about why/when this happened.
4778 */
4779void remAbort(int rc, const char *pszTip)
4780{
4781 PVM pVM;
4782 PVMCPU pVCpu;
4783
4784 /*
4785 * Bitch about it.
4786 */
4787 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4788 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4789
4790 /*
4791 * Jump back to where we entered the recompiler.
4792 */
4793 pVM = cpu_single_env->pVM;
4794 pVCpu = cpu_single_env->pVCpu;
4795 Assert(pVCpu);
4796
4797 if (pVM->rem.s.fInREM)
4798 REMR3StateBack(pVM, pVCpu);
4799
4800 EMR3FatalError(pVCpu, rc);
4801 AssertMsgFailed(("EMR3FatalError returned!\n"));
4802}
4803
4804
4805/**
4806 * Dumps a linux system call.
4807 * @param pVCpu VMCPU handle.
4808 */
4809void remR3DumpLnxSyscall(PVMCPU pVCpu)
4810{
4811 static const char *apsz[] =
4812 {
4813 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4814 "sys_exit",
4815 "sys_fork",
4816 "sys_read",
4817 "sys_write",
4818 "sys_open", /* 5 */
4819 "sys_close",
4820 "sys_waitpid",
4821 "sys_creat",
4822 "sys_link",
4823 "sys_unlink", /* 10 */
4824 "sys_execve",
4825 "sys_chdir",
4826 "sys_time",
4827 "sys_mknod",
4828 "sys_chmod", /* 15 */
4829 "sys_lchown16",
4830 "sys_ni_syscall", /* old break syscall holder */
4831 "sys_stat",
4832 "sys_lseek",
4833 "sys_getpid", /* 20 */
4834 "sys_mount",
4835 "sys_oldumount",
4836 "sys_setuid16",
4837 "sys_getuid16",
4838 "sys_stime", /* 25 */
4839 "sys_ptrace",
4840 "sys_alarm",
4841 "sys_fstat",
4842 "sys_pause",
4843 "sys_utime", /* 30 */
4844 "sys_ni_syscall", /* old stty syscall holder */
4845 "sys_ni_syscall", /* old gtty syscall holder */
4846 "sys_access",
4847 "sys_nice",
4848 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4849 "sys_sync",
4850 "sys_kill",
4851 "sys_rename",
4852 "sys_mkdir",
4853 "sys_rmdir", /* 40 */
4854 "sys_dup",
4855 "sys_pipe",
4856 "sys_times",
4857 "sys_ni_syscall", /* old prof syscall holder */
4858 "sys_brk", /* 45 */
4859 "sys_setgid16",
4860 "sys_getgid16",
4861 "sys_signal",
4862 "sys_geteuid16",
4863 "sys_getegid16", /* 50 */
4864 "sys_acct",
4865 "sys_umount", /* recycled never used phys() */
4866 "sys_ni_syscall", /* old lock syscall holder */
4867 "sys_ioctl",
4868 "sys_fcntl", /* 55 */
4869 "sys_ni_syscall", /* old mpx syscall holder */
4870 "sys_setpgid",
4871 "sys_ni_syscall", /* old ulimit syscall holder */
4872 "sys_olduname",
4873 "sys_umask", /* 60 */
4874 "sys_chroot",
4875 "sys_ustat",
4876 "sys_dup2",
4877 "sys_getppid",
4878 "sys_getpgrp", /* 65 */
4879 "sys_setsid",
4880 "sys_sigaction",
4881 "sys_sgetmask",
4882 "sys_ssetmask",
4883 "sys_setreuid16", /* 70 */
4884 "sys_setregid16",
4885 "sys_sigsuspend",
4886 "sys_sigpending",
4887 "sys_sethostname",
4888 "sys_setrlimit", /* 75 */
4889 "sys_old_getrlimit",
4890 "sys_getrusage",
4891 "sys_gettimeofday",
4892 "sys_settimeofday",
4893 "sys_getgroups16", /* 80 */
4894 "sys_setgroups16",
4895 "old_select",
4896 "sys_symlink",
4897 "sys_lstat",
4898 "sys_readlink", /* 85 */
4899 "sys_uselib",
4900 "sys_swapon",
4901 "sys_reboot",
4902 "old_readdir",
4903 "old_mmap", /* 90 */
4904 "sys_munmap",
4905 "sys_truncate",
4906 "sys_ftruncate",
4907 "sys_fchmod",
4908 "sys_fchown16", /* 95 */
4909 "sys_getpriority",
4910 "sys_setpriority",
4911 "sys_ni_syscall", /* old profil syscall holder */
4912 "sys_statfs",
4913 "sys_fstatfs", /* 100 */
4914 "sys_ioperm",
4915 "sys_socketcall",
4916 "sys_syslog",
4917 "sys_setitimer",
4918 "sys_getitimer", /* 105 */
4919 "sys_newstat",
4920 "sys_newlstat",
4921 "sys_newfstat",
4922 "sys_uname",
4923 "sys_iopl", /* 110 */
4924 "sys_vhangup",
4925 "sys_ni_syscall", /* old "idle" system call */
4926 "sys_vm86old",
4927 "sys_wait4",
4928 "sys_swapoff", /* 115 */
4929 "sys_sysinfo",
4930 "sys_ipc",
4931 "sys_fsync",
4932 "sys_sigreturn",
4933 "sys_clone", /* 120 */
4934 "sys_setdomainname",
4935 "sys_newuname",
4936 "sys_modify_ldt",
4937 "sys_adjtimex",
4938 "sys_mprotect", /* 125 */
4939 "sys_sigprocmask",
4940 "sys_ni_syscall", /* old "create_module" */
4941 "sys_init_module",
4942 "sys_delete_module",
4943 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4944 "sys_quotactl",
4945 "sys_getpgid",
4946 "sys_fchdir",
4947 "sys_bdflush",
4948 "sys_sysfs", /* 135 */
4949 "sys_personality",
4950 "sys_ni_syscall", /* reserved for afs_syscall */
4951 "sys_setfsuid16",
4952 "sys_setfsgid16",
4953 "sys_llseek", /* 140 */
4954 "sys_getdents",
4955 "sys_select",
4956 "sys_flock",
4957 "sys_msync",
4958 "sys_readv", /* 145 */
4959 "sys_writev",
4960 "sys_getsid",
4961 "sys_fdatasync",
4962 "sys_sysctl",
4963 "sys_mlock", /* 150 */
4964 "sys_munlock",
4965 "sys_mlockall",
4966 "sys_munlockall",
4967 "sys_sched_setparam",
4968 "sys_sched_getparam", /* 155 */
4969 "sys_sched_setscheduler",
4970 "sys_sched_getscheduler",
4971 "sys_sched_yield",
4972 "sys_sched_get_priority_max",
4973 "sys_sched_get_priority_min", /* 160 */
4974 "sys_sched_rr_get_interval",
4975 "sys_nanosleep",
4976 "sys_mremap",
4977 "sys_setresuid16",
4978 "sys_getresuid16", /* 165 */
4979 "sys_vm86",
4980 "sys_ni_syscall", /* Old sys_query_module */
4981 "sys_poll",
4982 "sys_nfsservctl",
4983 "sys_setresgid16", /* 170 */
4984 "sys_getresgid16",
4985 "sys_prctl",
4986 "sys_rt_sigreturn",
4987 "sys_rt_sigaction",
4988 "sys_rt_sigprocmask", /* 175 */
4989 "sys_rt_sigpending",
4990 "sys_rt_sigtimedwait",
4991 "sys_rt_sigqueueinfo",
4992 "sys_rt_sigsuspend",
4993 "sys_pread64", /* 180 */
4994 "sys_pwrite64",
4995 "sys_chown16",
4996 "sys_getcwd",
4997 "sys_capget",
4998 "sys_capset", /* 185 */
4999 "sys_sigaltstack",
5000 "sys_sendfile",
5001 "sys_ni_syscall", /* reserved for streams1 */
5002 "sys_ni_syscall", /* reserved for streams2 */
5003 "sys_vfork", /* 190 */
5004 "sys_getrlimit",
5005 "sys_mmap2",
5006 "sys_truncate64",
5007 "sys_ftruncate64",
5008 "sys_stat64", /* 195 */
5009 "sys_lstat64",
5010 "sys_fstat64",
5011 "sys_lchown",
5012 "sys_getuid",
5013 "sys_getgid", /* 200 */
5014 "sys_geteuid",
5015 "sys_getegid",
5016 "sys_setreuid",
5017 "sys_setregid",
5018 "sys_getgroups", /* 205 */
5019 "sys_setgroups",
5020 "sys_fchown",
5021 "sys_setresuid",
5022 "sys_getresuid",
5023 "sys_setresgid", /* 210 */
5024 "sys_getresgid",
5025 "sys_chown",
5026 "sys_setuid",
5027 "sys_setgid",
5028 "sys_setfsuid", /* 215 */
5029 "sys_setfsgid",
5030 "sys_pivot_root",
5031 "sys_mincore",
5032 "sys_madvise",
5033 "sys_getdents64", /* 220 */
5034 "sys_fcntl64",
5035 "sys_ni_syscall", /* reserved for TUX */
5036 "sys_ni_syscall",
5037 "sys_gettid",
5038 "sys_readahead", /* 225 */
5039 "sys_setxattr",
5040 "sys_lsetxattr",
5041 "sys_fsetxattr",
5042 "sys_getxattr",
5043 "sys_lgetxattr", /* 230 */
5044 "sys_fgetxattr",
5045 "sys_listxattr",
5046 "sys_llistxattr",
5047 "sys_flistxattr",
5048 "sys_removexattr", /* 235 */
5049 "sys_lremovexattr",
5050 "sys_fremovexattr",
5051 "sys_tkill",
5052 "sys_sendfile64",
5053 "sys_futex", /* 240 */
5054 "sys_sched_setaffinity",
5055 "sys_sched_getaffinity",
5056 "sys_set_thread_area",
5057 "sys_get_thread_area",
5058 "sys_io_setup", /* 245 */
5059 "sys_io_destroy",
5060 "sys_io_getevents",
5061 "sys_io_submit",
5062 "sys_io_cancel",
5063 "sys_fadvise64", /* 250 */
5064 "sys_ni_syscall",
5065 "sys_exit_group",
5066 "sys_lookup_dcookie",
5067 "sys_epoll_create",
5068 "sys_epoll_ctl", /* 255 */
5069 "sys_epoll_wait",
5070 "sys_remap_file_pages",
5071 "sys_set_tid_address",
5072 "sys_timer_create",
5073 "sys_timer_settime", /* 260 */
5074 "sys_timer_gettime",
5075 "sys_timer_getoverrun",
5076 "sys_timer_delete",
5077 "sys_clock_settime",
5078 "sys_clock_gettime", /* 265 */
5079 "sys_clock_getres",
5080 "sys_clock_nanosleep",
5081 "sys_statfs64",
5082 "sys_fstatfs64",
5083 "sys_tgkill", /* 270 */
5084 "sys_utimes",
5085 "sys_fadvise64_64",
5086 "sys_ni_syscall" /* sys_vserver */
5087 };
5088
5089 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5090 switch (uEAX)
5091 {
5092 default:
5093 if (uEAX < RT_ELEMENTS(apsz))
5094 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5095 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5096 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5097 else
5098 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5099 break;
5100
5101 }
5102}
5103
5104
5105/**
5106 * Dumps an OpenBSD system call.
5107 * @param pVCpu VMCPU handle.
5108 */
5109void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5110{
5111 static const char *apsz[] =
5112 {
5113 "SYS_syscall", //0
5114 "SYS_exit", //1
5115 "SYS_fork", //2
5116 "SYS_read", //3
5117 "SYS_write", //4
5118 "SYS_open", //5
5119 "SYS_close", //6
5120 "SYS_wait4", //7
5121 "SYS_8",
5122 "SYS_link", //9
5123 "SYS_unlink", //10
5124 "SYS_11",
5125 "SYS_chdir", //12
5126 "SYS_fchdir", //13
5127 "SYS_mknod", //14
5128 "SYS_chmod", //15
5129 "SYS_chown", //16
5130 "SYS_break", //17
5131 "SYS_18",
5132 "SYS_19",
5133 "SYS_getpid", //20
5134 "SYS_mount", //21
5135 "SYS_unmount", //22
5136 "SYS_setuid", //23
5137 "SYS_getuid", //24
5138 "SYS_geteuid", //25
5139 "SYS_ptrace", //26
5140 "SYS_recvmsg", //27
5141 "SYS_sendmsg", //28
5142 "SYS_recvfrom", //29
5143 "SYS_accept", //30
5144 "SYS_getpeername", //31
5145 "SYS_getsockname", //32
5146 "SYS_access", //33
5147 "SYS_chflags", //34
5148 "SYS_fchflags", //35
5149 "SYS_sync", //36
5150 "SYS_kill", //37
5151 "SYS_38",
5152 "SYS_getppid", //39
5153 "SYS_40",
5154 "SYS_dup", //41
5155 "SYS_opipe", //42
5156 "SYS_getegid", //43
5157 "SYS_profil", //44
5158 "SYS_ktrace", //45
5159 "SYS_sigaction", //46
5160 "SYS_getgid", //47
5161 "SYS_sigprocmask", //48
5162 "SYS_getlogin", //49
5163 "SYS_setlogin", //50
5164 "SYS_acct", //51
5165 "SYS_sigpending", //52
5166 "SYS_osigaltstack", //53
5167 "SYS_ioctl", //54
5168 "SYS_reboot", //55
5169 "SYS_revoke", //56
5170 "SYS_symlink", //57
5171 "SYS_readlink", //58
5172 "SYS_execve", //59
5173 "SYS_umask", //60
5174 "SYS_chroot", //61
5175 "SYS_62",
5176 "SYS_63",
5177 "SYS_64",
5178 "SYS_65",
5179 "SYS_vfork", //66
5180 "SYS_67",
5181 "SYS_68",
5182 "SYS_sbrk", //69
5183 "SYS_sstk", //70
5184 "SYS_61",
5185 "SYS_vadvise", //72
5186 "SYS_munmap", //73
5187 "SYS_mprotect", //74
5188 "SYS_madvise", //75
5189 "SYS_76",
5190 "SYS_77",
5191 "SYS_mincore", //78
5192 "SYS_getgroups", //79
5193 "SYS_setgroups", //80
5194 "SYS_getpgrp", //81
5195 "SYS_setpgid", //82
5196 "SYS_setitimer", //83
5197 "SYS_84",
5198 "SYS_85",
5199 "SYS_getitimer", //86
5200 "SYS_87",
5201 "SYS_88",
5202 "SYS_89",
5203 "SYS_dup2", //90
5204 "SYS_91",
5205 "SYS_fcntl", //92
5206 "SYS_select", //93
5207 "SYS_94",
5208 "SYS_fsync", //95
5209 "SYS_setpriority", //96
5210 "SYS_socket", //97
5211 "SYS_connect", //98
5212 "SYS_99",
5213 "SYS_getpriority", //100
5214 "SYS_101",
5215 "SYS_102",
5216 "SYS_sigreturn", //103
5217 "SYS_bind", //104
5218 "SYS_setsockopt", //105
5219 "SYS_listen", //106
5220 "SYS_107",
5221 "SYS_108",
5222 "SYS_109",
5223 "SYS_110",
5224 "SYS_sigsuspend", //111
5225 "SYS_112",
5226 "SYS_113",
5227 "SYS_114",
5228 "SYS_115",
5229 "SYS_gettimeofday", //116
5230 "SYS_getrusage", //117
5231 "SYS_getsockopt", //118
5232 "SYS_119",
5233 "SYS_readv", //120
5234 "SYS_writev", //121
5235 "SYS_settimeofday", //122
5236 "SYS_fchown", //123
5237 "SYS_fchmod", //124
5238 "SYS_125",
5239 "SYS_setreuid", //126
5240 "SYS_setregid", //127
5241 "SYS_rename", //128
5242 "SYS_129",
5243 "SYS_130",
5244 "SYS_flock", //131
5245 "SYS_mkfifo", //132
5246 "SYS_sendto", //133
5247 "SYS_shutdown", //134
5248 "SYS_socketpair", //135
5249 "SYS_mkdir", //136
5250 "SYS_rmdir", //137
5251 "SYS_utimes", //138
5252 "SYS_139",
5253 "SYS_adjtime", //140
5254 "SYS_141",
5255 "SYS_142",
5256 "SYS_143",
5257 "SYS_144",
5258 "SYS_145",
5259 "SYS_146",
5260 "SYS_setsid", //147
5261 "SYS_quotactl", //148
5262 "SYS_149",
5263 "SYS_150",
5264 "SYS_151",
5265 "SYS_152",
5266 "SYS_153",
5267 "SYS_154",
5268 "SYS_nfssvc", //155
5269 "SYS_156",
5270 "SYS_157",
5271 "SYS_158",
5272 "SYS_159",
5273 "SYS_160",
5274 "SYS_getfh", //161
5275 "SYS_162",
5276 "SYS_163",
5277 "SYS_164",
5278 "SYS_sysarch", //165
5279 "SYS_166",
5280 "SYS_167",
5281 "SYS_168",
5282 "SYS_169",
5283 "SYS_170",
5284 "SYS_171",
5285 "SYS_172",
5286 "SYS_pread", //173
5287 "SYS_pwrite", //174
5288 "SYS_175",
5289 "SYS_176",
5290 "SYS_177",
5291 "SYS_178",
5292 "SYS_179",
5293 "SYS_180",
5294 "SYS_setgid", //181
5295 "SYS_setegid", //182
5296 "SYS_seteuid", //183
5297 "SYS_lfs_bmapv", //184
5298 "SYS_lfs_markv", //185
5299 "SYS_lfs_segclean", //186
5300 "SYS_lfs_segwait", //187
5301 "SYS_188",
5302 "SYS_189",
5303 "SYS_190",
5304 "SYS_pathconf", //191
5305 "SYS_fpathconf", //192
5306 "SYS_swapctl", //193
5307 "SYS_getrlimit", //194
5308 "SYS_setrlimit", //195
5309 "SYS_getdirentries", //196
5310 "SYS_mmap", //197
5311 "SYS___syscall", //198
5312 "SYS_lseek", //199
5313 "SYS_truncate", //200
5314 "SYS_ftruncate", //201
5315 "SYS___sysctl", //202
5316 "SYS_mlock", //203
5317 "SYS_munlock", //204
5318 "SYS_205",
5319 "SYS_futimes", //206
5320 "SYS_getpgid", //207
5321 "SYS_xfspioctl", //208
5322 "SYS_209",
5323 "SYS_210",
5324 "SYS_211",
5325 "SYS_212",
5326 "SYS_213",
5327 "SYS_214",
5328 "SYS_215",
5329 "SYS_216",
5330 "SYS_217",
5331 "SYS_218",
5332 "SYS_219",
5333 "SYS_220",
5334 "SYS_semget", //221
5335 "SYS_222",
5336 "SYS_223",
5337 "SYS_224",
5338 "SYS_msgget", //225
5339 "SYS_msgsnd", //226
5340 "SYS_msgrcv", //227
5341 "SYS_shmat", //228
5342 "SYS_229",
5343 "SYS_shmdt", //230
5344 "SYS_231",
5345 "SYS_clock_gettime", //232
5346 "SYS_clock_settime", //233
5347 "SYS_clock_getres", //234
5348 "SYS_235",
5349 "SYS_236",
5350 "SYS_237",
5351 "SYS_238",
5352 "SYS_239",
5353 "SYS_nanosleep", //240
5354 "SYS_241",
5355 "SYS_242",
5356 "SYS_243",
5357 "SYS_244",
5358 "SYS_245",
5359 "SYS_246",
5360 "SYS_247",
5361 "SYS_248",
5362 "SYS_249",
5363 "SYS_minherit", //250
5364 "SYS_rfork", //251
5365 "SYS_poll", //252
5366 "SYS_issetugid", //253
5367 "SYS_lchown", //254
5368 "SYS_getsid", //255
5369 "SYS_msync", //256
5370 "SYS_257",
5371 "SYS_258",
5372 "SYS_259",
5373 "SYS_getfsstat", //260
5374 "SYS_statfs", //261
5375 "SYS_fstatfs", //262
5376 "SYS_pipe", //263
5377 "SYS_fhopen", //264
5378 "SYS_265",
5379 "SYS_fhstatfs", //266
5380 "SYS_preadv", //267
5381 "SYS_pwritev", //268
5382 "SYS_kqueue", //269
5383 "SYS_kevent", //270
5384 "SYS_mlockall", //271
5385 "SYS_munlockall", //272
5386 "SYS_getpeereid", //273
5387 "SYS_274",
5388 "SYS_275",
5389 "SYS_276",
5390 "SYS_277",
5391 "SYS_278",
5392 "SYS_279",
5393 "SYS_280",
5394 "SYS_getresuid", //281
5395 "SYS_setresuid", //282
5396 "SYS_getresgid", //283
5397 "SYS_setresgid", //284
5398 "SYS_285",
5399 "SYS_mquery", //286
5400 "SYS_closefrom", //287
5401 "SYS_sigaltstack", //288
5402 "SYS_shmget", //289
5403 "SYS_semop", //290
5404 "SYS_stat", //291
5405 "SYS_fstat", //292
5406 "SYS_lstat", //293
5407 "SYS_fhstat", //294
5408 "SYS___semctl", //295
5409 "SYS_shmctl", //296
5410 "SYS_msgctl", //297
5411 "SYS_MAXSYSCALL", //298
5412 //299
5413 //300
5414 };
    uint32_t uEAX;
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVCpu);
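    /* Only the default case exists for now; a switch is (presumably) used so
       that individual syscalls can be given special log treatment later. */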
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                uint32_t au32Args[8] = {0};
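                /* Best-effort read of the top of the guest stack so the (likely)
                   syscall arguments can be included in the log line; the status
                   code is deliberately ignored since this is logging only. */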
                PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
            break;
    }
}


#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5438/**
5439 * The Dll main entry point (stub).
5440 */
5441bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5442{
5443 return true;
5444}
5445
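/* Minimal byte-wise memcpy for the no-CRT build; simple rather than fast,
   as it only stands in for the C runtime version that is not linked in. */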
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}

#endif

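/**
 * Stub for the QEMU SMM state-update hook; the recompiler does not implement
 * System Management Mode, so there is (presumably) nothing to do here.
 */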
void cpu_smm_update(CPUX86State *env)
{
}