VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 37763

Last change on this file since 37763 was 37723, checked in by vboxsync, 13 years ago

VBoxRecompiler.c: tlb_flush_page may trigger a full tlb flush during invlpg replay in REMR3State if large pages are affected, so we must ignore the CR3 reload notifications as well as the invalidate page ones. Also, skip the replay if a global TLB flush is pending.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 179.8 KB
Line 
1/* $Id: VBoxRecompiler.c 37723 2011-06-30 23:52:19Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * The cast to (P)X86FPUMMX turns the copy into a single struct assignment,
 * which the compiler can inline as a fixed-size move. */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates.
 * NOTE(review): when defined, QEMU's own single-step machinery is apparently
 * used for logged stepping — confirm against remR3RunLoggingStep's body. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Advanced profiling samples (start/stop pairs) for the major REM activities.
 * All of these are registered in REMR3Init and deregistered in REMR3Term. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
/* Plain profiles for the address conversion helpers. */
static STAMPROFILE gStatGCPhys2HCVirt;
static STAMPROFILE gStatHCVirt2GCPhys;
/* Event counters: TSC reads, the various raw-mode refusal reasons, and
 * descriptor table / task register change notifications. */
static STAMCOUNTER gStatCpuGetTSC;
static STAMCOUNTER gStatRefuseTFInhibit;
static STAMCOUNTER gStatRefuseVM86;
static STAMCOUNTER gStatRefusePaging;
static STAMCOUNTER gStatRefusePAE;
static STAMCOUNTER gStatRefuseIOPLNot0;
static STAMCOUNTER gStatRefuseIF0;
static STAMCOUNTER gStatRefuseCode16;
static STAMCOUNTER gStatRefuseWP0;
static STAMCOUNTER gStatRefuseRing1or2;
static STAMCOUNTER gStatRefuseCanExecute;
static STAMCOUNTER gStatREMGDTChange;
static STAMCOUNTER gStatREMIDTChange;
static STAMCOUNTER gStatREMLDTRChange;
static STAMCOUNTER gStatREMTRChange;
/* One entry per segment register, in ES/CS/SS/DS/FS/GS order
 * (see the STAM_REG calls in REMR3Init). */
static STAMCOUNTER gStatSelOutOfSync[6];
static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER gStatFlushTBs;
#endif
/* in exec.c */
/* Counters maintained by the recompiler core; exposed as release statistics
 * via STAM_REL_REG in REMR3Init. */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks.
 * Entries are the 1, 2 and 4 byte access handlers, in that order;
 * registered with cpu_register_io_memory in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Same 1/2/4 byte ordering as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks.
 * NOTE(review): presumably used for pages with active PGM access handlers —
 * see the iHandlerMemType registration in REMR3Init; confirm against the
 * handler notification code. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks.
 * Same 1/2/4 byte ordering as the other callback tables. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments.
 * A single optional boolean/mnemonic argument selecting the new state. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once (first call wins) via DBGCRegisterCommands in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
221
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc(_1K) in REMR3Init.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;
226
227
228/*******************************************************************************
229* Internal Functions *
230*******************************************************************************/
231void remAbort(int rc, const char *pszTip);
232extern int testmath(void);
233
234/* Put them here to avoid unused variable warning. */
235AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
236#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
237//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
238/* Why did this have to be identical?? */
239AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
240#else
241AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
242#endif
243
244
/**
 * Initializes the REM.
 *
 * Sets up the recompiled-execution environment: sanity checks the layout of
 * REM inside the VM structure, initializes the QEMU CPU core, registers the
 * MMIO/handler memory types, the saved-state unit, the debugger command and
 * the statistics, and finally builds the handler notification free list.
 *
 * Must run before any physical memory is registered (asserted below).
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     * deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL; /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    /* cIgnoreAll is raised here and dropped again after the memory types are
     * registered, so none of the setup below triggers REM notifications. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    /* 0 = let the recompiler pick its default translation buffer size. */
    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's CPUID feature bits into the recompiler env
     * (std leaf 1 and ext leaf 0x80000001). */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* This rc intentionally shadows the outer one: failing to register
         * the debugger command is non-fatal and must not affect the return
         * value; we simply retry on the next init. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters, backed by globals maintained in exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     * Every record is linked into the free list; the pending list starts empty.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    /* pCur still points at the final array entry here; terminate the list. */
    pCur->idxNext = UINT32_MAX; /* the last record. */

    /* rc is the (successful) SSMR3RegisterInternal status at this point. */
    return rc;
}
443
444
445/**
446 * Finalizes the REM initialization.
447 *
448 * This is called after all components, devices and drivers has
449 * been initialized. Its main purpose it to finish the RAM related
450 * initialization.
451 *
452 * @returns VBox status code.
453 *
454 * @param pVM The VM handle.
455 */
456REMR3DECL(int) REMR3InitFinalize(PVM pVM)
457{
458 int rc;
459
460 /*
461 * Ram size & dirty bit map.
462 */
463 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
464 pVM->rem.s.fGCPhysLastRamFixed = true;
465#ifdef RT_STRICT
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
467#else
468 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
469#endif
470 return rc;
471}
472
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * The dirty map is one byte per guest page (cb >> PAGE_SHIFT, fully memset
 * below).  In guarded mode the map is page-allocated with an inaccessible
 * tail so that out-of-bounds writes fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM blocks are registered with the recompiler. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    /* The +1 above must not wrap around (GCPhysLastRam == ~0). */
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    /* Verify nothing was truncated when narrowing to phys_dirty_size. */
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard. */
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke access to the tail; overruns beyond the aligned size now trap. */
        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base pointer up so the used part ends flush against the
         * protected tail - overruns of the real map then hit the guard. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it. */
    /* 0xff = everything dirty, forcing a full resync on first use. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
529
530
531/**
532 * Terminates the REM.
533 *
534 * Termination means cleaning up and freeing all resources,
535 * the VM it self is at this point powered off or suspended.
536 *
537 * @returns VBox status code.
538 * @param pVM The VM to operate on.
539 */
540REMR3DECL(int) REMR3Term(PVM pVM)
541{
542#ifdef VBOX_WITH_STATISTICS
543 /*
544 * Statistics.
545 */
546 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
547 STAM_DEREG(pVM, &gStatCompilationQEmu);
548 STAM_DEREG(pVM, &gStatRunCodeQEmu);
549 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
550 STAM_DEREG(pVM, &gStatTimers);
551 STAM_DEREG(pVM, &gStatTBLookup);
552 STAM_DEREG(pVM, &gStatIRQ);
553 STAM_DEREG(pVM, &gStatRawCheck);
554 STAM_DEREG(pVM, &gStatMemRead);
555 STAM_DEREG(pVM, &gStatMemWrite);
556 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
557 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
558
559 STAM_DEREG(pVM, &gStatCpuGetTSC);
560
561 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
562 STAM_DEREG(pVM, &gStatRefuseVM86);
563 STAM_DEREG(pVM, &gStatRefusePaging);
564 STAM_DEREG(pVM, &gStatRefusePAE);
565 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
566 STAM_DEREG(pVM, &gStatRefuseIF0);
567 STAM_DEREG(pVM, &gStatRefuseCode16);
568 STAM_DEREG(pVM, &gStatRefuseWP0);
569 STAM_DEREG(pVM, &gStatRefuseRing1or2);
570 STAM_DEREG(pVM, &gStatRefuseCanExecute);
571 STAM_DEREG(pVM, &gStatFlushTBs);
572
573 STAM_DEREG(pVM, &gStatREMGDTChange);
574 STAM_DEREG(pVM, &gStatREMLDTRChange);
575 STAM_DEREG(pVM, &gStatREMIDTChange);
576 STAM_DEREG(pVM, &gStatREMTRChange);
577
578 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
579 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
580 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
581 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
582 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
583 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
584
585 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
586 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
587 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
588 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
589 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
590 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
591
592 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
593#endif /* VBOX_WITH_STATISTICS */
594
595 STAM_REL_DEREG(pVM, &tb_flush_count);
596 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
597 STAM_REL_DEREG(pVM, &tlb_flush_count);
598
599 return VINF_SUCCESS;
600}
601
602
603/**
604 * The VM is being reset.
605 *
606 * For the REM component this means to call the cpu_reset() and
607 * reinitialize some state variables.
608 *
609 * @param pVM VM handle.
610 */
611REMR3DECL(void) REMR3Reset(PVM pVM)
612{
613 EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */
614
615 /*
616 * Reset the REM cpu.
617 */
618 Assert(pVM->rem.s.cIgnoreAll == 0);
619 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
620 cpu_reset(&pVM->rem.s.Env);
621 pVM->rem.s.cInvalidatedPages = 0;
622 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
623 Assert(pVM->rem.s.cIgnoreAll == 0);
624
625 /* Clear raw ring 0 init state */
626 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
627
628 /* Flush the TBs the next time we execute code here. */
629 pVM->rem.s.fFlushTBs = true;
630
631 EMRemUnlock(pVM);
632}
633
634
635/**
636 * Execute state save operation.
637 *
638 * @returns VBox status code.
639 * @param pVM VM Handle.
640 * @param pSSM SSM operation handle.
641 */
642static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
643{
644 PREM pRem = &pVM->rem.s;
645
646 /*
647 * Save the required CPU Env bits.
648 * (Not much because we're never in REM when doing the save.)
649 */
650 LogFlow(("remR3Save:\n"));
651 Assert(!pRem->fInREM);
652 SSMR3PutU32(pSSM, pRem->Env.hflags);
653 SSMR3PutU32(pSSM, ~0); /* separator */
654
655 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
656 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
657 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
658
659 return SSMR3PutU32(pSSM, ~0); /* terminator */
660}
661
662
/**
 * Execute state load operation.
 *
 * Restores the few CPU Env bits written by remR3Save (plus legacy 1.6 data)
 * and forces a full CPU state resync on every VCPU.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;   /* uint32_t on purpose - read with SSMR3GetUInt below. */
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if ( uVersion != REM_SAVED_STATE_VERSION
        && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     *
     * NOTE(review): the early failure returns below leave cIgnoreAll
     * incremented; presumably harmless since the whole load fails then,
     * but worth confirming.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    /* NOTE(review): status ignored here; presumably a stream error also shows
     * up in the checked separator read below - verify against SSM semantics. */
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         * sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Sanity: the saved count must fit the fixed-size array. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     * (Std leaf 1 and ext leaf 0x80000001, mirroring REMR3Init.)
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     * (This pVCpu intentionally shadows the one above.)
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
788
789
790
791#undef LOG_GROUP
792#define LOG_GROUP LOG_GROUP_REM_RUN
793
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     *
     * interrupt_request is saved here and restored at the bottom so the
     * step itself runs with no interruption requests at all.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * cpu_breakpoint_remove returns 0 on success, so fBp records whether a
     * BP_GDB breakpoint actually existed at the current PC (and was removed).
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* The resume/suspend pair nudges the TM clock forward (see comment above). */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the remaining QEMU exit codes onto VBox statuses. */
        switch (rc)
        {
            case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED: rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was parked in rem.s.rc; consume and clear it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we temporarily pulled above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
878
879
880/**
881 * Set a breakpoint using the REM facilities.
882 *
883 * @returns VBox status code.
884 * @param pVM The VM handle.
885 * @param Address The breakpoint address.
886 * @thread The emulation thread.
887 */
888REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
889{
890 VM_ASSERT_EMT(pVM);
891 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
892 {
893 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
894 return VINF_SUCCESS;
895 }
896 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
897 return VERR_REM_NO_MORE_BP_SLOTS;
898}
899
900
901/**
902 * Clears a breakpoint set by REMR3BreakpointSet().
903 *
904 * @returns VBox status code.
905 * @param pVM The VM handle.
906 * @param Address The breakpoint address.
907 * @thread The emulation thread.
908 */
909REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
910{
911 VM_ASSERT_EMT(pVM);
912 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
913 {
914 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
915 return VINF_SUCCESS;
916 }
917 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
918 return VERR_REM_BP_NOT_FOUND;
919}
920
921
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation.
       The saved flag is restored right after REMR3State below. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt request flags; they are restored before REMR3StateBack. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    /* Scan the breakpoint list to distinguish a breakpoint hit from a plain step. */
                    CPUBreakpoint  *pBP;
                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1077
1078
1079/**
1080 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1081 *
1082 * @returns VBox status code.
1083 *
1084 * @param pVM The VM handle.
1085 * @param pVCpu The Virtual CPU handle.
1086 */
1087static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1088{
1089 int rc;
1090
1091 Assert(pVM->rem.s.fInREM);
1092#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1093 cpu_single_step(&pVM->rem.s.Env, 1);
1094#else
1095 Assert(!pVM->rem.s.Env.singlestep_enabled);
1096#endif
1097
1098 /*
1099 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1100 */
1101 for (;;)
1102 {
1103 char szBuf[256];
1104
1105 /*
1106 * Log the current registers state and instruction.
1107 */
1108 remR3StateUpdate(pVM, pVCpu);
1109 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1110 szBuf[0] = '\0';
1111 rc = DBGFR3DisasInstrEx(pVM,
1112 pVCpu->idCpu,
1113 0, /* Sel */
1114 0, /* GCPtr */
1115 DBGF_DISAS_FLAGS_CURRENT_GUEST
1116 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1117 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1118 szBuf,
1119 sizeof(szBuf),
1120 NULL);
1121 if (RT_FAILURE(rc))
1122 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1123 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1124
1125 /*
1126 * Execute the instruction.
1127 */
1128 TMNotifyStartOfExecution(pVCpu);
1129
1130 if ( pVM->rem.s.Env.exception_index < 0
1131 || pVM->rem.s.Env.exception_index > 256)
1132 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1133
1134#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1135 pVM->rem.s.Env.interrupt_request = 0;
1136#else
1137 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1138#endif
1139 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1140 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1141 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1142 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1143 pVM->rem.s.Env.interrupt_request,
1144 pVM->rem.s.Env.halted,
1145 pVM->rem.s.Env.exception_index
1146 );
1147
1148 rc = cpu_exec(&pVM->rem.s.Env);
1149
1150 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1151 pVM->rem.s.Env.interrupt_request,
1152 pVM->rem.s.Env.halted,
1153 pVM->rem.s.Env.exception_index
1154 );
1155
1156 TMNotifyEndOfExecution(pVCpu);
1157
1158 switch (rc)
1159 {
1160#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1161 /*
1162 * The normal exit.
1163 */
1164 case EXCP_SINGLE_INSTR:
1165 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1166 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1167 continue;
1168 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1169 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1170 rc = VINF_SUCCESS;
1171 break;
1172
1173#else
1174 /*
1175 * The normal exit, check for breakpoints at PC just to be sure.
1176 */
1177#endif
1178 case EXCP_DEBUG:
1179 if (pVM->rem.s.Env.watchpoint_hit)
1180 {
1181 /** @todo deal with watchpoints */
1182 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1183 rc = VINF_EM_DBG_BREAKPOINT;
1184 }
1185 else
1186 {
1187 CPUBreakpoint *pBP;
1188 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1189 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1190 if (pBP->pc == GCPtrPC)
1191 break;
1192 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1193 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1194 }
1195#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1196 if (rc == VINF_EM_DBG_STEPPED)
1197 {
1198 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1199 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1200 continue;
1201
1202 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1203 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1204 rc = VINF_SUCCESS;
1205 }
1206#endif
1207 break;
1208
1209 /*
1210 * If we take a trap or start servicing a pending interrupt, we might end up here.
1211 * (Timer thread or some other thread wishing EMT's attention.)
1212 */
1213 case EXCP_INTERRUPT:
1214 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1215 rc = VINF_SUCCESS;
1216 break;
1217
1218 /*
1219 * hlt instruction.
1220 */
1221 case EXCP_HLT:
1222 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1223 rc = VINF_EM_HALT;
1224 break;
1225
1226 /*
1227 * The VM has halted.
1228 */
1229 case EXCP_HALTED:
1230 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1231 rc = VINF_EM_HALT;
1232 break;
1233
1234 /*
1235 * Switch to RAW-mode.
1236 */
1237 case EXCP_EXECUTE_RAW:
1238 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1239 rc = VINF_EM_RESCHEDULE_RAW;
1240 break;
1241
1242 /*
1243 * Switch to hardware accelerated RAW-mode.
1244 */
1245 case EXCP_EXECUTE_HWACC:
1246 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1247 rc = VINF_EM_RESCHEDULE_HWACC;
1248 break;
1249
1250 /*
1251 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1252 */
1253 case EXCP_RC:
1254 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1255 rc = pVM->rem.s.rc;
1256 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1257 break;
1258
1259 /*
1260 * Figure out the rest when they arrive....
1261 */
1262 default:
1263 AssertMsgFailed(("rc=%d\n", rc));
1264 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1265 rc = VINF_EM_RESCHEDULE;
1266 break;
1267 }
1268 break;
1269 }
1270
1271#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1272// cpu_single_step(&pVM->rem.s.Env, 0);
1273#else
1274 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1275#endif
1276 return rc;
1277}
1278
1279
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* Divert to the verbose single-stepping variant when requested. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    /* Run the recompiler and map its exit code onto a VBox status code. */
    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                /* Scan the breakpoint list to tell a breakpoint hit from a plain step. */
                CPUBreakpoint  *pBP;
                RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1393
1394
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    return false;
#endif

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields HWACCM inspects are filled in; the rest of the
         * structure is left uninitialized on purpose.
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr             = env->ldt.selector;
        Ctx.ldtrHid.u64Base  = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u   = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->breakpoints))
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->watchpoints))
    {
        //Log2(("raw mode refused: Watchpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable if the guest CPUID actually advertises it. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 (supervisor) code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patched code is always allowed to run raw. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1649
1650
1651/**
1652 * Fetches a code byte.
1653 *
1654 * @returns Success indicator (bool) for ease of use.
1655 * @param env The CPU environment structure.
1656 * @param GCPtrInstr Where to fetch code.
1657 * @param pu8Byte Where to store the byte on success
1658 */
1659bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1660{
1661 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1662 if (RT_SUCCESS(rc))
1663 return true;
1664 return false;
1665}
1666
1667
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    Assert(EMRemIsLockOwner(env->pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.  On failure, force a full CR3 sync instead.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1718
1719
1720#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address into an R3 pointer for the qemu TLB,
 * encoding special conditions in the low bits of the returned pointer:
 *  - 1: translation failed (catch-all / unassigned),
 *  - bit 1 set: page is write-monitored (catch-write).
 *
 * NOTE(review): the fWritable parameter is ignored here; true is always
 * passed to PGMR3PhysTlbGCPhys2Ptr -- confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1741#endif /* REM_PHYS_ADDR_IN_TLB */
1742
1743
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * Hands the page over to CSAM for monitoring; only done for paged
 * supervisor-mode code outside V86 mode, and never when HWACCM is active.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (    (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)       /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                       /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1762
1763
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * Mirror of remR3ProtectCode: tells CSAM to stop monitoring the page under
 * the same conditions the monitoring was established.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (    (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)       /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                       /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1782
1783
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUX86State *env, bool fGlobal)
{
    PVM      pVM = env->pVM;
    PCPUMCTX pCtx;
    Assert(EMRemIsLockOwner(pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1829
1830
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * Propagates the new control register values to CPUM and asks PGM to switch
 * its paging mode accordingly.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUX86State *env)
{
    PVM      pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];
#ifdef TARGET_X86_64
    efer = env->efer;
    pCtx->msrEFER = efer;
#else
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM status codes are forwarded to EM via the pending-rc mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1881
1882
/**
 * Called from compiled code to run dma.
 *
 * Pauses the emulated-code profiling slice while the PDM DMA controllers
 * run, then resumes it.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3DmaRun(CPUX86State *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1894
1895
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Pauses the emulated-code profiling slice, runs the TM timer queues under
 * the timer-run profiling slice, then resumes emulated-code profiling.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3TimersRun(CPUX86State *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1911
1912
1913/**
1914 * Record trap occurrence
1915 *
1916 * @returns VBox status code
1917 * @param env Pointer to the CPU environment.
1918 * @param uTrap Trap nr
1919 * @param uErrorCode Error code
1920 * @param pvNextEIP Next EIP
1921 */
1922int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1923{
1924 PVM pVM = env->pVM;
1925#ifdef VBOX_WITH_STATISTICS
1926 static STAMCOUNTER s_aStatTrap[255];
1927 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1928#endif
1929
1930#ifdef VBOX_WITH_STATISTICS
1931 if (uTrap < 255)
1932 {
1933 if (!s_aRegisters[uTrap])
1934 {
1935 char szStatName[64];
1936 s_aRegisters[uTrap] = true;
1937 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1938 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1939 }
1940 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1941 }
1942#endif
1943 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1944 if( uTrap < 0x20
1945 && (env->cr[0] & X86_CR0_PE)
1946 && !(env->eflags & X86_EFL_VM))
1947 {
1948#ifdef DEBUG
1949 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1950#endif
1951 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1952 {
1953 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1954 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1955 return VERR_REM_TOO_MANY_TRAPS;
1956 }
1957 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1958 pVM->rem.s.cPendingExceptions = 1;
1959 pVM->rem.s.uPendingException = uTrap;
1960 pVM->rem.s.uPendingExcptEIP = env->eip;
1961 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1962 }
1963 else
1964 {
1965 pVM->rem.s.cPendingExceptions = 0;
1966 pVM->rem.s.uPendingException = uTrap;
1967 pVM->rem.s.uPendingExcptEIP = env->eip;
1968 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1969 }
1970 return VINF_SUCCESS;
1971}
1972
1973
1974/*
1975 * Clear current active trap
1976 *
1977 * @param pVM VM Handle.
1978 */
1979void remR3TrapClear(PVM pVM)
1980{
1981 pVM->rem.s.cPendingExceptions = 0;
1982 pVM->rem.s.uPendingException = 0;
1983 pVM->rem.s.uPendingExcptEIP = 0;
1984 pVM->rem.s.uPendingExcptCR2 = 0;
1985}
1986
1987
/**
 * Record previous call instruction addresses
 *
 * Forwards the current EIP to CSAM so it can record the call site.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3RecordCall(CPUX86State *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1997
1998
/**
 * Syncs the internal REM state with the VM.
 *
 * This must be called before REMR3Run() is invoked whenever the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 *
 * @remark  The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *          not do this since the majority of the callers don't want any unnecessary events
 *          pending that would immediately interrupt execution.
 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8] = pCtx->r8;
    pVM->rem.s.Env.regs[9] = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Only if we're not flushing the TLB.
     *
     * NOTE(review): tlb_flush_page may trigger a full TLB flush itself when
     * large pages are affected, which is why fIgnoreCR3Load is raised here
     * together with fIgnoreInvlPg.  When a global flush is pending anyway
     * the replay is skipped; the tlb_flush below covers it.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (pVM->rem.s.cInvalidatedPages)
    {
        if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
        {
            RTUINT i;

            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreInvlPg = true;
            for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
            {
                Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
                tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
            }
            pVM->rem.s.fIgnoreInvlPg = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer = pCtx->msrEFER;
    pVM->rem.s.Env.star = pCtx->msrSTAR;
    pVM->rem.s.Env.pat = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
                 | CPUM_CHANGED_CR4
                 | CPUM_CHANGED_CR0
                 | CPUM_CHANGED_CR3
                 | CPUM_CHANGED_GDTR
                 | CPUM_CHANGED_IDTR
                 | CPUM_CHANGED_SYSENTER_MSR
                 | CPUM_CHANGED_LDTR
                 | CPUM_CHANGED_CPUID
                 | CPUM_CHANGED_FPU_REM
                 )
        )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int = 0;
            pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int = 1;
            pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case 0x0e:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case 0x11: case 0x08:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (   pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2478
2479
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8 = pVM->rem.s.Env.regs[8];
    pCtx->r9 = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors QEMU left half-synced (lazy selector loading). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    /* A CR4.VME change requires the TSS to be resynced. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDT/IDT changes raise the corresponding sync FF so SELM/TRPM pick them up. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (   pCtx->ldtr != pVM->rem.s.Env.ldt.selector
        || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
        || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (   pCtx->tr != pVM->rem.s.Env.tr.selector
        || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
        || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
        /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        || pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                  ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                  : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* REMR3State cleared the busy bit for QEMU's benefit; restore it here. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = false;
    pVM->rem.s.pCtx = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2739
2740
2741/**
2742 * This is called by the disassembler when it wants to update the cpu state
2743 * before for instance doing a register dump.
2744 */
2745static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2746{
2747 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2748 unsigned i;
2749
2750 Assert(pVM->rem.s.fInREM);
2751
2752 /*
2753 * Copy back the registers.
2754 * This is done in the order they are declared in the CPUMCTX structure.
2755 */
2756
2757 /** @todo FOP */
2758 /** @todo FPUIP */
2759 /** @todo CS */
2760 /** @todo FPUDP */
2761 /** @todo DS */
2762 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2763 pCtx->fpu.MXCSR = 0;
2764 pCtx->fpu.MXCSR_MASK = 0;
2765
2766 /** @todo check if FPU/XMM was actually used in the recompiler */
2767 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2768//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2769
2770#ifdef TARGET_X86_64
2771 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2772 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2773 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2774 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2775 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2776 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2777 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2778 pCtx->r8 = pVM->rem.s.Env.regs[8];
2779 pCtx->r9 = pVM->rem.s.Env.regs[9];
2780 pCtx->r10 = pVM->rem.s.Env.regs[10];
2781 pCtx->r11 = pVM->rem.s.Env.regs[11];
2782 pCtx->r12 = pVM->rem.s.Env.regs[12];
2783 pCtx->r13 = pVM->rem.s.Env.regs[13];
2784 pCtx->r14 = pVM->rem.s.Env.regs[14];
2785 pCtx->r15 = pVM->rem.s.Env.regs[15];
2786
2787 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2788#else
2789 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2790 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2791 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2792 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2793 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2794 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2795 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2796
2797 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2798#endif
2799
2800 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2801
2802 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2803 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2804 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2805 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2806 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2807
2808#ifdef TARGET_X86_64
2809 pCtx->rip = pVM->rem.s.Env.eip;
2810 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2811#else
2812 pCtx->eip = pVM->rem.s.Env.eip;
2813 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2814#endif
2815
2816 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2817 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2818 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2819 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2820 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2821 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2822
2823 for (i = 0; i < 8; i++)
2824 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2825
2826 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2827 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2828 {
2829 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2830 STAM_COUNTER_INC(&gStatREMGDTChange);
2831 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2832 }
2833
2834 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2835 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2836 {
2837 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2838 STAM_COUNTER_INC(&gStatREMIDTChange);
2839 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2840 }
2841
2842 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2843 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2844 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2845 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2846 {
2847 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2848 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2849 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2850 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2851 STAM_COUNTER_INC(&gStatREMLDTRChange);
2852 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2853 }
2854
2855 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2856 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2857 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2858 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2859 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2860 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2861 : 0) )
2862 {
2863 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2864 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2865 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2866 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2867 pCtx->tr = pVM->rem.s.Env.tr.selector;
2868 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2869 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2870 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2871 if (pCtx->trHid.Attr.u)
2872 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2873 STAM_COUNTER_INC(&gStatREMTRChange);
2874 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2875 }
2876
2877 /** @todo These values could still be out of sync! */
2878 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2879 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2880 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2881 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2882
2883 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2884 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2885 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2886
2887 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2888 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2889 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2890
2891 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2892 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2893 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2894
2895 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2896 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2897 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2898
2899 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2900 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2901 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2902
2903 /* Sysenter MSR */
2904 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2905 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2906 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2907
2908 /* System MSRs. */
2909 pCtx->msrEFER = pVM->rem.s.Env.efer;
2910 pCtx->msrSTAR = pVM->rem.s.Env.star;
2911 pCtx->msrPAT = pVM->rem.s.Env.pat;
2912#ifdef TARGET_X86_64
2913 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2914 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2915 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2916 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2917#endif
2918
2919}
2920
2921
2922/**
2923 * Update the VMM state information if we're currently in REM.
2924 *
2925 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2926 * we're currently executing in REM and the VMM state is invalid. This method will of
2927 * course check that we're executing in REM before syncing any data over to the VMM.
2928 *
2929 * @param pVM The VM handle.
2930 * @param pVCpu The VMCPU handle.
2931 */
2932REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2933{
2934 if (pVM->rem.s.fInREM)
2935 remR3StateUpdate(pVM, pVCpu);
2936}
2937
2938
2939#undef LOG_GROUP
2940#define LOG_GROUP LOG_GROUP_REM
2941
2942
2943/**
2944 * Notify the recompiler about Address Gate 20 state change.
2945 *
2946 * This notification is required since A20 gate changes are
2947 * initialized from a device driver and the VM might just as
2948 * well be in REM mode as in RAW mode.
2949 *
2950 * @param pVM VM handle.
2951 * @param pVCpu VMCPU handle.
2952 * @param fEnable True if the gate should be enabled.
2953 * False if the gate should be disabled.
2954 */
2955REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2956{
2957 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2958 VM_ASSERT_EMT(pVM);
2959
2960 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2961 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2962 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2963}
2964
2965
2966/**
2967 * Replays the handler notification changes
2968 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2969 *
2970 * @param pVM VM handle.
2971 */
2972REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2973{
2974 /*
2975 * Replay the flushes.
2976 */
2977 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2978 VM_ASSERT_EMT(pVM);
2979
2980 /** @todo this isn't ensuring correct replay order. */
2981 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2982 {
2983 uint32_t idxNext;
2984 uint32_t idxRevHead;
2985 uint32_t idxHead;
2986#ifdef VBOX_STRICT
2987 int32_t c = 0;
2988#endif
2989
2990 /* Lockless purging of pending notifications. */
2991 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2992 if (idxHead == UINT32_MAX)
2993 return;
2994 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2995
2996 /*
2997 * Reverse the list to process it in FIFO order.
2998 */
2999 idxRevHead = UINT32_MAX;
3000 do
3001 {
3002 /* Save the index of the next rec. */
3003 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3004 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3005 /* Push the record onto the reversed list. */
3006 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3007 idxRevHead = idxHead;
3008 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3009 /* Advance. */
3010 idxHead = idxNext;
3011 } while (idxHead != UINT32_MAX);
3012
3013 /*
3014 * Loop thru the list, reinserting the record into the free list as they are
3015 * processed to avoid having other EMTs running out of entries while we're flushing.
3016 */
3017 idxHead = idxRevHead;
3018 do
3019 {
3020 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3021 uint32_t idxCur;
3022 Assert(--c >= 0);
3023
3024 switch (pCur->enmKind)
3025 {
3026 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3027 remR3NotifyHandlerPhysicalRegister(pVM,
3028 pCur->u.PhysicalRegister.enmType,
3029 pCur->u.PhysicalRegister.GCPhys,
3030 pCur->u.PhysicalRegister.cb,
3031 pCur->u.PhysicalRegister.fHasHCHandler);
3032 break;
3033
3034 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3035 remR3NotifyHandlerPhysicalDeregister(pVM,
3036 pCur->u.PhysicalDeregister.enmType,
3037 pCur->u.PhysicalDeregister.GCPhys,
3038 pCur->u.PhysicalDeregister.cb,
3039 pCur->u.PhysicalDeregister.fHasHCHandler,
3040 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3041 break;
3042
3043 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3044 remR3NotifyHandlerPhysicalModify(pVM,
3045 pCur->u.PhysicalModify.enmType,
3046 pCur->u.PhysicalModify.GCPhysOld,
3047 pCur->u.PhysicalModify.GCPhysNew,
3048 pCur->u.PhysicalModify.cb,
3049 pCur->u.PhysicalModify.fHasHCHandler,
3050 pCur->u.PhysicalModify.fRestoreAsRAM);
3051 break;
3052
3053 default:
3054 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3055 break;
3056 }
3057
3058 /*
3059 * Advance idxHead.
3060 */
3061 idxCur = idxHead;
3062 idxHead = pCur->idxNext;
3063 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3064
3065 /*
3066 * Put the record back into the free list.
3067 */
3068 do
3069 {
3070 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3071 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3072 ASMCompilerBarrier();
3073 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3074 } while (idxHead != UINT32_MAX);
3075
3076#ifdef VBOX_STRICT
3077 if (pVM->cCpus == 1)
3078 {
3079 unsigned c;
3080 /* Check that all records are now on the free list. */
3081 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3082 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3083 c++;
3084 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3085 }
3086#endif
3087 }
3088}
3089
3090
3091/**
3092 * Notify REM about changed code page.
3093 *
3094 * @returns VBox status code.
3095 * @param pVM VM handle.
3096 * @param pVCpu VMCPU handle.
3097 * @param pvCodePage Code page address
3098 */
3099REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3100{
3101#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3102 int rc;
3103 RTGCPHYS PhysGC;
3104 uint64_t flags;
3105
3106 VM_ASSERT_EMT(pVM);
3107
3108 /*
3109 * Get the physical page address.
3110 */
3111 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3112 if (rc == VINF_SUCCESS)
3113 {
3114 /*
3115 * Sync the required registers and flush the whole page.
3116 * (Easier to do the whole page than notifying it about each physical
3117 * byte that was changed.
3118 */
3119 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3120 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3121 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3122 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3123
3124 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3125 }
3126#endif
3127 return VINF_SUCCESS;
3128}
3129
3130
3131/**
3132 * Notification about a successful MMR3PhysRegister() call.
3133 *
3134 * @param pVM VM handle.
3135 * @param GCPhys The physical address the RAM.
3136 * @param cb Size of the memory.
3137 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3138 */
3139REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3140{
3141 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3142 VM_ASSERT_EMT(pVM);
3143
3144 /*
3145 * Validate input - we trust the caller.
3146 */
3147 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3148 Assert(cb);
3149 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3150 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3151
3152 /*
3153 * Base ram? Update GCPhysLastRam.
3154 */
3155 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3156 {
3157 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3158 {
3159 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3160 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3161 }
3162 }
3163
3164 /*
3165 * Register the ram.
3166 */
3167 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3168
3169 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3170 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3171 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3172
3173 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3174}
3175
3176
3177/**
3178 * Notification about a successful MMR3PhysRomRegister() call.
3179 *
3180 * @param pVM VM handle.
3181 * @param GCPhys The physical address of the ROM.
3182 * @param cb The size of the ROM.
3183 * @param pvCopy Pointer to the ROM copy.
3184 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3185 * This function will be called when ever the protection of the
3186 * shadow ROM changes (at reset and end of POST).
3187 */
3188REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3189{
3190 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3191 VM_ASSERT_EMT(pVM);
3192
3193 /*
3194 * Validate input - we trust the caller.
3195 */
3196 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3197 Assert(cb);
3198 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3199
3200 /*
3201 * Register the rom.
3202 */
3203 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3204
3205 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3206 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3207 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3208
3209 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3210}
3211
3212
3213/**
3214 * Notification about a successful memory deregistration or reservation.
3215 *
3216 * @param pVM VM Handle.
3217 * @param GCPhys Start physical address.
3218 * @param cb The size of the range.
3219 */
3220REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3221{
3222 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3223 VM_ASSERT_EMT(pVM);
3224
3225 /*
3226 * Validate input - we trust the caller.
3227 */
3228 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3229 Assert(cb);
3230 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3231
3232 /*
3233 * Unassigning the memory.
3234 */
3235 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3236
3237 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3238 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3239 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3240
3241 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3242}
3243
3244
3245/**
3246 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3247 *
3248 * @param pVM VM Handle.
3249 * @param enmType Handler type.
3250 * @param GCPhys Handler range address.
3251 * @param cb Size of the handler range.
3252 * @param fHasHCHandler Set if the handler has a HC callback function.
3253 *
3254 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3255 * Handler memory type to memory which has no HC handler.
3256 */
3257static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3258{
3259 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3260 enmType, GCPhys, cb, fHasHCHandler));
3261
3262 VM_ASSERT_EMT(pVM);
3263 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3264 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3265
3266
3267 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3268
3269 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3270 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3271 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3272 else if (fHasHCHandler)
3273 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3274 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3275
3276 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3277}
3278
3279/**
3280 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3281 *
3282 * @param pVM VM Handle.
3283 * @param enmType Handler type.
3284 * @param GCPhys Handler range address.
3285 * @param cb Size of the handler range.
3286 * @param fHasHCHandler Set if the handler has a HC callback function.
3287 *
3288 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3289 * Handler memory type to memory which has no HC handler.
3290 */
3291REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3292{
3293 REMR3ReplayHandlerNotifications(pVM);
3294
3295 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3296}
3297
3298/**
3299 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3300 *
3301 * @param pVM VM Handle.
3302 * @param enmType Handler type.
3303 * @param GCPhys Handler range address.
3304 * @param cb Size of the handler range.
3305 * @param fHasHCHandler Set if the handler has a HC callback function.
3306 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3307 */
3308static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3309{
3310 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3311 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3312 VM_ASSERT_EMT(pVM);
3313
3314
3315 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3316
3317 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3318 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3319 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3320 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3321 else if (fHasHCHandler)
3322 {
3323 if (!fRestoreAsRAM)
3324 {
3325 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3326 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3327 }
3328 else
3329 {
3330 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3331 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3332 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3333 }
3334 }
3335 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3336
3337 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3338}
3339
3340/**
3341 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3342 *
3343 * @param pVM VM Handle.
3344 * @param enmType Handler type.
3345 * @param GCPhys Handler range address.
3346 * @param cb Size of the handler range.
3347 * @param fHasHCHandler Set if the handler has a HC callback function.
3348 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3349 */
3350REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3351{
3352 REMR3ReplayHandlerNotifications(pVM);
3353 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3354}
3355
3356
3357/**
3358 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3359 *
3360 * @param pVM VM Handle.
3361 * @param enmType Handler type.
3362 * @param GCPhysOld Old handler range address.
3363 * @param GCPhysNew New handler range address.
3364 * @param cb Size of the handler range.
3365 * @param fHasHCHandler Set if the handler has a HC callback function.
3366 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3367 */
3368static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3369{
3370 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3371 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3372 VM_ASSERT_EMT(pVM);
3373 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3374
3375 if (fHasHCHandler)
3376 {
3377 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3378
3379 /*
3380 * Reset the old page.
3381 */
3382 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3383 if (!fRestoreAsRAM)
3384 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3385 else
3386 {
3387 /* This is not perfect, but it'll do for PD monitoring... */
3388 Assert(cb == PAGE_SIZE);
3389 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3390 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3391 }
3392
3393 /*
3394 * Update the new page.
3395 */
3396 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3397 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3398 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3399 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3400
3401 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3402 }
3403}
3404
3405/**
3406 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3407 *
3408 * @param pVM VM Handle.
3409 * @param enmType Handler type.
3410 * @param GCPhysOld Old handler range address.
3411 * @param GCPhysNew New handler range address.
3412 * @param cb Size of the handler range.
3413 * @param fHasHCHandler Set if the handler has a HC callback function.
3414 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3415 */
3416REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3417{
3418 REMR3ReplayHandlerNotifications(pVM);
3419
3420 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3421}
3422
3423/**
3424 * Checks if we're handling access to this page or not.
3425 *
3426 * @returns true if we're trapping access.
3427 * @returns false if we aren't.
3428 * @param pVM The VM handle.
3429 * @param GCPhys The physical address.
3430 *
3431 * @remark This function will only work correctly in VBOX_STRICT builds!
3432 */
3433REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3434{
3435#ifdef VBOX_STRICT
3436 unsigned long off;
3437 REMR3ReplayHandlerNotifications(pVM);
3438
3439 off = get_phys_page_offset(GCPhys);
3440 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3441 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3442 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3443#else
3444 return false;
3445#endif
3446}
3447
3448
3449/**
3450 * Deals with a rare case in get_phys_addr_code where the code
3451 * is being monitored.
3452 *
3453 * It could also be an MMIO page, in which case we will raise a fatal error.
3454 *
3455 * @returns The physical address corresponding to addr.
3456 * @param env The cpu environment.
3457 * @param addr The virtual address.
3458 * @param pTLBEntry The TLB entry.
3459 */
3460target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3461 target_ulong addr,
3462 CPUTLBEntry *pTLBEntry,
3463 target_phys_addr_t ioTLBEntry)
3464{
3465 PVM pVM = env->pVM;
3466
3467 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3468 {
3469 /* If code memory is being monitored, appropriate IOTLB entry will have
3470 handler IO type, and addend will provide real physical address, no
3471 matter if we store VA in TLB or not, as handlers are always passed PA */
3472 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3473 return ret;
3474 }
3475 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3476 "*** handlers\n",
3477 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3478 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3479 LogRel(("*** mmio\n"));
3480 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3481 LogRel(("*** phys\n"));
3482 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3483 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3484 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3485 AssertFatalFailed();
3486}
3487
3488/**
3489 * Read guest RAM and ROM.
3490 *
3491 * @param SrcGCPhys The source address (guest physical).
3492 * @param pvDst The destination address.
3493 * @param cb Number of bytes
3494 */
3495void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3496{
3497 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3498 VBOX_CHECK_ADDR(SrcGCPhys);
3499 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3500#ifdef VBOX_DEBUG_PHYS
3501 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3502#endif
3503 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3504}
3505
3506
3507/**
3508 * Read guest RAM and ROM, unsigned 8-bit.
3509 *
3510 * @param SrcGCPhys The source address (guest physical).
3511 */
3512RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3513{
3514 uint8_t val;
3515 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3516 VBOX_CHECK_ADDR(SrcGCPhys);
3517 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3518 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3519#ifdef VBOX_DEBUG_PHYS
3520 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3521#endif
3522 return val;
3523}
3524
3525
3526/**
3527 * Read guest RAM and ROM, signed 8-bit.
3528 *
3529 * @param SrcGCPhys The source address (guest physical).
3530 */
3531RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3532{
3533 int8_t val;
3534 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3535 VBOX_CHECK_ADDR(SrcGCPhys);
3536 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3537 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3538#ifdef VBOX_DEBUG_PHYS
3539 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3540#endif
3541 return val;
3542}
3543
3544
3545/**
3546 * Read guest RAM and ROM, unsigned 16-bit.
3547 *
3548 * @param SrcGCPhys The source address (guest physical).
3549 */
3550RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3551{
3552 uint16_t val;
3553 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3554 VBOX_CHECK_ADDR(SrcGCPhys);
3555 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3556 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3557#ifdef VBOX_DEBUG_PHYS
3558 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3559#endif
3560 return val;
3561}
3562
3563
3564/**
3565 * Read guest RAM and ROM, signed 16-bit.
3566 *
3567 * @param SrcGCPhys The source address (guest physical).
3568 */
3569RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3570{
3571 int16_t val;
3572 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3573 VBOX_CHECK_ADDR(SrcGCPhys);
3574 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3575 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3576#ifdef VBOX_DEBUG_PHYS
3577 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3578#endif
3579 return val;
3580}
3581
3582
3583/**
3584 * Read guest RAM and ROM, unsigned 32-bit.
3585 *
3586 * @param SrcGCPhys The source address (guest physical).
3587 */
3588RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3589{
3590 uint32_t val;
3591 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3592 VBOX_CHECK_ADDR(SrcGCPhys);
3593 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3594 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3595#ifdef VBOX_DEBUG_PHYS
3596 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3597#endif
3598 return val;
3599}
3600
3601
3602/**
3603 * Read guest RAM and ROM, signed 32-bit.
3604 *
3605 * @param SrcGCPhys The source address (guest physical).
3606 */
3607RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3608{
3609 int32_t val;
3610 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3611 VBOX_CHECK_ADDR(SrcGCPhys);
3612 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3613 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3614#ifdef VBOX_DEBUG_PHYS
3615 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3616#endif
3617 return val;
3618}
3619
3620
3621/**
3622 * Read guest RAM and ROM, unsigned 64-bit.
3623 *
3624 * @param SrcGCPhys The source address (guest physical).
3625 */
3626uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3627{
3628 uint64_t val;
3629 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3630 VBOX_CHECK_ADDR(SrcGCPhys);
3631 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3632 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3633#ifdef VBOX_DEBUG_PHYS
3634 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3635#endif
3636 return val;
3637}
3638
3639
3640/**
3641 * Read guest RAM and ROM, signed 64-bit.
3642 *
3643 * @param SrcGCPhys The source address (guest physical).
3644 */
3645int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3646{
3647 int64_t val;
3648 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3649 VBOX_CHECK_ADDR(SrcGCPhys);
3650 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3651 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3652#ifdef VBOX_DEBUG_PHYS
3653 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3654#endif
3655 return val;
3656}
3657
3658
3659/**
3660 * Write guest RAM.
3661 *
3662 * @param DstGCPhys The destination address (guest physical).
3663 * @param pvSrc The source address.
3664 * @param cb Number of bytes to write
3665 */
3666void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3667{
3668 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3669 VBOX_CHECK_ADDR(DstGCPhys);
3670 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3671 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3672#ifdef VBOX_DEBUG_PHYS
3673 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3674#endif
3675}
3676
3677
3678/**
3679 * Write guest RAM, unsigned 8-bit.
3680 *
3681 * @param DstGCPhys The destination address (guest physical).
3682 * @param val Value
3683 */
3684void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3685{
3686 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3687 VBOX_CHECK_ADDR(DstGCPhys);
3688 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3689 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3690#ifdef VBOX_DEBUG_PHYS
3691 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3692#endif
3693}
3694
3695
3696/**
3697 * Write guest RAM, unsigned 8-bit.
3698 *
3699 * @param DstGCPhys The destination address (guest physical).
3700 * @param val Value
3701 */
3702void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3703{
3704 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3705 VBOX_CHECK_ADDR(DstGCPhys);
3706 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3707 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3708#ifdef VBOX_DEBUG_PHYS
3709 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3710#endif
3711}
3712
3713
3714/**
3715 * Write guest RAM, unsigned 32-bit.
3716 *
3717 * @param DstGCPhys The destination address (guest physical).
3718 * @param val Value
3719 */
3720void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3721{
3722 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3723 VBOX_CHECK_ADDR(DstGCPhys);
3724 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3725 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3726#ifdef VBOX_DEBUG_PHYS
3727 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3728#endif
3729}
3730
3731
3732/**
3733 * Write guest RAM, unsigned 64-bit.
3734 *
3735 * @param DstGCPhys The destination address (guest physical).
3736 * @param val Value
3737 */
3738void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3739{
3740 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3741 VBOX_CHECK_ADDR(DstGCPhys);
3742 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3743 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3744#ifdef VBOX_DEBUG_PHYS
3745 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3746#endif
3747}
3748
3749#undef LOG_GROUP
3750#define LOG_GROUP LOG_GROUP_REM_MMIO
3751
3752/** Read MMIO memory. */
3753static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3754{
3755 uint32_t u32 = 0;
3756 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3757 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3758 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3759 return u32;
3760}
3761
3762/** Read MMIO memory. */
3763static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3764{
3765 uint32_t u32 = 0;
3766 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3767 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3768 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3769 return u32;
3770}
3771
3772/** Read MMIO memory. */
3773static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3774{
3775 uint32_t u32 = 0;
3776 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3777 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3778 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3779 return u32;
3780}
3781
3782/** Write to MMIO memory. */
3783static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3784{
3785 int rc;
3786 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3787 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3788 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3789}
3790
3791/** Write to MMIO memory. */
3792static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3793{
3794 int rc;
3795 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3796 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3797 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3798}
3799
3800/** Write to MMIO memory. */
3801static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3802{
3803 int rc;
3804 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3805 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3806 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3807}
3808
3809
3810#undef LOG_GROUP
3811#define LOG_GROUP LOG_GROUP_REM_HANDLER
3812
3813/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3814
/** Read handler-covered memory, unsigned 8-bit; routed through PGMPhysRead
 *  (see the LFB-access warning above). Return code of PGMPhysRead is ignored. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3822
/** Read handler-covered memory, unsigned 16-bit; routed through PGMPhysRead
 *  (see the LFB-access warning above). Return code of PGMPhysRead is ignored. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3830
/** Read handler-covered memory, unsigned 32-bit; routed through PGMPhysRead
 *  (see the LFB-access warning above). Return code of PGMPhysRead is ignored. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3838
/** Write to handler-covered memory, unsigned 8-bit; routed through PGMPhysWrite.
 *  Passing &u32 with a 1-byte size writes the least significant byte — this
 *  assumes a little-endian host (x86/amd64 only, which is all REM targets). */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3844
/** Write to handler-covered memory, unsigned 16-bit; routed through PGMPhysWrite.
 *  Writes the low two bytes of u32 — assumes a little-endian host (x86/amd64). */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3850
/** Write to handler-covered memory, unsigned 32-bit; routed through PGMPhysWrite. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3856
3857/* -+- disassembly -+- */
3858
3859#undef LOG_GROUP
3860#define LOG_GROUP LOG_GROUP_REM_DISAS
3861
3862
3863/**
3864 * Enables or disables singled stepped disassembly.
3865 *
3866 * @returns VBox status code.
3867 * @param pVM VM handle.
3868 * @param fEnable To enable set this flag, to disable clear it.
3869 */
3870static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3871{
3872 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3873 VM_ASSERT_EMT(pVM);
3874
3875 if (fEnable)
3876 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3877 else
3878 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3879#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3880 cpu_single_step(&pVM->rem.s.Env, fEnable);
3881#endif
3882 return VINF_SUCCESS;
3883}
3884
3885
3886/**
3887 * Enables or disables singled stepped disassembly.
3888 *
3889 * @returns VBox status code.
3890 * @param pVM VM handle.
3891 * @param fEnable To enable set this flag, to disable clear it.
3892 */
3893REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3894{
3895 int rc;
3896
3897 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3898 if (VM_IS_EMT(pVM))
3899 return remR3DisasEnableStepping(pVM, fEnable);
3900
3901 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3902 AssertRC(rc);
3903 return rc;
3904}
3905
3906
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument the current stepping state is printed; with one argument
 * it is converted to a bool and stepping is enabled/disabled accordingly.
 *
 * @returns VBox status code from the command helper calls.
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Command helper callbacks.
 * @param   pVM         VM handle.
 * @param   paArgs      Argument array (0 or 1 entries).
 * @param   cArgs       Number of arguments.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    int rc;

    if (cArgs == 0)
        /*
         * Print the current status.
         */
        rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
                              pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
    else
    {
        /*
         * Convert the argument and change the mode.
         */
        bool fEnable;
        rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
        if (RT_SUCCESS(rc))
        {
            rc = REMR3DisasEnableStepping(pVM, fEnable);
            if (RT_SUCCESS(rc))
                rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
            else
                rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
        }
        else
            rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
    }
    return rc;
}
#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3942
3943
3944/**
3945 * Disassembles one instruction and prints it to the log.
3946 *
3947 * @returns Success indicator.
3948 * @param env Pointer to the recompiler CPU structure.
3949 * @param f32BitCode Indicates that whether or not the code should
3950 * be disassembled as 16 or 32 bit. If -1 the CS
3951 * selector will be inspected.
3952 * @param pszPrefix
3953 */
3954bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3955{
3956 PVM pVM = env->pVM;
3957 const bool fLog = LogIsEnabled();
3958 const bool fLog2 = LogIs2Enabled();
3959 int rc = VINF_SUCCESS;
3960
3961 /*
3962 * Don't bother if there ain't any log output to do.
3963 */
3964 if (!fLog && !fLog2)
3965 return true;
3966
3967 /*
3968 * Update the state so DBGF reads the correct register values.
3969 */
3970 remR3StateUpdate(pVM, env->pVCpu);
3971
3972 /*
3973 * Log registers if requested.
3974 */
3975 if (fLog2)
3976 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3977
3978 /*
3979 * Disassemble to log.
3980 */
3981 if (fLog)
3982 {
3983 PVMCPU pVCpu = VMMGetCpu(pVM);
3984 char szBuf[256];
3985 szBuf[0] = '\0';
3986 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3987 pVCpu->idCpu,
3988 0, /* Sel */
3989 0, /* GCPtr */
3990 DBGF_DISAS_FLAGS_CURRENT_GUEST
3991 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3992 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3993 szBuf,
3994 sizeof(szBuf),
3995 NULL);
3996 if (RT_FAILURE(rc))
3997 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3998 if (pszPrefix && *pszPrefix)
3999 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4000 else
4001 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4002 }
4003
4004 return RT_SUCCESS(rc);
4005}
4006
4007
4008/**
4009 * Disassemble recompiled code.
4010 *
4011 * @param phFileIgnored Ignored, logfile usually.
4012 * @param pvCode Pointer to the code block.
4013 * @param cb Size of the code block.
4014 */
4015void disas(FILE *phFile, void *pvCode, unsigned long cb)
4016{
4017 if (LogIs2Enabled())
4018 {
4019 unsigned off = 0;
4020 char szOutput[256];
4021 DISCPUSTATE Cpu;
4022
4023 memset(&Cpu, 0, sizeof(Cpu));
4024#ifdef RT_ARCH_X86
4025 Cpu.mode = CPUMODE_32BIT;
4026#else
4027 Cpu.mode = CPUMODE_64BIT;
4028#endif
4029
4030 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4031 while (off < cb)
4032 {
4033 uint32_t cbInstr;
4034 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4035 RTLogPrintf("%s", szOutput);
4036 else
4037 {
4038 RTLogPrintf("disas error\n");
4039 cbInstr = 1;
4040#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4041 break;
4042#endif
4043 }
4044 off += cbInstr;
4045 }
4046 }
4047}
4048
4049
4050/**
4051 * Disassemble guest code.
4052 *
4053 * @param phFileIgnored Ignored, logfile usually.
4054 * @param uCode The guest address of the code to disassemble. (flat?)
4055 * @param cb Number of bytes to disassemble.
4056 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4057 */
4058void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4059{
4060 if (LogIs2Enabled())
4061 {
4062 PVM pVM = cpu_single_env->pVM;
4063 PVMCPU pVCpu = cpu_single_env->pVCpu;
4064 RTSEL cs;
4065 RTGCUINTPTR eip;
4066
4067 Assert(pVCpu);
4068
4069 /*
4070 * Update the state so DBGF reads the correct register values (flags).
4071 */
4072 remR3StateUpdate(pVM, pVCpu);
4073
4074 /*
4075 * Do the disassembling.
4076 */
4077 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4078 cs = cpu_single_env->segs[R_CS].selector;
4079 eip = uCode - cpu_single_env->segs[R_CS].base;
4080 for (;;)
4081 {
4082 char szBuf[256];
4083 uint32_t cbInstr;
4084 int rc = DBGFR3DisasInstrEx(pVM,
4085 pVCpu->idCpu,
4086 cs,
4087 eip,
4088 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4089 szBuf, sizeof(szBuf),
4090 &cbInstr);
4091 if (RT_SUCCESS(rc))
4092 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4093 else
4094 {
4095 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4096 cbInstr = 1;
4097 }
4098
4099 /* next */
4100 if (cb <= cbInstr)
4101 break;
4102 cb -= cbInstr;
4103 uCode += cbInstr;
4104 eip += cbInstr;
4105 }
4106 }
4107}
4108
4109
4110/**
4111 * Looks up a guest symbol.
4112 *
4113 * @returns Pointer to symbol name. This is a static buffer.
4114 * @param orig_addr The address in question.
4115 */
4116const char *lookup_symbol(target_ulong orig_addr)
4117{
4118 PVM pVM = cpu_single_env->pVM;
4119 RTGCINTPTR off = 0;
4120 RTDBGSYMBOL Sym;
4121 DBGFADDRESS Addr;
4122
4123 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4124 if (RT_SUCCESS(rc))
4125 {
4126 static char szSym[sizeof(Sym.szName) + 48];
4127 if (!off)
4128 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4129 else if (off > 0)
4130 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4131 else
4132 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4133 return szSym;
4134 }
4135 return "<N/A>";
4136}
4137
4138
4139#undef LOG_GROUP
4140#define LOG_GROUP LOG_GROUP_REM
4141
4142
4143/* -+- FF notifications -+- */
4144
4145
4146/**
4147 * Notification about a pending interrupt.
4148 *
4149 * @param pVM VM Handle.
4150 * @param pVCpu VMCPU Handle.
4151 * @param u8Interrupt Interrupt
4152 * @thread The emulation thread.
4153 */
4154REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4155{
4156 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4157 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4158}
4159
4160/**
4161 * Notification about a pending interrupt.
4162 *
4163 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4164 * @param pVM VM Handle.
4165 * @param pVCpu VMCPU Handle.
4166 * @thread The emulation thread.
4167 */
4168REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4169{
4170 return pVM->rem.s.u32PendingInterrupt;
4171}
4172
4173/**
4174 * Notification about the interrupt FF being set.
4175 *
4176 * @param pVM VM Handle.
4177 * @param pVCpu VMCPU Handle.
4178 * @thread The emulation thread.
4179 */
4180REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4181{
4182#ifndef IEM_VERIFICATION_MODE
4183 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4184 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4185 if (pVM->rem.s.fInREM)
4186 {
4187 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4188 CPU_INTERRUPT_EXTERNAL_HARD);
4189 }
4190#endif
4191}
4192
4193
4194/**
4195 * Notification about the interrupt FF being set.
4196 *
4197 * @param pVM VM Handle.
4198 * @param pVCpu VMCPU Handle.
4199 * @thread Any.
4200 */
4201REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4202{
4203 LogFlow(("REMR3NotifyInterruptClear:\n"));
4204 if (pVM->rem.s.fInREM)
4205 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4206}
4207
4208
4209/**
4210 * Notification about pending timer(s).
4211 *
4212 * @param pVM VM Handle.
4213 * @param pVCpuDst The target cpu for this notification.
4214 * TM will not broadcast pending timer events, but use
4215 * a dedicated EMT for them. So, only interrupt REM
4216 * execution if the given CPU is executing in REM.
4217 * @thread Any.
4218 */
4219REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4220{
4221#ifndef IEM_VERIFICATION_MODE
4222#ifndef DEBUG_bird
4223 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4224#endif
4225 if (pVM->rem.s.fInREM)
4226 {
4227 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4228 {
4229 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4230 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4231 CPU_INTERRUPT_EXTERNAL_TIMER);
4232 }
4233 else
4234 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4235 }
4236 else
4237 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4238#endif
4239}
4240
4241
4242/**
4243 * Notification about pending DMA transfers.
4244 *
4245 * @param pVM VM Handle.
4246 * @thread Any.
4247 */
4248REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4249{
4250#ifndef IEM_VERIFICATION_MODE
4251 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4252 if (pVM->rem.s.fInREM)
4253 {
4254 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4255 CPU_INTERRUPT_EXTERNAL_DMA);
4256 }
4257#endif
4258}
4259
4260
4261/**
4262 * Notification about pending timer(s).
4263 *
4264 * @param pVM VM Handle.
4265 * @thread Any.
4266 */
4267REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4268{
4269#ifndef IEM_VERIFICATION_MODE
4270 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4271 if (pVM->rem.s.fInREM)
4272 {
4273 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4274 CPU_INTERRUPT_EXTERNAL_EXIT);
4275 }
4276#endif
4277}
4278
4279
4280/**
4281 * Notification about pending FF set by an external thread.
4282 *
4283 * @param pVM VM handle.
4284 * @thread Any.
4285 */
4286REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4287{
4288#ifndef IEM_VERIFICATION_MODE
4289 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4290 if (pVM->rem.s.fInREM)
4291 {
4292 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4293 CPU_INTERRUPT_EXTERNAL_EXIT);
4294 }
4295#endif
4296}
4297
4298
4299#ifdef VBOX_WITH_STATISTICS
4300void remR3ProfileStart(int statcode)
4301{
4302 STAMPROFILEADV *pStat;
4303 switch(statcode)
4304 {
4305 case STATS_EMULATE_SINGLE_INSTR:
4306 pStat = &gStatExecuteSingleInstr;
4307 break;
4308 case STATS_QEMU_COMPILATION:
4309 pStat = &gStatCompilationQEmu;
4310 break;
4311 case STATS_QEMU_RUN_EMULATED_CODE:
4312 pStat = &gStatRunCodeQEmu;
4313 break;
4314 case STATS_QEMU_TOTAL:
4315 pStat = &gStatTotalTimeQEmu;
4316 break;
4317 case STATS_QEMU_RUN_TIMERS:
4318 pStat = &gStatTimers;
4319 break;
4320 case STATS_TLB_LOOKUP:
4321 pStat= &gStatTBLookup;
4322 break;
4323 case STATS_IRQ_HANDLING:
4324 pStat= &gStatIRQ;
4325 break;
4326 case STATS_RAW_CHECK:
4327 pStat = &gStatRawCheck;
4328 break;
4329
4330 default:
4331 AssertMsgFailed(("unknown stat %d\n", statcode));
4332 return;
4333 }
4334 STAM_PROFILE_ADV_START(pStat, a);
4335}
4336
4337
4338void remR3ProfileStop(int statcode)
4339{
4340 STAMPROFILEADV *pStat;
4341 switch(statcode)
4342 {
4343 case STATS_EMULATE_SINGLE_INSTR:
4344 pStat = &gStatExecuteSingleInstr;
4345 break;
4346 case STATS_QEMU_COMPILATION:
4347 pStat = &gStatCompilationQEmu;
4348 break;
4349 case STATS_QEMU_RUN_EMULATED_CODE:
4350 pStat = &gStatRunCodeQEmu;
4351 break;
4352 case STATS_QEMU_TOTAL:
4353 pStat = &gStatTotalTimeQEmu;
4354 break;
4355 case STATS_QEMU_RUN_TIMERS:
4356 pStat = &gStatTimers;
4357 break;
4358 case STATS_TLB_LOOKUP:
4359 pStat= &gStatTBLookup;
4360 break;
4361 case STATS_IRQ_HANDLING:
4362 pStat= &gStatIRQ;
4363 break;
4364 case STATS_RAW_CHECK:
4365 pStat = &gStatRawCheck;
4366 break;
4367 default:
4368 AssertMsgFailed(("unknown stat %d\n", statcode));
4369 return;
4370 }
4371 STAM_PROFILE_ADV_STOP(pStat, a);
4372}
4373#endif
4374
4375/**
4376 * Raise an RC, force rem exit.
4377 *
4378 * @param pVM VM handle.
4379 * @param rc The rc.
4380 */
4381void remR3RaiseRC(PVM pVM, int rc)
4382{
4383 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4384 Assert(pVM->rem.s.fInREM);
4385 VM_ASSERT_EMT(pVM);
4386 pVM->rem.s.rc = rc;
4387 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4388}
4389
4390
4391/* -+- timers -+- */
4392
/** Reads the virtual CPU timestamp counter via TM (usage is counted in the stats). */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4398
4399
4400/* -+- interrupts -+- */
4401
/** Asserts ISA IRQ 13 (the legacy FPU error / FERR line). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4407
4408int cpu_get_pic_interrupt(CPUX86State *env)
4409{
4410 uint8_t u8Interrupt;
4411 int rc;
4412
4413 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4414 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4415 * with the (a)pic.
4416 */
4417 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4418 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4419 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4420 * remove this kludge. */
4421 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4422 {
4423 rc = VINF_SUCCESS;
4424 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4425 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4426 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4427 }
4428 else
4429 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4430
4431 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4432 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4433 if (RT_SUCCESS(rc))
4434 {
4435 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4436 env->interrupt_request |= CPU_INTERRUPT_HARD;
4437 return u8Interrupt;
4438 }
4439 return -1;
4440}
4441
4442
4443/* -+- local apic -+- */
4444
#if 0 /* CPUMSetGuestMsr does this now. */
/* NOTE(review): dead code kept for reference only; verify against the current
   PDMApicSetBase signature before ever re-enabling. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
#endif
4452
4453uint64_t cpu_get_apic_base(CPUX86State *env)
4454{
4455 uint64_t u64;
4456 int rc = PDMApicGetBase(env->pVM, &u64);
4457 if (RT_SUCCESS(rc))
4458 {
4459 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4460 return u64;
4461 }
4462 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4463 return 0;
4464}
4465
/** Writes the task priority register (CR8 view) via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4471
4472uint8_t cpu_get_apic_tpr(CPUX86State *env)
4473{
4474 uint8_t u8;
4475 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4476 if (RT_SUCCESS(rc))
4477 {
4478 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4479 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4480 }
4481 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4482 return 0;
4483}
4484
4485/**
4486 * Read an MSR.
4487 *
4488 * @retval 0 success.
4489 * @retval -1 failure, raise \#GP(0).
4490 * @param env The cpu state.
4491 * @param idMsr The MSR to read.
4492 * @param puValue Where to return the value.
4493 */
4494int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4495{
4496 Assert(env->pVCpu);
4497 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4498}
4499
4500/**
4501 * Write to an MSR.
4502 *
4503 * @retval 0 success.
4504 * @retval -1 failure, raise \#GP(0).
4505 * @param env The cpu state.
4506 * @param idMsr The MSR to read.
4507 * @param puValue Where to return the value.
4508 */
4509int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4510{
4511 Assert(env->pVCpu);
4512 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4513}
4514
4515/* -+- I/O Ports -+- */
4516
4517#undef LOG_GROUP
4518#define LOG_GROUP LOG_GROUP_REM_IOPORT
4519
/** OUT to an I/O port, 8-bit. EM statuses are raised via remR3RaiseRC; anything else aborts. */
void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
{
    int rc;

    /* Skip ports 0x80 (POST), 0x70 (CMOS index) and 0x61 (system control),
       presumably to cut log noise from frequent accesses — NOTE(review). */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4538
/** OUT to an I/O port, 16-bit. EM statuses are raised via remR3RaiseRC; anything else aborts. */
void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
{
    //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
    int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4553
/** OUT to an I/O port, 32-bit. EM statuses are raised via remR3RaiseRC; anything else aborts. */
void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
{
    int rc;
    Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4569
/** IN from an I/O port, 8-bit. Returns 0xff when the port access has to be aborted. */
uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Port 0x71 (CMOS data) is not logged, presumably to cut log noise — NOTE(review). */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (uint8_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint8_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT8_C(0xff);
}
4589
/** IN from an I/O port, 16-bit. Returns 0xffff when the port access has to be aborted. */
uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
        return (uint16_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint16_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT16_C(0xffff);
}
4608
/** IN from an I/O port, 32-bit. Returns 0xffffffff when the port access has to be aborted. */
uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
//if (addr==0x01f0 && u32 == 0x6b6d)
//    loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT32_C(0xffffffff);
}
4629
4630#undef LOG_GROUP
4631#define LOG_GROUP LOG_GROUP_REM
4632
4633
4634/* -+- helpers and misc other interfaces -+- */
4635
4636/**
4637 * Perform the CPUID instruction.
4638 *
4639 * @param env Pointer to the recompiler CPU structure.
4640 * @param idx The CPUID leaf (eax).
4641 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4642 * @param pvEAX Where to store eax.
4643 * @param pvEBX Where to store ebx.
4644 * @param pvECX Where to store ecx.
4645 * @param pvEDX Where to store edx.
4646 */
4647void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4648 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4649{
4650 NOREF(idxSub);
4651 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4652}
4653
4654
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): dead code — the REMR3StateBack(pVM) call below uses the old
 * single-argument form (the live code passes pVCpu too); update before re-enabling.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4684
4685/**
4686 * Interface for the qemu cpu to report unhandled situation
4687 * raising a fatal VM error.
4688 */
4689void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4690{
4691 va_list va;
4692 PVM pVM;
4693 PVMCPU pVCpu;
4694 char szMsg[256];
4695
4696 /*
4697 * Bitch about it.
4698 */
4699 RTLogFlags(NULL, "nodisabled nobuffered");
4700 RTLogFlush(NULL);
4701
4702 va_start(va, pszFormat);
4703#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4704 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4705 unsigned cArgs = 0;
4706 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4707 const char *psz = strchr(pszFormat, '%');
4708 while (psz && cArgs < 6)
4709 {
4710 auArgs[cArgs++] = va_arg(va, uintptr_t);
4711 psz = strchr(psz + 1, '%');
4712 }
4713 switch (cArgs)
4714 {
4715 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4716 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4717 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4718 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4719 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4720 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4721 default:
4722 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4723 }
4724#else
4725 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4726#endif
4727 va_end(va);
4728
4729 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4730 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4731
4732 /*
4733 * If we're in REM context we'll sync back the state before 'jumping' to
4734 * the EMs failure handling.
4735 */
4736 pVM = cpu_single_env->pVM;
4737 pVCpu = cpu_single_env->pVCpu;
4738 Assert(pVCpu);
4739
4740 if (pVM->rem.s.fInREM)
4741 REMR3StateBack(pVM, pVCpu);
4742 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4743 AssertMsgFailed(("EMR3FatalError returned!\n"));
4744}
4745
4746
4747/**
4748 * Aborts the VM.
4749 *
4750 * @param rc VBox error code.
4751 * @param pszTip Hint about why/when this happened.
4752 */
4753void remAbort(int rc, const char *pszTip)
4754{
4755 PVM pVM;
4756 PVMCPU pVCpu;
4757
4758 /*
4759 * Bitch about it.
4760 */
4761 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4762 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4763
4764 /*
4765 * Jump back to where we entered the recompiler.
4766 */
4767 pVM = cpu_single_env->pVM;
4768 pVCpu = cpu_single_env->pVCpu;
4769 Assert(pVCpu);
4770
4771 if (pVM->rem.s.fInREM)
4772 REMR3StateBack(pVM, pVCpu);
4773
4774 EMR3FatalError(pVCpu, rc);
4775 AssertMsgFailed(("EMR3FatalError returned!\n"));
4776}
4777
4778
4779/**
4780 * Dumps a linux system call.
4781 * @param pVCpu VMCPU handle.
4782 */
4783void remR3DumpLnxSyscall(PVMCPU pVCpu)
4784{
4785 static const char *apsz[] =
4786 {
4787 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4788 "sys_exit",
4789 "sys_fork",
4790 "sys_read",
4791 "sys_write",
4792 "sys_open", /* 5 */
4793 "sys_close",
4794 "sys_waitpid",
4795 "sys_creat",
4796 "sys_link",
4797 "sys_unlink", /* 10 */
4798 "sys_execve",
4799 "sys_chdir",
4800 "sys_time",
4801 "sys_mknod",
4802 "sys_chmod", /* 15 */
4803 "sys_lchown16",
4804 "sys_ni_syscall", /* old break syscall holder */
4805 "sys_stat",
4806 "sys_lseek",
4807 "sys_getpid", /* 20 */
4808 "sys_mount",
4809 "sys_oldumount",
4810 "sys_setuid16",
4811 "sys_getuid16",
4812 "sys_stime", /* 25 */
4813 "sys_ptrace",
4814 "sys_alarm",
4815 "sys_fstat",
4816 "sys_pause",
4817 "sys_utime", /* 30 */
4818 "sys_ni_syscall", /* old stty syscall holder */
4819 "sys_ni_syscall", /* old gtty syscall holder */
4820 "sys_access",
4821 "sys_nice",
4822 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4823 "sys_sync",
4824 "sys_kill",
4825 "sys_rename",
4826 "sys_mkdir",
4827 "sys_rmdir", /* 40 */
4828 "sys_dup",
4829 "sys_pipe",
4830 "sys_times",
4831 "sys_ni_syscall", /* old prof syscall holder */
4832 "sys_brk", /* 45 */
4833 "sys_setgid16",
4834 "sys_getgid16",
4835 "sys_signal",
4836 "sys_geteuid16",
4837 "sys_getegid16", /* 50 */
4838 "sys_acct",
4839 "sys_umount", /* recycled never used phys() */
4840 "sys_ni_syscall", /* old lock syscall holder */
4841 "sys_ioctl",
4842 "sys_fcntl", /* 55 */
4843 "sys_ni_syscall", /* old mpx syscall holder */
4844 "sys_setpgid",
4845 "sys_ni_syscall", /* old ulimit syscall holder */
4846 "sys_olduname",
4847 "sys_umask", /* 60 */
4848 "sys_chroot",
4849 "sys_ustat",
4850 "sys_dup2",
4851 "sys_getppid",
4852 "sys_getpgrp", /* 65 */
4853 "sys_setsid",
4854 "sys_sigaction",
4855 "sys_sgetmask",
4856 "sys_ssetmask",
4857 "sys_setreuid16", /* 70 */
4858 "sys_setregid16",
4859 "sys_sigsuspend",
4860 "sys_sigpending",
4861 "sys_sethostname",
4862 "sys_setrlimit", /* 75 */
4863 "sys_old_getrlimit",
4864 "sys_getrusage",
4865 "sys_gettimeofday",
4866 "sys_settimeofday",
4867 "sys_getgroups16", /* 80 */
4868 "sys_setgroups16",
4869 "old_select",
4870 "sys_symlink",
4871 "sys_lstat",
4872 "sys_readlink", /* 85 */
4873 "sys_uselib",
4874 "sys_swapon",
4875 "sys_reboot",
4876 "old_readdir",
4877 "old_mmap", /* 90 */
4878 "sys_munmap",
4879 "sys_truncate",
4880 "sys_ftruncate",
4881 "sys_fchmod",
4882 "sys_fchown16", /* 95 */
4883 "sys_getpriority",
4884 "sys_setpriority",
4885 "sys_ni_syscall", /* old profil syscall holder */
4886 "sys_statfs",
4887 "sys_fstatfs", /* 100 */
4888 "sys_ioperm",
4889 "sys_socketcall",
4890 "sys_syslog",
4891 "sys_setitimer",
4892 "sys_getitimer", /* 105 */
4893 "sys_newstat",
4894 "sys_newlstat",
4895 "sys_newfstat",
4896 "sys_uname",
4897 "sys_iopl", /* 110 */
4898 "sys_vhangup",
4899 "sys_ni_syscall", /* old "idle" system call */
4900 "sys_vm86old",
4901 "sys_wait4",
4902 "sys_swapoff", /* 115 */
4903 "sys_sysinfo",
4904 "sys_ipc",
4905 "sys_fsync",
4906 "sys_sigreturn",
4907 "sys_clone", /* 120 */
4908 "sys_setdomainname",
4909 "sys_newuname",
4910 "sys_modify_ldt",
4911 "sys_adjtimex",
4912 "sys_mprotect", /* 125 */
4913 "sys_sigprocmask",
4914 "sys_ni_syscall", /* old "create_module" */
4915 "sys_init_module",
4916 "sys_delete_module",
4917 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4918 "sys_quotactl",
4919 "sys_getpgid",
4920 "sys_fchdir",
4921 "sys_bdflush",
4922 "sys_sysfs", /* 135 */
4923 "sys_personality",
4924 "sys_ni_syscall", /* reserved for afs_syscall */
4925 "sys_setfsuid16",
4926 "sys_setfsgid16",
4927 "sys_llseek", /* 140 */
4928 "sys_getdents",
4929 "sys_select",
4930 "sys_flock",
4931 "sys_msync",
4932 "sys_readv", /* 145 */
4933 "sys_writev",
4934 "sys_getsid",
4935 "sys_fdatasync",
4936 "sys_sysctl",
4937 "sys_mlock", /* 150 */
4938 "sys_munlock",
4939 "sys_mlockall",
4940 "sys_munlockall",
4941 "sys_sched_setparam",
4942 "sys_sched_getparam", /* 155 */
4943 "sys_sched_setscheduler",
4944 "sys_sched_getscheduler",
4945 "sys_sched_yield",
4946 "sys_sched_get_priority_max",
4947 "sys_sched_get_priority_min", /* 160 */
4948 "sys_sched_rr_get_interval",
4949 "sys_nanosleep",
4950 "sys_mremap",
4951 "sys_setresuid16",
4952 "sys_getresuid16", /* 165 */
4953 "sys_vm86",
4954 "sys_ni_syscall", /* Old sys_query_module */
4955 "sys_poll",
4956 "sys_nfsservctl",
4957 "sys_setresgid16", /* 170 */
4958 "sys_getresgid16",
4959 "sys_prctl",
4960 "sys_rt_sigreturn",
4961 "sys_rt_sigaction",
4962 "sys_rt_sigprocmask", /* 175 */
4963 "sys_rt_sigpending",
4964 "sys_rt_sigtimedwait",
4965 "sys_rt_sigqueueinfo",
4966 "sys_rt_sigsuspend",
4967 "sys_pread64", /* 180 */
4968 "sys_pwrite64",
4969 "sys_chown16",
4970 "sys_getcwd",
4971 "sys_capget",
4972 "sys_capset", /* 185 */
4973 "sys_sigaltstack",
4974 "sys_sendfile",
4975 "sys_ni_syscall", /* reserved for streams1 */
4976 "sys_ni_syscall", /* reserved for streams2 */
4977 "sys_vfork", /* 190 */
4978 "sys_getrlimit",
4979 "sys_mmap2",
4980 "sys_truncate64",
4981 "sys_ftruncate64",
4982 "sys_stat64", /* 195 */
4983 "sys_lstat64",
4984 "sys_fstat64",
4985 "sys_lchown",
4986 "sys_getuid",
4987 "sys_getgid", /* 200 */
4988 "sys_geteuid",
4989 "sys_getegid",
4990 "sys_setreuid",
4991 "sys_setregid",
4992 "sys_getgroups", /* 205 */
4993 "sys_setgroups",
4994 "sys_fchown",
4995 "sys_setresuid",
4996 "sys_getresuid",
4997 "sys_setresgid", /* 210 */
4998 "sys_getresgid",
4999 "sys_chown",
5000 "sys_setuid",
5001 "sys_setgid",
5002 "sys_setfsuid", /* 215 */
5003 "sys_setfsgid",
5004 "sys_pivot_root",
5005 "sys_mincore",
5006 "sys_madvise",
5007 "sys_getdents64", /* 220 */
5008 "sys_fcntl64",
5009 "sys_ni_syscall", /* reserved for TUX */
5010 "sys_ni_syscall",
5011 "sys_gettid",
5012 "sys_readahead", /* 225 */
5013 "sys_setxattr",
5014 "sys_lsetxattr",
5015 "sys_fsetxattr",
5016 "sys_getxattr",
5017 "sys_lgetxattr", /* 230 */
5018 "sys_fgetxattr",
5019 "sys_listxattr",
5020 "sys_llistxattr",
5021 "sys_flistxattr",
5022 "sys_removexattr", /* 235 */
5023 "sys_lremovexattr",
5024 "sys_fremovexattr",
5025 "sys_tkill",
5026 "sys_sendfile64",
5027 "sys_futex", /* 240 */
5028 "sys_sched_setaffinity",
5029 "sys_sched_getaffinity",
5030 "sys_set_thread_area",
5031 "sys_get_thread_area",
5032 "sys_io_setup", /* 245 */
5033 "sys_io_destroy",
5034 "sys_io_getevents",
5035 "sys_io_submit",
5036 "sys_io_cancel",
5037 "sys_fadvise64", /* 250 */
5038 "sys_ni_syscall",
5039 "sys_exit_group",
5040 "sys_lookup_dcookie",
5041 "sys_epoll_create",
5042 "sys_epoll_ctl", /* 255 */
5043 "sys_epoll_wait",
5044 "sys_remap_file_pages",
5045 "sys_set_tid_address",
5046 "sys_timer_create",
5047 "sys_timer_settime", /* 260 */
5048 "sys_timer_gettime",
5049 "sys_timer_getoverrun",
5050 "sys_timer_delete",
5051 "sys_clock_settime",
5052 "sys_clock_gettime", /* 265 */
5053 "sys_clock_getres",
5054 "sys_clock_nanosleep",
5055 "sys_statfs64",
5056 "sys_fstatfs64",
5057 "sys_tgkill", /* 270 */
5058 "sys_utimes",
5059 "sys_fadvise64_64",
5060 "sys_ni_syscall" /* sys_vserver */
5061 };
5062
5063 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5064 switch (uEAX)
5065 {
5066 default:
5067 if (uEAX < RT_ELEMENTS(apsz))
5068 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5069 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5070 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5071 else
5072 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5073 break;
5074
5075 }
5076}
5077
5078
5079/**
5080 * Dumps an OpenBSD system call.
5081 * @param pVCpu VMCPU handle.
5082 */
5083void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5084{
5085 static const char *apsz[] =
5086 {
5087 "SYS_syscall", //0
5088 "SYS_exit", //1
5089 "SYS_fork", //2
5090 "SYS_read", //3
5091 "SYS_write", //4
5092 "SYS_open", //5
5093 "SYS_close", //6
5094 "SYS_wait4", //7
5095 "SYS_8",
5096 "SYS_link", //9
5097 "SYS_unlink", //10
5098 "SYS_11",
5099 "SYS_chdir", //12
5100 "SYS_fchdir", //13
5101 "SYS_mknod", //14
5102 "SYS_chmod", //15
5103 "SYS_chown", //16
5104 "SYS_break", //17
5105 "SYS_18",
5106 "SYS_19",
5107 "SYS_getpid", //20
5108 "SYS_mount", //21
5109 "SYS_unmount", //22
5110 "SYS_setuid", //23
5111 "SYS_getuid", //24
5112 "SYS_geteuid", //25
5113 "SYS_ptrace", //26
5114 "SYS_recvmsg", //27
5115 "SYS_sendmsg", //28
5116 "SYS_recvfrom", //29
5117 "SYS_accept", //30
5118 "SYS_getpeername", //31
5119 "SYS_getsockname", //32
5120 "SYS_access", //33
5121 "SYS_chflags", //34
5122 "SYS_fchflags", //35
5123 "SYS_sync", //36
5124 "SYS_kill", //37
5125 "SYS_38",
5126 "SYS_getppid", //39
5127 "SYS_40",
5128 "SYS_dup", //41
5129 "SYS_opipe", //42
5130 "SYS_getegid", //43
5131 "SYS_profil", //44
5132 "SYS_ktrace", //45
5133 "SYS_sigaction", //46
5134 "SYS_getgid", //47
5135 "SYS_sigprocmask", //48
5136 "SYS_getlogin", //49
5137 "SYS_setlogin", //50
5138 "SYS_acct", //51
5139 "SYS_sigpending", //52
5140 "SYS_osigaltstack", //53
5141 "SYS_ioctl", //54
5142 "SYS_reboot", //55
5143 "SYS_revoke", //56
5144 "SYS_symlink", //57
5145 "SYS_readlink", //58
5146 "SYS_execve", //59
5147 "SYS_umask", //60
5148 "SYS_chroot", //61
5149 "SYS_62",
5150 "SYS_63",
5151 "SYS_64",
5152 "SYS_65",
5153 "SYS_vfork", //66
5154 "SYS_67",
5155 "SYS_68",
5156 "SYS_sbrk", //69
5157 "SYS_sstk", //70
5158 "SYS_61",
5159 "SYS_vadvise", //72
5160 "SYS_munmap", //73
5161 "SYS_mprotect", //74
5162 "SYS_madvise", //75
5163 "SYS_76",
5164 "SYS_77",
5165 "SYS_mincore", //78
5166 "SYS_getgroups", //79
5167 "SYS_setgroups", //80
5168 "SYS_getpgrp", //81
5169 "SYS_setpgid", //82
5170 "SYS_setitimer", //83
5171 "SYS_84",
5172 "SYS_85",
5173 "SYS_getitimer", //86
5174 "SYS_87",
5175 "SYS_88",
5176 "SYS_89",
5177 "SYS_dup2", //90
5178 "SYS_91",
5179 "SYS_fcntl", //92
5180 "SYS_select", //93
5181 "SYS_94",
5182 "SYS_fsync", //95
5183 "SYS_setpriority", //96
5184 "SYS_socket", //97
5185 "SYS_connect", //98
5186 "SYS_99",
5187 "SYS_getpriority", //100
5188 "SYS_101",
5189 "SYS_102",
5190 "SYS_sigreturn", //103
5191 "SYS_bind", //104
5192 "SYS_setsockopt", //105
5193 "SYS_listen", //106
5194 "SYS_107",
5195 "SYS_108",
5196 "SYS_109",
5197 "SYS_110",
5198 "SYS_sigsuspend", //111
5199 "SYS_112",
5200 "SYS_113",
5201 "SYS_114",
5202 "SYS_115",
5203 "SYS_gettimeofday", //116
5204 "SYS_getrusage", //117
5205 "SYS_getsockopt", //118
5206 "SYS_119",
5207 "SYS_readv", //120
5208 "SYS_writev", //121
5209 "SYS_settimeofday", //122
5210 "SYS_fchown", //123
5211 "SYS_fchmod", //124
5212 "SYS_125",
5213 "SYS_setreuid", //126
5214 "SYS_setregid", //127
5215 "SYS_rename", //128
5216 "SYS_129",
5217 "SYS_130",
5218 "SYS_flock", //131
5219 "SYS_mkfifo", //132
5220 "SYS_sendto", //133
5221 "SYS_shutdown", //134
5222 "SYS_socketpair", //135
5223 "SYS_mkdir", //136
5224 "SYS_rmdir", //137
5225 "SYS_utimes", //138
5226 "SYS_139",
5227 "SYS_adjtime", //140
5228 "SYS_141",
5229 "SYS_142",
5230 "SYS_143",
5231 "SYS_144",
5232 "SYS_145",
5233 "SYS_146",
5234 "SYS_setsid", //147
5235 "SYS_quotactl", //148
5236 "SYS_149",
5237 "SYS_150",
5238 "SYS_151",
5239 "SYS_152",
5240 "SYS_153",
5241 "SYS_154",
5242 "SYS_nfssvc", //155
5243 "SYS_156",
5244 "SYS_157",
5245 "SYS_158",
5246 "SYS_159",
5247 "SYS_160",
5248 "SYS_getfh", //161
5249 "SYS_162",
5250 "SYS_163",
5251 "SYS_164",
5252 "SYS_sysarch", //165
5253 "SYS_166",
5254 "SYS_167",
5255 "SYS_168",
5256 "SYS_169",
5257 "SYS_170",
5258 "SYS_171",
5259 "SYS_172",
5260 "SYS_pread", //173
5261 "SYS_pwrite", //174
5262 "SYS_175",
5263 "SYS_176",
5264 "SYS_177",
5265 "SYS_178",
5266 "SYS_179",
5267 "SYS_180",
5268 "SYS_setgid", //181
5269 "SYS_setegid", //182
5270 "SYS_seteuid", //183
5271 "SYS_lfs_bmapv", //184
5272 "SYS_lfs_markv", //185
5273 "SYS_lfs_segclean", //186
5274 "SYS_lfs_segwait", //187
5275 "SYS_188",
5276 "SYS_189",
5277 "SYS_190",
5278 "SYS_pathconf", //191
5279 "SYS_fpathconf", //192
5280 "SYS_swapctl", //193
5281 "SYS_getrlimit", //194
5282 "SYS_setrlimit", //195
5283 "SYS_getdirentries", //196
5284 "SYS_mmap", //197
5285 "SYS___syscall", //198
5286 "SYS_lseek", //199
5287 "SYS_truncate", //200
5288 "SYS_ftruncate", //201
5289 "SYS___sysctl", //202
5290 "SYS_mlock", //203
5291 "SYS_munlock", //204
5292 "SYS_205",
5293 "SYS_futimes", //206
5294 "SYS_getpgid", //207
5295 "SYS_xfspioctl", //208
5296 "SYS_209",
5297 "SYS_210",
5298 "SYS_211",
5299 "SYS_212",
5300 "SYS_213",
5301 "SYS_214",
5302 "SYS_215",
5303 "SYS_216",
5304 "SYS_217",
5305 "SYS_218",
5306 "SYS_219",
5307 "SYS_220",
5308 "SYS_semget", //221
5309 "SYS_222",
5310 "SYS_223",
5311 "SYS_224",
5312 "SYS_msgget", //225
5313 "SYS_msgsnd", //226
5314 "SYS_msgrcv", //227
5315 "SYS_shmat", //228
5316 "SYS_229",
5317 "SYS_shmdt", //230
5318 "SYS_231",
5319 "SYS_clock_gettime", //232
5320 "SYS_clock_settime", //233
5321 "SYS_clock_getres", //234
5322 "SYS_235",
5323 "SYS_236",
5324 "SYS_237",
5325 "SYS_238",
5326 "SYS_239",
5327 "SYS_nanosleep", //240
5328 "SYS_241",
5329 "SYS_242",
5330 "SYS_243",
5331 "SYS_244",
5332 "SYS_245",
5333 "SYS_246",
5334 "SYS_247",
5335 "SYS_248",
5336 "SYS_249",
5337 "SYS_minherit", //250
5338 "SYS_rfork", //251
5339 "SYS_poll", //252
5340 "SYS_issetugid", //253
5341 "SYS_lchown", //254
5342 "SYS_getsid", //255
5343 "SYS_msync", //256
5344 "SYS_257",
5345 "SYS_258",
5346 "SYS_259",
5347 "SYS_getfsstat", //260
5348 "SYS_statfs", //261
5349 "SYS_fstatfs", //262
5350 "SYS_pipe", //263
5351 "SYS_fhopen", //264
5352 "SYS_265",
5353 "SYS_fhstatfs", //266
5354 "SYS_preadv", //267
5355 "SYS_pwritev", //268
5356 "SYS_kqueue", //269
5357 "SYS_kevent", //270
5358 "SYS_mlockall", //271
5359 "SYS_munlockall", //272
5360 "SYS_getpeereid", //273
5361 "SYS_274",
5362 "SYS_275",
5363 "SYS_276",
5364 "SYS_277",
5365 "SYS_278",
5366 "SYS_279",
5367 "SYS_280",
5368 "SYS_getresuid", //281
5369 "SYS_setresuid", //282
5370 "SYS_getresgid", //283
5371 "SYS_setresgid", //284
5372 "SYS_285",
5373 "SYS_mquery", //286
5374 "SYS_closefrom", //287
5375 "SYS_sigaltstack", //288
5376 "SYS_shmget", //289
5377 "SYS_semop", //290
5378 "SYS_stat", //291
5379 "SYS_fstat", //292
5380 "SYS_lstat", //293
5381 "SYS_fhstat", //294
5382 "SYS___semctl", //295
5383 "SYS_shmctl", //296
5384 "SYS_msgctl", //297
5385 "SYS_MAXSYSCALL", //298
5386 //299
5387 //300
5388 };
5389 uint32_t uEAX;
5390 if (!LogIsEnabled())
5391 return;
5392 uEAX = CPUMGetGuestEAX(pVCpu);
5393 switch (uEAX)
5394 {
5395 default:
5396 if (uEAX < RT_ELEMENTS(apsz))
5397 {
5398 uint32_t au32Args[8] = {0};
5399 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5400 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5401 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5402 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5403 }
5404 else
5405 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5406 break;
5407 }
5408}
5409
5410
5411#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5412/**
5413 * The Dll main entry point (stub).
5414 */
5415bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5416{
5417 return true;
5418}
5419
/**
 * Minimal memcpy for the no-CRT x86 Windows build.
 *
 * Simple byte-by-byte copy; regions must not overlap (standard memcpy
 * contract).
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     Where to copy from.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* was uint8_t *, silently discarding the const qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5427
5428#endif
5429
/**
 * Deliberate no-op stub.
 *
 * NOTE(review): presumably a callback the QEMU core invokes when the SMM
 * state changes and which the recompiler has no need to act on — confirm
 * against the cpu_smm_update callers in the bundled QEMU sources.
 */
void cpu_smm_update(CPUX86State *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette