VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 37702

Last change on this file since 37702 was 37702, checked in by vboxsync, 13 years ago

REM/VMM: Don't flush the TLB if you don't hold the EM/REM lock, some other EMT may be executing code in the recompiler and could be really surprised by a TLB flush.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 179.5 KB
Line 
1/* $Id: VBoxRecompiler.c 37702 2011-06-30 10:09:59Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
76/** Copy 80-bit fpu register at pSrc to pDst.
77 * This is probably faster than *calling* memcpy.
78 */
79#define REM_COPY_FPU_REG(pDst, pSrc) \
80 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
81
82/** How remR3RunLoggingStep operates. */
83#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
/** @todo Move stats to REM::s some rainy day we have nothing to do. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMPROFILE gStatHCVirt2GCPhys;
130static STAMCOUNTER gStatCpuGetTSC;
131static STAMCOUNTER gStatRefuseTFInhibit;
132static STAMCOUNTER gStatRefuseVM86;
133static STAMCOUNTER gStatRefusePaging;
134static STAMCOUNTER gStatRefusePAE;
135static STAMCOUNTER gStatRefuseIOPLNot0;
136static STAMCOUNTER gStatRefuseIF0;
137static STAMCOUNTER gStatRefuseCode16;
138static STAMCOUNTER gStatRefuseWP0;
139static STAMCOUNTER gStatRefuseRing1or2;
140static STAMCOUNTER gStatRefuseCanExecute;
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
158/** MMIO read callbacks. */
159CPUReadMemoryFunc *g_apfnMMIORead[3] =
160{
161 remR3MMIOReadU8,
162 remR3MMIOReadU16,
163 remR3MMIOReadU32
164};
165
166/** MMIO write callbacks. */
167CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
168{
169 remR3MMIOWriteU8,
170 remR3MMIOWriteU16,
171 remR3MMIOWriteU32
172};
173
174/** Handler read callbacks. */
175CPUReadMemoryFunc *g_apfnHandlerRead[3] =
176{
177 remR3HandlerReadU8,
178 remR3HandlerReadU16,
179 remR3HandlerReadU32
180};
181
182/** Handler write callbacks. */
183CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
184{
185 remR3HandlerWriteU8,
186 remR3HandlerWriteU16,
187 remR3HandlerWriteU32
188};
189
190
191#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
192/*
193 * Debugger commands.
194 */
195static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
196
197/** '.remstep' arguments. */
198static const DBGCVARDESC g_aArgRemStep[] =
199{
200 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
201 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
202};
203
204/** Command descriptors. */
205static const DBGCCMD g_aCmds[] =
206{
207 {
208 .pszCmd ="remstep",
209 .cArgsMin = 0,
210 .cArgsMax = 1,
211 .paArgDescs = &g_aArgRemStep[0],
212 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
213 .fFlags = 0,
214 .pfnHandler = remR3CmdDisasEnableStepping,
215 .pszSyntax = "[on/off]",
216 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
217 "If no arguments show the current state."
218 }
219};
220#endif
221
222/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
223uint8_t *code_gen_prologue;
224
225
226/*******************************************************************************
227* Internal Functions *
228*******************************************************************************/
229void remAbort(int rc, const char *pszTip);
230extern int testmath(void);
231
232/* Put them here to avoid unused variable warning. */
233AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
234#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
235//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
236/* Why did this have to be identical?? */
237AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
238#else
239AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
240#endif
241
242
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment, the REM critical section, the
 * MMIO/handler memory types, the saved-state unit, statistics and the
 * physical handler notification free list.  Must run before any RAM is
 * registered (see the MMR3PhysGetRamSize assertion below).
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue code must be in the lower 4GB, hence the exec-alloc. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's std/ext CPUID feature bits into the recompiler env. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features,  &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* cpu_reset may flush the TLB, so hold the EM/REM lock around it. */
    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.  Registered once per process, not per VM.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    /* NOTE(review): the TBLookup/IRQ/RawCheck descriptions below look
       copy-pasted from the Timers entry - confirm the intended texts. */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",         STAMUNIT_OCCURENCES,     "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",     STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",   STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",      STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",      STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",   STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",      STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",         STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",  STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",     STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",  STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",       STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",      STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",  STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",     STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.  Build a singly linked free list
     * over the whole aHandlerNotifications array; UINT32_MAX is the nil index.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX;     /* the last record. */

    return rc;
}
441
442
443/**
444 * Finalizes the REM initialization.
445 *
446 * This is called after all components, devices and drivers has
447 * been initialized. Its main purpose it to finish the RAM related
448 * initialization.
449 *
450 * @returns VBox status code.
451 *
452 * @param pVM The VM handle.
453 */
454REMR3DECL(int) REMR3InitFinalize(PVM pVM)
455{
456 int rc;
457
458 /*
459 * Ram size & dirty bit map.
460 */
461 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
462 pVM->rem.s.fGCPhysLastRamFixed = true;
463#ifdef RT_STRICT
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
465#else
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
467#endif
468 return rc;
469}
470
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * Sizes the dirty page bitmap (one byte per guest page, derived from
 * GCPhysLastRam) and allocates it either from the MM heap or, when
 * fGuarded, from page memory with an inaccessible guard region placed
 * right after the bitmap.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM blocks are registered with the recompiler. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    /* The +1 above must not have wrapped around. */
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    /* One dirty byte per page; verify no bits were lost in the shift. */
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything beyond the page-aligned bitmap inaccessible. */
        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base pointer so the bitmap END abuts the guard region,
           catching overruns immediately. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it - all pages start out dirty. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
527
528
529/**
530 * Terminates the REM.
531 *
532 * Termination means cleaning up and freeing all resources,
533 * the VM it self is at this point powered off or suspended.
534 *
535 * @returns VBox status code.
536 * @param pVM The VM to operate on.
537 */
538REMR3DECL(int) REMR3Term(PVM pVM)
539{
540#ifdef VBOX_WITH_STATISTICS
541 /*
542 * Statistics.
543 */
544 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
545 STAM_DEREG(pVM, &gStatCompilationQEmu);
546 STAM_DEREG(pVM, &gStatRunCodeQEmu);
547 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
548 STAM_DEREG(pVM, &gStatTimers);
549 STAM_DEREG(pVM, &gStatTBLookup);
550 STAM_DEREG(pVM, &gStatIRQ);
551 STAM_DEREG(pVM, &gStatRawCheck);
552 STAM_DEREG(pVM, &gStatMemRead);
553 STAM_DEREG(pVM, &gStatMemWrite);
554 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
555 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
556
557 STAM_DEREG(pVM, &gStatCpuGetTSC);
558
559 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
560 STAM_DEREG(pVM, &gStatRefuseVM86);
561 STAM_DEREG(pVM, &gStatRefusePaging);
562 STAM_DEREG(pVM, &gStatRefusePAE);
563 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
564 STAM_DEREG(pVM, &gStatRefuseIF0);
565 STAM_DEREG(pVM, &gStatRefuseCode16);
566 STAM_DEREG(pVM, &gStatRefuseWP0);
567 STAM_DEREG(pVM, &gStatRefuseRing1or2);
568 STAM_DEREG(pVM, &gStatRefuseCanExecute);
569 STAM_DEREG(pVM, &gStatFlushTBs);
570
571 STAM_DEREG(pVM, &gStatREMGDTChange);
572 STAM_DEREG(pVM, &gStatREMLDTRChange);
573 STAM_DEREG(pVM, &gStatREMIDTChange);
574 STAM_DEREG(pVM, &gStatREMTRChange);
575
576 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
577 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
578 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
579 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
580 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
581 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
582
583 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
584 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
585 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
586 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
587 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
588 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
589
590 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
591#endif /* VBOX_WITH_STATISTICS */
592
593 STAM_REL_DEREG(pVM, &tb_flush_count);
594 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
595 STAM_REL_DEREG(pVM, &tlb_flush_count);
596
597 return VINF_SUCCESS;
598}
599
600
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.  Notifications are suppressed (cIgnoreAll) for the
     * duration of the reset; the asserts verify nobody else holds the count.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}
631
632
/**
 * Execute state save operation.
 *
 * Saved layout (all uint32_t): hflags, ~0 separator, raw-ring-0 flag,
 * pending interrupt, ~0 terminator.  remR3Load must read exactly this.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* NOTE(review): intermediate SSMR3PutU32 statuses are ignored; presumably
       SSM latches the first failure and the final call reflects it - confirm. */
    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
659
660
/**
 * Execute state load operation.
 *
 * Mirrors remR3Save's layout; additionally understands the obsolete
 * VER1_6 format, whose extra fields are read and discarded.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int      rc;
    PREM     pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     * NOTE(review): early-return paths above leave cIgnoreAll incremented -
     * presumably acceptable since a failed load kills the VM; confirm.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
786
787
788
789#undef LOG_GROUP
790#define LOG_GROUP LOG_GROUP_REM_RUN
791
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int      rc, interrupt_request;
    RTGCPTR  GCPtrPC;
    bool     fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;       /* restored at the bottom */
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * fBp records whether one was removed so it can be re-inserted afterwards.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Successful step: pulse the TM clock so time moves. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the remaining QEMU exit codes onto VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* VBox-specific exit: the real status was parked in rem.s.rc. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
876
877
878/**
879 * Set a breakpoint using the REM facilities.
880 *
881 * @returns VBox status code.
882 * @param pVM The VM handle.
883 * @param Address The breakpoint address.
884 * @thread The emulation thread.
885 */
886REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
887{
888 VM_ASSERT_EMT(pVM);
889 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
890 {
891 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
892 return VINF_SUCCESS;
893 }
894 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
895 return VERR_REM_NO_MORE_BP_SLOTS;
896}
897
898
899/**
900 * Clears a breakpoint set by REMR3BreakpointSet().
901 *
902 * @returns VBox status code.
903 * @param pVM The VM handle.
904 * @param Address The breakpoint address.
905 * @thread The emulation thread.
906 */
907REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
908{
909 VM_ASSERT_EMT(pVM);
910 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
911 {
912 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
913 return VINF_SUCCESS;
914 }
915 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
916 return VERR_REM_BP_NOT_FOUND;
917}
918
919
920/**
921 * Emulate an instruction.
922 *
923 * This function executes one instruction without letting anyone
924 * interrupt it. This is intended for being called while being in
925 * raw mode and thus will take care of all the state syncing between
926 * REM and the rest.
927 *
928 * @returns VBox status code.
929 * @param pVM VM handle.
930 * @param pVCpu VMCPU Handle.
931 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    /* Restore the TB-flush setting regardless of whether the sync succeeded. */
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt request flags; restored after execution below. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    /* Scan the breakpoint list for the current PC to distinguish
                       a breakpoint hit from a plain single step. */
                    CPUBreakpoint  *pBP;
                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1075
1076
1077/**
1078 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1079 *
1080 * @returns VBox status code.
1081 *
1082 * @param pVM The VM handle.
1083 * @param pVCpu The Virtual CPU handle.
1084 */
1085static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1086{
1087 int rc;
1088
1089 Assert(pVM->rem.s.fInREM);
1090#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1091 cpu_single_step(&pVM->rem.s.Env, 1);
1092#else
1093 Assert(!pVM->rem.s.Env.singlestep_enabled);
1094#endif
1095
1096 /*
1097 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1098 */
1099 for (;;)
1100 {
1101 char szBuf[256];
1102
1103 /*
1104 * Log the current registers state and instruction.
1105 */
1106 remR3StateUpdate(pVM, pVCpu);
1107 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1108 szBuf[0] = '\0';
1109 rc = DBGFR3DisasInstrEx(pVM,
1110 pVCpu->idCpu,
1111 0, /* Sel */
1112 0, /* GCPtr */
1113 DBGF_DISAS_FLAGS_CURRENT_GUEST
1114 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1115 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1116 szBuf,
1117 sizeof(szBuf),
1118 NULL);
1119 if (RT_FAILURE(rc))
1120 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1121 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1122
1123 /*
1124 * Execute the instruction.
1125 */
1126 TMNotifyStartOfExecution(pVCpu);
1127
1128 if ( pVM->rem.s.Env.exception_index < 0
1129 || pVM->rem.s.Env.exception_index > 256)
1130 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1131
1132#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1133 pVM->rem.s.Env.interrupt_request = 0;
1134#else
1135 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1136#endif
1137 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1138 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1139 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1140 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1141 pVM->rem.s.Env.interrupt_request,
1142 pVM->rem.s.Env.halted,
1143 pVM->rem.s.Env.exception_index
1144 );
1145
1146 rc = cpu_exec(&pVM->rem.s.Env);
1147
1148 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1149 pVM->rem.s.Env.interrupt_request,
1150 pVM->rem.s.Env.halted,
1151 pVM->rem.s.Env.exception_index
1152 );
1153
1154 TMNotifyEndOfExecution(pVCpu);
1155
1156 switch (rc)
1157 {
1158#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1159 /*
1160 * The normal exit.
1161 */
1162 case EXCP_SINGLE_INSTR:
1163 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1164 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1165 continue;
1166 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1167 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1168 rc = VINF_SUCCESS;
1169 break;
1170
1171#else
1172 /*
1173 * The normal exit, check for breakpoints at PC just to be sure.
1174 */
1175#endif
1176 case EXCP_DEBUG:
1177 if (pVM->rem.s.Env.watchpoint_hit)
1178 {
1179 /** @todo deal with watchpoints */
1180 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1181 rc = VINF_EM_DBG_BREAKPOINT;
1182 }
1183 else
1184 {
1185 CPUBreakpoint *pBP;
1186 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1187 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1188 if (pBP->pc == GCPtrPC)
1189 break;
1190 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1191 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1192 }
1193#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1194 if (rc == VINF_EM_DBG_STEPPED)
1195 {
1196 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1197 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1198 continue;
1199
1200 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1201 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1202 rc = VINF_SUCCESS;
1203 }
1204#endif
1205 break;
1206
1207 /*
1208 * If we take a trap or start servicing a pending interrupt, we might end up here.
1209 * (Timer thread or some other thread wishing EMT's attention.)
1210 */
1211 case EXCP_INTERRUPT:
1212 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1213 rc = VINF_SUCCESS;
1214 break;
1215
1216 /*
1217 * hlt instruction.
1218 */
1219 case EXCP_HLT:
1220 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1221 rc = VINF_EM_HALT;
1222 break;
1223
1224 /*
1225 * The VM has halted.
1226 */
1227 case EXCP_HALTED:
1228 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1229 rc = VINF_EM_HALT;
1230 break;
1231
1232 /*
1233 * Switch to RAW-mode.
1234 */
1235 case EXCP_EXECUTE_RAW:
1236 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1237 rc = VINF_EM_RESCHEDULE_RAW;
1238 break;
1239
1240 /*
1241 * Switch to hardware accelerated RAW-mode.
1242 */
1243 case EXCP_EXECUTE_HWACC:
1244 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1245 rc = VINF_EM_RESCHEDULE_HWACC;
1246 break;
1247
1248 /*
1249 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1250 */
1251 case EXCP_RC:
1252 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1253 rc = pVM->rem.s.rc;
1254 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1255 break;
1256
1257 /*
1258 * Figure out the rest when they arrive....
1259 */
1260 default:
1261 AssertMsgFailed(("rc=%d\n", rc));
1262 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1263 rc = VINF_EM_RESCHEDULE;
1264 break;
1265 }
1266 break;
1267 }
1268
1269#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1270// cpu_single_step(&pVM->rem.s.Env, 0);
1271#else
1272 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1273#endif
1274 return rc;
1275}
1276
1277
1278/**
1279 * Runs code in recompiled mode.
1280 *
1281 * Before calling this function the REM state needs to be in sync with
1282 * the VM. Call REMR3State() to perform the sync. It's only necessary
1283 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1284 * and after calling REMR3StateBack().
1285 *
1286 * @returns VBox status code.
1287 *
1288 * @param pVM VM Handle.
1289 * @param pVCpu VMCPU Handle.
1290 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* Divert to the verbose single-stepping path when instruction logging is enabled. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Translate the QEMU exit code into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                /* Look for a breakpoint at the current PC to distinguish
                   a breakpoint hit from a single step. */
                CPUBreakpoint  *pBP;
                RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1391
1392
1393/**
1394 * Check if the cpu state is suitable for Raw execution.
1395 *
1396 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1397 *
1398 * @param env The CPU env struct.
1399 * @param eip The EIP to check this for (might differ from env->eip).
1400 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1401 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1402 *
1403 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1404 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    /* In IEM verification mode everything stays in the recompiler. */
    return false;
#endif

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         */
        /* Note: only the fields HWACCMR3CanExecuteGuest inspects are filled in;
           the rest of Ctx is left uninitialized on purpose. */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr             = env->ldt.selector;
        Ctx.ldtrHid.u64Base  = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u   = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->breakpoints))
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->watchpoints))
    {
        //Log2(("raw mode refused: Watchpoints\n"));
        return false;
    }

    /* Raw mode requires both paging and protected mode to be enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable when the guest CPU profile reports the feature. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code: raw ring-3 execution must be enabled and interrupts on. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always allowed to run raw, bypassing the remaining checks. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1647
1648
1649/**
1650 * Fetches a code byte.
1651 *
1652 * @returns Success indicator (bool) for ease of use.
1653 * @param env The CPU environment structure.
1654 * @param GCPtrInstr Where to fetch code.
1655 * @param pu8Byte Where to store the byte on success
1656 */
1657bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1658{
1659 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1660 if (RT_SUCCESS(rc))
1661 return true;
1662 return false;
1663}
1664
1665
1666/**
1667 * Flush (or invalidate if you like) page table/dir entry.
1668 *
1669 * (invlpg instruction; tlb_flush_page)
1670 *
1671 * @param env Pointer to cpu environment.
1672 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1673 */
void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    Assert(EMRemIsLockOwner(env->pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A change of CR4.VME affects the TSS redirection bitmap handling. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* On failure, request a full CR3 resync instead of failing hard. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1716
1717
1718#ifndef REM_PHYS_ADDR_IN_TLB
1719/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is not forwarded; a writable
       mapping is always requested.  Looks intentional (write monitoring is
       reported via the tag bits below) - confirm against callers. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    /* The low two bits of the returned pointer are used as status tags:
       1 = inaccessible, 2 = write access must be caught. */
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1739#endif /* REM_PHYS_ADDR_IN_TLB */
1740
1741
1742/**
1743 * Called from tlb_protect_code in order to write monitor a code page.
1744 *
1745 * @param env Pointer to the CPU environment.
1746 * @param GCPtr Code page to monitor
1747 */
void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    /* Only monitor pages for supervisor, paged, non-V86 guests when not using
       hardware virtualization; CSAM handles the write monitoring. */
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                        /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1760
1761
1762/**
1763 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1764 *
1765 * @param env Pointer to the CPU environment.
1766 * @param GCPtr Code page to monitor
1767 */
void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    /* Mirror of the conditions in remR3ProtectCode: only pages that could
       have been monitored are unmonitored. */
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                        /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1780
1781
1782/**
1783 * Called when the CPU is initialized, any of the CRx registers are changed or
1784 * when the A20 line is modified.
1785 *
1786 * @param env Pointer to the CPU environment.
1787 * @param fGlobal Set if the flush is global.
1788 */
void remR3FlushTLB(CPUX86State *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    Assert(EMRemIsLockOwner(pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     */
    /* Without CR4.PGE there are no global pages, so every flush is global. */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A change of CR4.VME affects the TSS redirection bitmap handling. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1827
1828
1829/**
1830 * Called when any of the cr0, cr4 or efer registers is updated.
1831 *
1832 * @param env Pointer to the CPU environment.
1833 */
void remR3ChangeCpuMode(CPUX86State *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A change of CR4.VME affects the TSS redirection bitmap handling. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];
#ifdef TARGET_X86_64
    efer = env->efer;
    pCtx->msrEFER = efer;
#else
    /* 32-bit target: EFER does not exist, pass 0 to PGMChangeMode. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* An EM status is funneled back to EM via the pending-rc mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1879
1880
1881/**
1882 * Called from compiled code to run dma.
1883 *
1884 * @param env Pointer to the CPU environment.
1885 */
void remR3DmaRun(CPUX86State *env)
{
    /* Pause the emulated-code profiling bracket around the DMA work. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1892
1893
1894/**
1895 * Called from compiled code to schedule pending timers in VMM
1896 *
1897 * @param env Pointer to the CPU environment.
1898 */
void remR3TimersRun(CPUX86State *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    /* Account the timer work under STATS_QEMU_RUN_TIMERS instead of the
       emulated-code bucket. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1909
1910
1911/**
1912 * Record trap occurrence
1913 *
1914 * @returns VBox status code
1915 * @param env Pointer to the CPU environment.
1916 * @param uTrap Trap nr
1917 * @param uErrorCode Error code
1918 * @param pvNextEIP Next EIP
1919 */
1920int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1921{
1922 PVM pVM = env->pVM;
1923#ifdef VBOX_WITH_STATISTICS
1924 static STAMCOUNTER s_aStatTrap[255];
1925 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1926#endif
1927
1928#ifdef VBOX_WITH_STATISTICS
1929 if (uTrap < 255)
1930 {
1931 if (!s_aRegisters[uTrap])
1932 {
1933 char szStatName[64];
1934 s_aRegisters[uTrap] = true;
1935 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1936 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1937 }
1938 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1939 }
1940#endif
1941 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1942 if( uTrap < 0x20
1943 && (env->cr[0] & X86_CR0_PE)
1944 && !(env->eflags & X86_EFL_VM))
1945 {
1946#ifdef DEBUG
1947 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1948#endif
1949 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1950 {
1951 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1952 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1953 return VERR_REM_TOO_MANY_TRAPS;
1954 }
1955 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1956 pVM->rem.s.cPendingExceptions = 1;
1957 pVM->rem.s.uPendingException = uTrap;
1958 pVM->rem.s.uPendingExcptEIP = env->eip;
1959 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1960 }
1961 else
1962 {
1963 pVM->rem.s.cPendingExceptions = 0;
1964 pVM->rem.s.uPendingException = uTrap;
1965 pVM->rem.s.uPendingExcptEIP = env->eip;
1966 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1967 }
1968 return VINF_SUCCESS;
1969}
1970
1971
1972/*
1973 * Clear current active trap
1974 *
1975 * @param pVM VM Handle.
1976 */
1977void remR3TrapClear(PVM pVM)
1978{
1979 pVM->rem.s.cPendingExceptions = 0;
1980 pVM->rem.s.uPendingException = 0;
1981 pVM->rem.s.uPendingExcptEIP = 0;
1982 pVM->rem.s.uPendingExcptCR2 = 0;
1983}
1984
1985
1986/*
1987 * Record previous call instruction addresses
1988 *
1989 * @param env Pointer to the CPU environment.
1990 */
1991void remR3RecordCall(CPUX86State *env)
1992{
1993 CSAMR3RecordCallAddress(env->pVM, env->eip);
1994}
1995
1996
/**
 * Syncs the internal REM state with the VM.
 *
 * This must be called before REMR3Run() is invoked whenever the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 *
 * @remark  The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *          not do this since the majority of the callers don't want any unnecessary events
 *          pending that would immediately interrupt execution.
 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned       fFlags;
    bool                    fHiddenSelRegsValid;
    unsigned                i;
    TRPMEVENT               enmType;
    uint8_t                 u8TrapNo;
    uint32_t                uCpl;
    int                     rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX]  = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX]  = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX]  = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX]  = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP]  = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP]  = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI]  = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI]  = pCtx->rdi;
    pVM->rem.s.Env.regs[8]      = pCtx->r8;
    pVM->rem.s.Env.regs[9]      = pCtx->r9;
    pVM->rem.s.Env.regs[10]     = pCtx->r10;
    pVM->rem.s.Env.regs[11]     = pCtx->r11;
    pVM->rem.s.Env.regs[12]     = pCtx->r12;
    pVM->rem.s.Env.regs[13]     = pCtx->r13;
    pVM->rem.s.Env.regs[14]     = pCtx->r14;
    pVM->rem.s.Env.regs[15]     = pCtx->r15;

    pVM->rem.s.Env.eip          = pCtx->rip;

    pVM->rem.s.Env.eflags       = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX]  = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX]  = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX]  = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX]  = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP]  = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP]  = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI]  = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI]  = pCtx->edi;
    pVM->rem.s.Env.eip          = pCtx->eip;

    pVM->rem.s.Env.eflags       = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2]        = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags      &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  (Flushes queued page invalidations recorded while
     * outside REM; fIgnoreInvlPg stops them from being re-queued.)
     */
    if (pVM->rem.s.cInvalidatedPages)
    {
        RTUINT i;

        pVM->rem.s.fIgnoreInvlPg = true;
        for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
        {
            Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
            tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
        }
        pVM->rem.s.fIgnoreInvlPg = false;
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer         = pCtx->msrEFER;
    pVM->rem.s.Env.star         = pCtx->msrSTAR;
    pVM->rem.s.Env.pat          = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar        = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar        = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask        = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
       )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            /* fIgnoreCR3Load suppresses the CR3-load callback triggered by the flush. */
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base     = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit    = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base     = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit    = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs  = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base     = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit    = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags    = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base     = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit    = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags    = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags   &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.  (A trap pending in TRPM is transferred into the
     * recompiler environment and then cleared from TRPM below.)
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int     = 0;
            pVM->rem.s.Env.exception_next_eip   = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int     = 1;
            pVM->rem.s.Env.exception_next_eip   = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case 0x0e:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case 0x11: case 0x08:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2471
2472
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned          i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that are still marked as needing a (lazy) reload. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle requires a TSS resync - flag it for SELM. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* The busy bit is re-applied on the VMM side (see comment above). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.  (A trap still pending in the recompiler environment
     * is transferred back to TRPM for the VMM to handle.)
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2732
2733
2734/**
2735 * This is called by the disassembler when it wants to update the cpu state
2736 * before for instance doing a register dump.
2737 */
2738static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2739{
2740 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2741 unsigned i;
2742
2743 Assert(pVM->rem.s.fInREM);
2744
2745 /*
2746 * Copy back the registers.
2747 * This is done in the order they are declared in the CPUMCTX structure.
2748 */
2749
2750 /** @todo FOP */
2751 /** @todo FPUIP */
2752 /** @todo CS */
2753 /** @todo FPUDP */
2754 /** @todo DS */
2755 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2756 pCtx->fpu.MXCSR = 0;
2757 pCtx->fpu.MXCSR_MASK = 0;
2758
2759 /** @todo check if FPU/XMM was actually used in the recompiler */
2760 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2761//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2762
2763#ifdef TARGET_X86_64
2764 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2765 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2766 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2767 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2768 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2769 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2770 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2771 pCtx->r8 = pVM->rem.s.Env.regs[8];
2772 pCtx->r9 = pVM->rem.s.Env.regs[9];
2773 pCtx->r10 = pVM->rem.s.Env.regs[10];
2774 pCtx->r11 = pVM->rem.s.Env.regs[11];
2775 pCtx->r12 = pVM->rem.s.Env.regs[12];
2776 pCtx->r13 = pVM->rem.s.Env.regs[13];
2777 pCtx->r14 = pVM->rem.s.Env.regs[14];
2778 pCtx->r15 = pVM->rem.s.Env.regs[15];
2779
2780 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2781#else
2782 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2783 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2784 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2785 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2786 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2787 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2788 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2789
2790 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2791#endif
2792
2793 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2794
2795 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2796 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2797 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2798 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2799 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2800
2801#ifdef TARGET_X86_64
2802 pCtx->rip = pVM->rem.s.Env.eip;
2803 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2804#else
2805 pCtx->eip = pVM->rem.s.Env.eip;
2806 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2807#endif
2808
2809 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2810 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2811 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2812 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2813 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2814 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2815
2816 for (i = 0; i < 8; i++)
2817 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2818
2819 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2820 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2821 {
2822 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2823 STAM_COUNTER_INC(&gStatREMGDTChange);
2824 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2825 }
2826
2827 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2828 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2829 {
2830 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2831 STAM_COUNTER_INC(&gStatREMIDTChange);
2832 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2833 }
2834
2835 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2836 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2837 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2838 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2839 {
2840 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2841 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2842 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2843 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2844 STAM_COUNTER_INC(&gStatREMLDTRChange);
2845 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2846 }
2847
2848 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2849 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2850 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2851 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2852 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2853 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2854 : 0) )
2855 {
2856 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2857 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2858 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2859 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2860 pCtx->tr = pVM->rem.s.Env.tr.selector;
2861 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2862 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2863 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2864 if (pCtx->trHid.Attr.u)
2865 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2866 STAM_COUNTER_INC(&gStatREMTRChange);
2867 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2868 }
2869
2870 /** @todo These values could still be out of sync! */
2871 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2872 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2873 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2874 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2875
2876 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2877 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2878 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2879
2880 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2881 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2882 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2883
2884 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2885 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2886 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2887
2888 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2889 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2890 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2891
2892 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2893 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2894 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2895
2896 /* Sysenter MSR */
2897 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2898 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2899 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2900
2901 /* System MSRs. */
2902 pCtx->msrEFER = pVM->rem.s.Env.efer;
2903 pCtx->msrSTAR = pVM->rem.s.Env.star;
2904 pCtx->msrPAT = pVM->rem.s.Env.pat;
2905#ifdef TARGET_X86_64
2906 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2907 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2908 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2909 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2910#endif
2911
2912}
2913
2914
2915/**
2916 * Update the VMM state information if we're currently in REM.
2917 *
2918 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2919 * we're currently executing in REM and the VMM state is invalid. This method will of
2920 * course check that we're executing in REM before syncing any data over to the VMM.
2921 *
2922 * @param pVM The VM handle.
2923 * @param pVCpu The VMCPU handle.
2924 */
2925REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2926{
2927 if (pVM->rem.s.fInREM)
2928 remR3StateUpdate(pVM, pVCpu);
2929}
2930
2931
2932#undef LOG_GROUP
2933#define LOG_GROUP LOG_GROUP_REM
2934
2935
2936/**
2937 * Notify the recompiler about Address Gate 20 state change.
2938 *
2939 * This notification is required since A20 gate changes are
2940 * initialized from a device driver and the VM might just as
2941 * well be in REM mode as in RAW mode.
2942 *
2943 * @param pVM VM handle.
2944 * @param pVCpu VMCPU handle.
2945 * @param fEnable True if the gate should be enabled.
2946 * False if the gate should be disabled.
2947 */
2948REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2949{
2950 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2951 VM_ASSERT_EMT(pVM);
2952
2953 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2954 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2955 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2956}
2957
2958
2959/**
2960 * Replays the handler notification changes
2961 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2962 *
2963 * @param pVM VM handle.
2964 */
2965REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2966{
2967 /*
2968 * Replay the flushes.
2969 */
2970 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2971 VM_ASSERT_EMT(pVM);
2972
2973 /** @todo this isn't ensuring correct replay order. */
2974 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2975 {
2976 uint32_t idxNext;
2977 uint32_t idxRevHead;
2978 uint32_t idxHead;
2979#ifdef VBOX_STRICT
2980 int32_t c = 0;
2981#endif
2982
2983 /* Lockless purging of pending notifications. */
2984 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2985 if (idxHead == UINT32_MAX)
2986 return;
2987 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2988
2989 /*
2990 * Reverse the list to process it in FIFO order.
2991 */
2992 idxRevHead = UINT32_MAX;
2993 do
2994 {
2995 /* Save the index of the next rec. */
2996 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
2997 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
2998 /* Push the record onto the reversed list. */
2999 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3000 idxRevHead = idxHead;
3001 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3002 /* Advance. */
3003 idxHead = idxNext;
3004 } while (idxHead != UINT32_MAX);
3005
3006 /*
3007 * Loop thru the list, reinserting the record into the free list as they are
3008 * processed to avoid having other EMTs running out of entries while we're flushing.
3009 */
3010 idxHead = idxRevHead;
3011 do
3012 {
3013 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3014 uint32_t idxCur;
3015 Assert(--c >= 0);
3016
3017 switch (pCur->enmKind)
3018 {
3019 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3020 remR3NotifyHandlerPhysicalRegister(pVM,
3021 pCur->u.PhysicalRegister.enmType,
3022 pCur->u.PhysicalRegister.GCPhys,
3023 pCur->u.PhysicalRegister.cb,
3024 pCur->u.PhysicalRegister.fHasHCHandler);
3025 break;
3026
3027 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3028 remR3NotifyHandlerPhysicalDeregister(pVM,
3029 pCur->u.PhysicalDeregister.enmType,
3030 pCur->u.PhysicalDeregister.GCPhys,
3031 pCur->u.PhysicalDeregister.cb,
3032 pCur->u.PhysicalDeregister.fHasHCHandler,
3033 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3034 break;
3035
3036 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3037 remR3NotifyHandlerPhysicalModify(pVM,
3038 pCur->u.PhysicalModify.enmType,
3039 pCur->u.PhysicalModify.GCPhysOld,
3040 pCur->u.PhysicalModify.GCPhysNew,
3041 pCur->u.PhysicalModify.cb,
3042 pCur->u.PhysicalModify.fHasHCHandler,
3043 pCur->u.PhysicalModify.fRestoreAsRAM);
3044 break;
3045
3046 default:
3047 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3048 break;
3049 }
3050
3051 /*
3052 * Advance idxHead.
3053 */
3054 idxCur = idxHead;
3055 idxHead = pCur->idxNext;
3056 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3057
3058 /*
3059 * Put the record back into the free list.
3060 */
3061 do
3062 {
3063 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3064 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3065 ASMCompilerBarrier();
3066 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3067 } while (idxHead != UINT32_MAX);
3068
3069#ifdef VBOX_STRICT
3070 if (pVM->cCpus == 1)
3071 {
3072 unsigned c;
3073 /* Check that all records are now on the free list. */
3074 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3075 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3076 c++;
3077 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3078 }
3079#endif
3080 }
3081}
3082
3083
3084/**
3085 * Notify REM about changed code page.
3086 *
3087 * @returns VBox status code.
3088 * @param pVM VM handle.
3089 * @param pVCpu VMCPU handle.
3090 * @param pvCodePage Code page address
3091 */
3092REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3093{
3094#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3095 int rc;
3096 RTGCPHYS PhysGC;
3097 uint64_t flags;
3098
3099 VM_ASSERT_EMT(pVM);
3100
3101 /*
3102 * Get the physical page address.
3103 */
3104 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3105 if (rc == VINF_SUCCESS)
3106 {
3107 /*
3108 * Sync the required registers and flush the whole page.
3109 * (Easier to do the whole page than notifying it about each physical
3110 * byte that was changed.
3111 */
3112 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3113 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3114 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3115 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3116
3117 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3118 }
3119#endif
3120 return VINF_SUCCESS;
3121}
3122
3123
3124/**
3125 * Notification about a successful MMR3PhysRegister() call.
3126 *
3127 * @param pVM VM handle.
3128 * @param GCPhys The physical address the RAM.
3129 * @param cb Size of the memory.
3130 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3131 */
3132REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3133{
3134 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3135 VM_ASSERT_EMT(pVM);
3136
3137 /*
3138 * Validate input - we trust the caller.
3139 */
3140 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3141 Assert(cb);
3142 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3143 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3144
3145 /*
3146 * Base ram? Update GCPhysLastRam.
3147 */
3148 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3149 {
3150 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3151 {
3152 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3153 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3154 }
3155 }
3156
3157 /*
3158 * Register the ram.
3159 */
3160 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3161
3162 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3163 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3164 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3165
3166 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3167}
3168
3169
3170/**
3171 * Notification about a successful MMR3PhysRomRegister() call.
3172 *
3173 * @param pVM VM handle.
3174 * @param GCPhys The physical address of the ROM.
3175 * @param cb The size of the ROM.
3176 * @param pvCopy Pointer to the ROM copy.
3177 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3178 * This function will be called when ever the protection of the
3179 * shadow ROM changes (at reset and end of POST).
3180 */
3181REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3182{
3183 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3184 VM_ASSERT_EMT(pVM);
3185
3186 /*
3187 * Validate input - we trust the caller.
3188 */
3189 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3190 Assert(cb);
3191 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3192
3193 /*
3194 * Register the rom.
3195 */
3196 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3197
3198 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3199 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3200 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3201
3202 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3203}
3204
3205
3206/**
3207 * Notification about a successful memory deregistration or reservation.
3208 *
3209 * @param pVM VM Handle.
3210 * @param GCPhys Start physical address.
3211 * @param cb The size of the range.
3212 */
3213REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3214{
3215 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3216 VM_ASSERT_EMT(pVM);
3217
3218 /*
3219 * Validate input - we trust the caller.
3220 */
3221 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3222 Assert(cb);
3223 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3224
3225 /*
3226 * Unassigning the memory.
3227 */
3228 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3229
3230 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3231 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3232 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3233
3234 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3235}
3236
3237
3238/**
3239 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3240 *
3241 * @param pVM VM Handle.
3242 * @param enmType Handler type.
3243 * @param GCPhys Handler range address.
3244 * @param cb Size of the handler range.
3245 * @param fHasHCHandler Set if the handler has a HC callback function.
3246 *
3247 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3248 * Handler memory type to memory which has no HC handler.
3249 */
3250static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3251{
3252 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3253 enmType, GCPhys, cb, fHasHCHandler));
3254
3255 VM_ASSERT_EMT(pVM);
3256 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3257 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3258
3259
3260 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3261
3262 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3263 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3264 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3265 else if (fHasHCHandler)
3266 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3267 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3268
3269 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3270}
3271
3272/**
3273 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3274 *
3275 * @param pVM VM Handle.
3276 * @param enmType Handler type.
3277 * @param GCPhys Handler range address.
3278 * @param cb Size of the handler range.
3279 * @param fHasHCHandler Set if the handler has a HC callback function.
3280 *
3281 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3282 * Handler memory type to memory which has no HC handler.
3283 */
3284REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3285{
3286 REMR3ReplayHandlerNotifications(pVM);
3287
3288 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3289}
3290
3291/**
3292 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3293 *
3294 * @param pVM VM Handle.
3295 * @param enmType Handler type.
3296 * @param GCPhys Handler range address.
3297 * @param cb Size of the handler range.
3298 * @param fHasHCHandler Set if the handler has a HC callback function.
3299 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3300 */
3301static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3302{
3303 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3304 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3305 VM_ASSERT_EMT(pVM);
3306
3307
3308 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3309
3310 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3311 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3312 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3313 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3314 else if (fHasHCHandler)
3315 {
3316 if (!fRestoreAsRAM)
3317 {
3318 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3319 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3320 }
3321 else
3322 {
3323 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3324 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3325 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3326 }
3327 }
3328 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3329
3330 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3331}
3332
3333/**
3334 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3335 *
3336 * @param pVM VM Handle.
3337 * @param enmType Handler type.
3338 * @param GCPhys Handler range address.
3339 * @param cb Size of the handler range.
3340 * @param fHasHCHandler Set if the handler has a HC callback function.
3341 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3342 */
3343REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3344{
3345 REMR3ReplayHandlerNotifications(pVM);
3346 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3347}
3348
3349
3350/**
3351 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3352 *
3353 * @param pVM VM Handle.
3354 * @param enmType Handler type.
3355 * @param GCPhysOld Old handler range address.
3356 * @param GCPhysNew New handler range address.
3357 * @param cb Size of the handler range.
3358 * @param fHasHCHandler Set if the handler has a HC callback function.
3359 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3360 */
3361static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3362{
3363 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3364 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3365 VM_ASSERT_EMT(pVM);
3366 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3367
3368 if (fHasHCHandler)
3369 {
3370 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3371
3372 /*
3373 * Reset the old page.
3374 */
3375 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3376 if (!fRestoreAsRAM)
3377 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3378 else
3379 {
3380 /* This is not perfect, but it'll do for PD monitoring... */
3381 Assert(cb == PAGE_SIZE);
3382 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3383 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3384 }
3385
3386 /*
3387 * Update the new page.
3388 */
3389 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3390 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3391 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3392 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3393
3394 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3395 }
3396}
3397
3398/**
3399 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3400 *
3401 * @param pVM VM Handle.
3402 * @param enmType Handler type.
3403 * @param GCPhysOld Old handler range address.
3404 * @param GCPhysNew New handler range address.
3405 * @param cb Size of the handler range.
3406 * @param fHasHCHandler Set if the handler has a HC callback function.
3407 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3408 */
3409REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3410{
3411 REMR3ReplayHandlerNotifications(pVM);
3412
3413 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3414}
3415
3416/**
3417 * Checks if we're handling access to this page or not.
3418 *
3419 * @returns true if we're trapping access.
3420 * @returns false if we aren't.
3421 * @param pVM The VM handle.
3422 * @param GCPhys The physical address.
3423 *
3424 * @remark This function will only work correctly in VBOX_STRICT builds!
3425 */
3426REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3427{
3428#ifdef VBOX_STRICT
3429 unsigned long off;
3430 REMR3ReplayHandlerNotifications(pVM);
3431
3432 off = get_phys_page_offset(GCPhys);
3433 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3434 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3435 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3436#else
3437 return false;
3438#endif
3439}
3440
3441
3442/**
3443 * Deals with a rare case in get_phys_addr_code where the code
3444 * is being monitored.
3445 *
3446 * It could also be an MMIO page, in which case we will raise a fatal error.
3447 *
3448 * @returns The physical address corresponding to addr.
3449 * @param env The cpu environment.
3450 * @param addr The virtual address.
3451 * @param pTLBEntry The TLB entry.
3452 */
3453target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3454 target_ulong addr,
3455 CPUTLBEntry *pTLBEntry,
3456 target_phys_addr_t ioTLBEntry)
3457{
3458 PVM pVM = env->pVM;
3459
3460 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3461 {
3462 /* If code memory is being monitored, appropriate IOTLB entry will have
3463 handler IO type, and addend will provide real physical address, no
3464 matter if we store VA in TLB or not, as handlers are always passed PA */
3465 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3466 return ret;
3467 }
3468 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3469 "*** handlers\n",
3470 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3471 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3472 LogRel(("*** mmio\n"));
3473 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3474 LogRel(("*** phys\n"));
3475 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3476 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3477 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3478 AssertFatalFailed();
3479}
3480
3481/**
3482 * Read guest RAM and ROM.
3483 *
3484 * @param SrcGCPhys The source address (guest physical).
3485 * @param pvDst The destination address.
3486 * @param cb Number of bytes
3487 */
3488void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3489{
3490 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3491 VBOX_CHECK_ADDR(SrcGCPhys);
3492 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3493#ifdef VBOX_DEBUG_PHYS
3494 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3495#endif
3496 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3497}
3498
3499
3500/**
3501 * Read guest RAM and ROM, unsigned 8-bit.
3502 *
3503 * @param SrcGCPhys The source address (guest physical).
3504 */
3505RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3506{
3507 uint8_t val;
3508 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3509 VBOX_CHECK_ADDR(SrcGCPhys);
3510 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3511 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3512#ifdef VBOX_DEBUG_PHYS
3513 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3514#endif
3515 return val;
3516}
3517
3518
3519/**
3520 * Read guest RAM and ROM, signed 8-bit.
3521 *
3522 * @param SrcGCPhys The source address (guest physical).
3523 */
3524RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3525{
3526 int8_t val;
3527 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3528 VBOX_CHECK_ADDR(SrcGCPhys);
3529 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3530 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3531#ifdef VBOX_DEBUG_PHYS
3532 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3533#endif
3534 return val;
3535}
3536
3537
3538/**
3539 * Read guest RAM and ROM, unsigned 16-bit.
3540 *
3541 * @param SrcGCPhys The source address (guest physical).
3542 */
3543RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3544{
3545 uint16_t val;
3546 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3547 VBOX_CHECK_ADDR(SrcGCPhys);
3548 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3549 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3550#ifdef VBOX_DEBUG_PHYS
3551 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3552#endif
3553 return val;
3554}
3555
3556
3557/**
3558 * Read guest RAM and ROM, signed 16-bit.
3559 *
3560 * @param SrcGCPhys The source address (guest physical).
3561 */
3562RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3563{
3564 int16_t val;
3565 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3566 VBOX_CHECK_ADDR(SrcGCPhys);
3567 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3568 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3569#ifdef VBOX_DEBUG_PHYS
3570 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3571#endif
3572 return val;
3573}
3574
3575
3576/**
3577 * Read guest RAM and ROM, unsigned 32-bit.
3578 *
3579 * @param SrcGCPhys The source address (guest physical).
3580 */
3581RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3582{
3583 uint32_t val;
3584 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3585 VBOX_CHECK_ADDR(SrcGCPhys);
3586 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3587 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3588#ifdef VBOX_DEBUG_PHYS
3589 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3590#endif
3591 return val;
3592}
3593
3594
3595/**
3596 * Read guest RAM and ROM, signed 32-bit.
3597 *
3598 * @param SrcGCPhys The source address (guest physical).
3599 */
3600RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3601{
3602 int32_t val;
3603 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3604 VBOX_CHECK_ADDR(SrcGCPhys);
3605 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3606 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3607#ifdef VBOX_DEBUG_PHYS
3608 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3609#endif
3610 return val;
3611}
3612
3613
3614/**
3615 * Read guest RAM and ROM, unsigned 64-bit.
3616 *
3617 * @param SrcGCPhys The source address (guest physical).
3618 */
3619uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3620{
3621 uint64_t val;
3622 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3623 VBOX_CHECK_ADDR(SrcGCPhys);
3624 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3625 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3626#ifdef VBOX_DEBUG_PHYS
3627 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3628#endif
3629 return val;
3630}
3631
3632
3633/**
3634 * Read guest RAM and ROM, signed 64-bit.
3635 *
3636 * @param SrcGCPhys The source address (guest physical).
3637 */
3638int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3639{
3640 int64_t val;
3641 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3642 VBOX_CHECK_ADDR(SrcGCPhys);
3643 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3644 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3645#ifdef VBOX_DEBUG_PHYS
3646 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3647#endif
3648 return val;
3649}
3650
3651
3652/**
3653 * Write guest RAM.
3654 *
3655 * @param DstGCPhys The destination address (guest physical).
3656 * @param pvSrc The source address.
3657 * @param cb Number of bytes to write
3658 */
3659void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3660{
3661 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3662 VBOX_CHECK_ADDR(DstGCPhys);
3663 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3664 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3665#ifdef VBOX_DEBUG_PHYS
3666 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3667#endif
3668}
3669
3670
3671/**
3672 * Write guest RAM, unsigned 8-bit.
3673 *
3674 * @param DstGCPhys The destination address (guest physical).
3675 * @param val Value
3676 */
3677void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3678{
3679 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3680 VBOX_CHECK_ADDR(DstGCPhys);
3681 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3682 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3683#ifdef VBOX_DEBUG_PHYS
3684 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3685#endif
3686}
3687
3688
3689/**
3690 * Write guest RAM, unsigned 8-bit.
3691 *
3692 * @param DstGCPhys The destination address (guest physical).
3693 * @param val Value
3694 */
3695void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3696{
3697 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3698 VBOX_CHECK_ADDR(DstGCPhys);
3699 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3700 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3701#ifdef VBOX_DEBUG_PHYS
3702 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3703#endif
3704}
3705
3706
3707/**
3708 * Write guest RAM, unsigned 32-bit.
3709 *
3710 * @param DstGCPhys The destination address (guest physical).
3711 * @param val Value
3712 */
3713void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3714{
3715 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3716 VBOX_CHECK_ADDR(DstGCPhys);
3717 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3718 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3719#ifdef VBOX_DEBUG_PHYS
3720 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3721#endif
3722}
3723
3724
3725/**
3726 * Write guest RAM, unsigned 64-bit.
3727 *
3728 * @param DstGCPhys The destination address (guest physical).
3729 * @param val Value
3730 */
3731void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3732{
3733 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3734 VBOX_CHECK_ADDR(DstGCPhys);
3735 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3736 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3737#ifdef VBOX_DEBUG_PHYS
3738 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3739#endif
3740}
3741
3742#undef LOG_GROUP
3743#define LOG_GROUP LOG_GROUP_REM_MMIO
3744
3745/** Read MMIO memory. */
3746static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3747{
3748 uint32_t u32 = 0;
3749 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3750 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3751 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3752 return u32;
3753}
3754
3755/** Read MMIO memory. */
3756static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3757{
3758 uint32_t u32 = 0;
3759 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3760 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3761 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3762 return u32;
3763}
3764
3765/** Read MMIO memory. */
3766static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3767{
3768 uint32_t u32 = 0;
3769 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3770 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3771 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3772 return u32;
3773}
3774
3775/** Write to MMIO memory. */
3776static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3777{
3778 int rc;
3779 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3780 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3781 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3782}
3783
3784/** Write to MMIO memory. */
3785static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3786{
3787 int rc;
3788 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3789 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3790 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3791}
3792
3793/** Write to MMIO memory. */
3794static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3795{
3796 int rc;
3797 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3798 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3799 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3800}
3801
3802
3803#undef LOG_GROUP
3804#define LOG_GROUP LOG_GROUP_REM_HANDLER
3805
3806/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3807
3808static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3809{
3810 uint8_t u8;
3811 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3812 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3813 return u8;
3814}
3815
3816static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3817{
3818 uint16_t u16;
3819 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3820 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3821 return u16;
3822}
3823
3824static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3825{
3826 uint32_t u32;
3827 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3828 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3829 return u32;
3830}
3831
3832static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3833{
3834 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3835 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3836}
3837
3838static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3839{
3840 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3841 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3842}
3843
3844static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3845{
3846 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3847 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3848}
3849
3850/* -+- disassembly -+- */
3851
3852#undef LOG_GROUP
3853#define LOG_GROUP LOG_GROUP_REM_DISAS
3854
3855
3856/**
3857 * Enables or disables singled stepped disassembly.
3858 *
3859 * @returns VBox status code.
3860 * @param pVM VM handle.
3861 * @param fEnable To enable set this flag, to disable clear it.
3862 */
3863static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3864{
3865 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3866 VM_ASSERT_EMT(pVM);
3867
3868 if (fEnable)
3869 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3870 else
3871 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3872#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3873 cpu_single_step(&pVM->rem.s.Env, fEnable);
3874#endif
3875 return VINF_SUCCESS;
3876}
3877
3878
3879/**
3880 * Enables or disables singled stepped disassembly.
3881 *
3882 * @returns VBox status code.
3883 * @param pVM VM handle.
3884 * @param fEnable To enable set this flag, to disable clear it.
3885 */
3886REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3887{
3888 int rc;
3889
3890 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3891 if (VM_IS_EMT(pVM))
3892 return remR3DisasEnableStepping(pVM, fEnable);
3893
3894 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3895 AssertRC(rc);
3896 return rc;
3897}
3898
3899
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * Without arguments it reports the current setting; with one argument it
 * converts it to a boolean and toggles single stepped disassembly.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    int  rc;
    bool fEnable;

    /* No arguments: just report the current state. */
    if (cArgs == 0)
        return DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
                                pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* One argument: convert it and change the mode. */
    rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");

    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");

    return DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
}
#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3935
3936
3937/**
3938 * Disassembles one instruction and prints it to the log.
3939 *
3940 * @returns Success indicator.
3941 * @param env Pointer to the recompiler CPU structure.
3942 * @param f32BitCode Indicates that whether or not the code should
3943 * be disassembled as 16 or 32 bit. If -1 the CS
3944 * selector will be inspected.
3945 * @param pszPrefix
3946 */
3947bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3948{
3949 PVM pVM = env->pVM;
3950 const bool fLog = LogIsEnabled();
3951 const bool fLog2 = LogIs2Enabled();
3952 int rc = VINF_SUCCESS;
3953
3954 /*
3955 * Don't bother if there ain't any log output to do.
3956 */
3957 if (!fLog && !fLog2)
3958 return true;
3959
3960 /*
3961 * Update the state so DBGF reads the correct register values.
3962 */
3963 remR3StateUpdate(pVM, env->pVCpu);
3964
3965 /*
3966 * Log registers if requested.
3967 */
3968 if (fLog2)
3969 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3970
3971 /*
3972 * Disassemble to log.
3973 */
3974 if (fLog)
3975 {
3976 PVMCPU pVCpu = VMMGetCpu(pVM);
3977 char szBuf[256];
3978 szBuf[0] = '\0';
3979 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3980 pVCpu->idCpu,
3981 0, /* Sel */
3982 0, /* GCPtr */
3983 DBGF_DISAS_FLAGS_CURRENT_GUEST
3984 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3985 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3986 szBuf,
3987 sizeof(szBuf),
3988 NULL);
3989 if (RT_FAILURE(rc))
3990 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3991 if (pszPrefix && *pszPrefix)
3992 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3993 else
3994 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3995 }
3996
3997 return RT_SUCCESS(rc);
3998}
3999
4000
4001/**
4002 * Disassemble recompiled code.
4003 *
4004 * @param phFileIgnored Ignored, logfile usually.
4005 * @param pvCode Pointer to the code block.
4006 * @param cb Size of the code block.
4007 */
4008void disas(FILE *phFile, void *pvCode, unsigned long cb)
4009{
4010 if (LogIs2Enabled())
4011 {
4012 unsigned off = 0;
4013 char szOutput[256];
4014 DISCPUSTATE Cpu;
4015
4016 memset(&Cpu, 0, sizeof(Cpu));
4017#ifdef RT_ARCH_X86
4018 Cpu.mode = CPUMODE_32BIT;
4019#else
4020 Cpu.mode = CPUMODE_64BIT;
4021#endif
4022
4023 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4024 while (off < cb)
4025 {
4026 uint32_t cbInstr;
4027 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4028 RTLogPrintf("%s", szOutput);
4029 else
4030 {
4031 RTLogPrintf("disas error\n");
4032 cbInstr = 1;
4033#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4034 break;
4035#endif
4036 }
4037 off += cbInstr;
4038 }
4039 }
4040}
4041
4042
4043/**
4044 * Disassemble guest code.
4045 *
4046 * @param phFileIgnored Ignored, logfile usually.
4047 * @param uCode The guest address of the code to disassemble. (flat?)
4048 * @param cb Number of bytes to disassemble.
4049 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4050 */
4051void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4052{
4053 if (LogIs2Enabled())
4054 {
4055 PVM pVM = cpu_single_env->pVM;
4056 PVMCPU pVCpu = cpu_single_env->pVCpu;
4057 RTSEL cs;
4058 RTGCUINTPTR eip;
4059
4060 Assert(pVCpu);
4061
4062 /*
4063 * Update the state so DBGF reads the correct register values (flags).
4064 */
4065 remR3StateUpdate(pVM, pVCpu);
4066
4067 /*
4068 * Do the disassembling.
4069 */
4070 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4071 cs = cpu_single_env->segs[R_CS].selector;
4072 eip = uCode - cpu_single_env->segs[R_CS].base;
4073 for (;;)
4074 {
4075 char szBuf[256];
4076 uint32_t cbInstr;
4077 int rc = DBGFR3DisasInstrEx(pVM,
4078 pVCpu->idCpu,
4079 cs,
4080 eip,
4081 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4082 szBuf, sizeof(szBuf),
4083 &cbInstr);
4084 if (RT_SUCCESS(rc))
4085 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4086 else
4087 {
4088 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4089 cbInstr = 1;
4090 }
4091
4092 /* next */
4093 if (cb <= cbInstr)
4094 break;
4095 cb -= cbInstr;
4096 uCode += cbInstr;
4097 eip += cbInstr;
4098 }
4099 }
4100}
4101
4102
4103/**
4104 * Looks up a guest symbol.
4105 *
4106 * @returns Pointer to symbol name. This is a static buffer.
4107 * @param orig_addr The address in question.
4108 */
4109const char *lookup_symbol(target_ulong orig_addr)
4110{
4111 PVM pVM = cpu_single_env->pVM;
4112 RTGCINTPTR off = 0;
4113 RTDBGSYMBOL Sym;
4114 DBGFADDRESS Addr;
4115
4116 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4117 if (RT_SUCCESS(rc))
4118 {
4119 static char szSym[sizeof(Sym.szName) + 48];
4120 if (!off)
4121 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4122 else if (off > 0)
4123 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4124 else
4125 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4126 return szSym;
4127 }
4128 return "<N/A>";
4129}
4130
4131
4132#undef LOG_GROUP
4133#define LOG_GROUP LOG_GROUP_REM
4134
4135
4136/* -+- FF notifications -+- */
4137
4138
4139/**
4140 * Notification about a pending interrupt.
4141 *
4142 * @param pVM VM Handle.
4143 * @param pVCpu VMCPU Handle.
4144 * @param u8Interrupt Interrupt
4145 * @thread The emulation thread.
4146 */
4147REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4148{
4149 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4150 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4151}
4152
4153/**
4154 * Notification about a pending interrupt.
4155 *
4156 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4157 * @param pVM VM Handle.
4158 * @param pVCpu VMCPU Handle.
4159 * @thread The emulation thread.
4160 */
4161REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4162{
4163 return pVM->rem.s.u32PendingInterrupt;
4164}
4165
4166/**
4167 * Notification about the interrupt FF being set.
4168 *
4169 * @param pVM VM Handle.
4170 * @param pVCpu VMCPU Handle.
4171 * @thread The emulation thread.
4172 */
4173REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4174{
4175#ifndef IEM_VERIFICATION_MODE
4176 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4177 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4178 if (pVM->rem.s.fInREM)
4179 {
4180 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4181 CPU_INTERRUPT_EXTERNAL_HARD);
4182 }
4183#endif
4184}
4185
4186
4187/**
4188 * Notification about the interrupt FF being set.
4189 *
4190 * @param pVM VM Handle.
4191 * @param pVCpu VMCPU Handle.
4192 * @thread Any.
4193 */
4194REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4195{
4196 LogFlow(("REMR3NotifyInterruptClear:\n"));
4197 if (pVM->rem.s.fInREM)
4198 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4199}
4200
4201
4202/**
4203 * Notification about pending timer(s).
4204 *
4205 * @param pVM VM Handle.
4206 * @param pVCpuDst The target cpu for this notification.
4207 * TM will not broadcast pending timer events, but use
4208 * a dedicated EMT for them. So, only interrupt REM
4209 * execution if the given CPU is executing in REM.
4210 * @thread Any.
4211 */
4212REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4213{
4214#ifndef IEM_VERIFICATION_MODE
4215#ifndef DEBUG_bird
4216 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4217#endif
4218 if (pVM->rem.s.fInREM)
4219 {
4220 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4221 {
4222 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4223 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4224 CPU_INTERRUPT_EXTERNAL_TIMER);
4225 }
4226 else
4227 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4228 }
4229 else
4230 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4231#endif
4232}
4233
4234
4235/**
4236 * Notification about pending DMA transfers.
4237 *
4238 * @param pVM VM Handle.
4239 * @thread Any.
4240 */
4241REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4242{
4243#ifndef IEM_VERIFICATION_MODE
4244 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4245 if (pVM->rem.s.fInREM)
4246 {
4247 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4248 CPU_INTERRUPT_EXTERNAL_DMA);
4249 }
4250#endif
4251}
4252
4253
4254/**
4255 * Notification about pending timer(s).
4256 *
4257 * @param pVM VM Handle.
4258 * @thread Any.
4259 */
4260REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4261{
4262#ifndef IEM_VERIFICATION_MODE
4263 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4264 if (pVM->rem.s.fInREM)
4265 {
4266 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4267 CPU_INTERRUPT_EXTERNAL_EXIT);
4268 }
4269#endif
4270}
4271
4272
4273/**
4274 * Notification about pending FF set by an external thread.
4275 *
4276 * @param pVM VM handle.
4277 * @thread Any.
4278 */
4279REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4280{
4281#ifndef IEM_VERIFICATION_MODE
4282 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4283 if (pVM->rem.s.fInREM)
4284 {
4285 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4286 CPU_INTERRUPT_EXTERNAL_EXIT);
4287 }
4288#endif
4289}
4290
4291
4292#ifdef VBOX_WITH_STATISTICS
4293void remR3ProfileStart(int statcode)
4294{
4295 STAMPROFILEADV *pStat;
4296 switch(statcode)
4297 {
4298 case STATS_EMULATE_SINGLE_INSTR:
4299 pStat = &gStatExecuteSingleInstr;
4300 break;
4301 case STATS_QEMU_COMPILATION:
4302 pStat = &gStatCompilationQEmu;
4303 break;
4304 case STATS_QEMU_RUN_EMULATED_CODE:
4305 pStat = &gStatRunCodeQEmu;
4306 break;
4307 case STATS_QEMU_TOTAL:
4308 pStat = &gStatTotalTimeQEmu;
4309 break;
4310 case STATS_QEMU_RUN_TIMERS:
4311 pStat = &gStatTimers;
4312 break;
4313 case STATS_TLB_LOOKUP:
4314 pStat= &gStatTBLookup;
4315 break;
4316 case STATS_IRQ_HANDLING:
4317 pStat= &gStatIRQ;
4318 break;
4319 case STATS_RAW_CHECK:
4320 pStat = &gStatRawCheck;
4321 break;
4322
4323 default:
4324 AssertMsgFailed(("unknown stat %d\n", statcode));
4325 return;
4326 }
4327 STAM_PROFILE_ADV_START(pStat, a);
4328}
4329
4330
4331void remR3ProfileStop(int statcode)
4332{
4333 STAMPROFILEADV *pStat;
4334 switch(statcode)
4335 {
4336 case STATS_EMULATE_SINGLE_INSTR:
4337 pStat = &gStatExecuteSingleInstr;
4338 break;
4339 case STATS_QEMU_COMPILATION:
4340 pStat = &gStatCompilationQEmu;
4341 break;
4342 case STATS_QEMU_RUN_EMULATED_CODE:
4343 pStat = &gStatRunCodeQEmu;
4344 break;
4345 case STATS_QEMU_TOTAL:
4346 pStat = &gStatTotalTimeQEmu;
4347 break;
4348 case STATS_QEMU_RUN_TIMERS:
4349 pStat = &gStatTimers;
4350 break;
4351 case STATS_TLB_LOOKUP:
4352 pStat= &gStatTBLookup;
4353 break;
4354 case STATS_IRQ_HANDLING:
4355 pStat= &gStatIRQ;
4356 break;
4357 case STATS_RAW_CHECK:
4358 pStat = &gStatRawCheck;
4359 break;
4360 default:
4361 AssertMsgFailed(("unknown stat %d\n", statcode));
4362 return;
4363 }
4364 STAM_PROFILE_ADV_STOP(pStat, a);
4365}
4366#endif
4367
4368/**
4369 * Raise an RC, force rem exit.
4370 *
4371 * @param pVM VM handle.
4372 * @param rc The rc.
4373 */
4374void remR3RaiseRC(PVM pVM, int rc)
4375{
4376 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4377 Assert(pVM->rem.s.fInREM);
4378 VM_ASSERT_EMT(pVM);
4379 pVM->rem.s.rc = rc;
4380 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4381}
4382
4383
4384/* -+- timers -+- */
4385
/** qemu callback: returns the current value of the guest TSC via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4391
4392
4393/* -+- interrupts -+- */
4394
/** qemu callback: raises the FPU error line (legacy IRQ 13) via the ISA bus. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4400
/**
 * qemu callback: fetches the next interrupt vector to deliver.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     Pointer to the recompiler CPU structure.
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector recorded by REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts may still be pending; keep the hard interrupt request up. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4434
4435
4436/* -+- local apic -+- */
4437
#if 0 /* CPUMSetGuestMsr does this now. */
/* Kept for reference only; compiled out since the MSR path took over. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
#endif
4445
4446uint64_t cpu_get_apic_base(CPUX86State *env)
4447{
4448 uint64_t u64;
4449 int rc = PDMApicGetBase(env->pVM, &u64);
4450 if (RT_SUCCESS(rc))
4451 {
4452 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4453 return u64;
4454 }
4455 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4456 return 0;
4457}
4458
/** qemu callback: sets the task priority register via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4464
4465uint8_t cpu_get_apic_tpr(CPUX86State *env)
4466{
4467 uint8_t u8;
4468 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4469 if (RT_SUCCESS(rc))
4470 {
4471 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4472 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4473 }
4474 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4475 return 0;
4476}
4477
4478/**
4479 * Read an MSR.
4480 *
4481 * @retval 0 success.
4482 * @retval -1 failure, raise \#GP(0).
4483 * @param env The cpu state.
4484 * @param idMsr The MSR to read.
4485 * @param puValue Where to return the value.
4486 */
4487int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4488{
4489 Assert(env->pVCpu);
4490 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4491}
4492
4493/**
4494 * Write to an MSR.
4495 *
4496 * @retval 0 success.
4497 * @retval -1 failure, raise \#GP(0).
4498 * @param env The cpu state.
4499 * @param idMsr The MSR to read.
4500 * @param puValue Where to return the value.
4501 */
4502int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4503{
4504 Assert(env->pVCpu);
4505 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4506}
4507
4508/* -+- I/O Ports -+- */
4509
4510#undef LOG_GROUP
4511#define LOG_GROUP LOG_GROUP_REM_IOPORT
4512
4513void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4514{
4515 int rc;
4516
4517 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4518 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4519
4520 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4521 if (RT_LIKELY(rc == VINF_SUCCESS))
4522 return;
4523 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4524 {
4525 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4526 remR3RaiseRC(env->pVM, rc);
4527 return;
4528 }
4529 remAbort(rc, __FUNCTION__);
4530}
4531
4532void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4533{
4534 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4535 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4536 if (RT_LIKELY(rc == VINF_SUCCESS))
4537 return;
4538 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4539 {
4540 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4541 remR3RaiseRC(env->pVM, rc);
4542 return;
4543 }
4544 remAbort(rc, __FUNCTION__);
4545}
4546
4547void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4548{
4549 int rc;
4550 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4551 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4552 if (RT_LIKELY(rc == VINF_SUCCESS))
4553 return;
4554 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4555 {
4556 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4557 remR3RaiseRC(env->pVM, rc);
4558 return;
4559 }
4560 remAbort(rc, __FUNCTION__);
4561}
4562
4563uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4564{
4565 uint32_t u32 = 0;
4566 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4567 if (RT_LIKELY(rc == VINF_SUCCESS))
4568 {
4569 if (/*addr != 0x61 && */addr != 0x71)
4570 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4571 return (uint8_t)u32;
4572 }
4573 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4574 {
4575 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4576 remR3RaiseRC(env->pVM, rc);
4577 return (uint8_t)u32;
4578 }
4579 remAbort(rc, __FUNCTION__);
4580 return UINT8_C(0xff);
4581}
4582
4583uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4584{
4585 uint32_t u32 = 0;
4586 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4587 if (RT_LIKELY(rc == VINF_SUCCESS))
4588 {
4589 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4590 return (uint16_t)u32;
4591 }
4592 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4593 {
4594 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4595 remR3RaiseRC(env->pVM, rc);
4596 return (uint16_t)u32;
4597 }
4598 remAbort(rc, __FUNCTION__);
4599 return UINT16_C(0xffff);
4600}
4601
4602uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4603{
4604 uint32_t u32 = 0;
4605 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4606 if (RT_LIKELY(rc == VINF_SUCCESS))
4607 {
4608//if (addr==0x01f0 && u32 == 0x6b6d)
4609// loglevel = ~0;
4610 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4611 return u32;
4612 }
4613 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4614 {
4615 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4616 remR3RaiseRC(env->pVM, rc);
4617 return u32;
4618 }
4619 remAbort(rc, __FUNCTION__);
4620 return UINT32_C(0xffffffff);
4621}
4622
4623#undef LOG_GROUP
4624#define LOG_GROUP LOG_GROUP_REM
4625
4626
4627/* -+- helpers and misc other interfaces -+- */
4628
4629/**
4630 * Perform the CPUID instruction.
4631 *
4632 * @param env Pointer to the recompiler CPU structure.
4633 * @param idx The CPUID leaf (eax).
4634 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4635 * @param pvEAX Where to store eax.
4636 * @param pvEBX Where to store ebx.
4637 * @param pvECX Where to store ecx.
4638 * @param pvEDX Where to store edx.
4639 */
4640void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4641 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4642{
4643 NOREF(idxSub);
4644 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4645}
4646
4647
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 * Kept for reference only; compiled out.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4677
4678/**
4679 * Interface for the qemu cpu to report unhandled situation
4680 * raising a fatal VM error.
4681 */
4682void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4683{
4684 va_list va;
4685 PVM pVM;
4686 PVMCPU pVCpu;
4687 char szMsg[256];
4688
4689 /*
4690 * Bitch about it.
4691 */
4692 RTLogFlags(NULL, "nodisabled nobuffered");
4693 RTLogFlush(NULL);
4694
4695 va_start(va, pszFormat);
4696#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4697 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4698 unsigned cArgs = 0;
4699 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4700 const char *psz = strchr(pszFormat, '%');
4701 while (psz && cArgs < 6)
4702 {
4703 auArgs[cArgs++] = va_arg(va, uintptr_t);
4704 psz = strchr(psz + 1, '%');
4705 }
4706 switch (cArgs)
4707 {
4708 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4709 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4710 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4711 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4712 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4713 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4714 default:
4715 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4716 }
4717#else
4718 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4719#endif
4720 va_end(va);
4721
4722 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4723 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4724
4725 /*
4726 * If we're in REM context we'll sync back the state before 'jumping' to
4727 * the EMs failure handling.
4728 */
4729 pVM = cpu_single_env->pVM;
4730 pVCpu = cpu_single_env->pVCpu;
4731 Assert(pVCpu);
4732
4733 if (pVM->rem.s.fInREM)
4734 REMR3StateBack(pVM, pVCpu);
4735 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4736 AssertMsgFailed(("EMR3FatalError returned!\n"));
4737}
4738
4739
4740/**
4741 * Aborts the VM.
4742 *
4743 * @param rc VBox error code.
4744 * @param pszTip Hint about why/when this happened.
4745 */
4746void remAbort(int rc, const char *pszTip)
4747{
4748 PVM pVM;
4749 PVMCPU pVCpu;
4750
4751 /*
4752 * Bitch about it.
4753 */
4754 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4755 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4756
4757 /*
4758 * Jump back to where we entered the recompiler.
4759 */
4760 pVM = cpu_single_env->pVM;
4761 pVCpu = cpu_single_env->pVCpu;
4762 Assert(pVCpu);
4763
4764 if (pVM->rem.s.fInREM)
4765 REMR3StateBack(pVM, pVCpu);
4766
4767 EMR3FatalError(pVCpu, rc);
4768 AssertMsgFailed(("EMR3FatalError returned!\n"));
4769}
4770
4771
4772/**
4773 * Dumps a linux system call.
4774 * @param pVCpu VMCPU handle.
4775 */
4776void remR3DumpLnxSyscall(PVMCPU pVCpu)
4777{
4778 static const char *apsz[] =
4779 {
4780 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4781 "sys_exit",
4782 "sys_fork",
4783 "sys_read",
4784 "sys_write",
4785 "sys_open", /* 5 */
4786 "sys_close",
4787 "sys_waitpid",
4788 "sys_creat",
4789 "sys_link",
4790 "sys_unlink", /* 10 */
4791 "sys_execve",
4792 "sys_chdir",
4793 "sys_time",
4794 "sys_mknod",
4795 "sys_chmod", /* 15 */
4796 "sys_lchown16",
4797 "sys_ni_syscall", /* old break syscall holder */
4798 "sys_stat",
4799 "sys_lseek",
4800 "sys_getpid", /* 20 */
4801 "sys_mount",
4802 "sys_oldumount",
4803 "sys_setuid16",
4804 "sys_getuid16",
4805 "sys_stime", /* 25 */
4806 "sys_ptrace",
4807 "sys_alarm",
4808 "sys_fstat",
4809 "sys_pause",
4810 "sys_utime", /* 30 */
4811 "sys_ni_syscall", /* old stty syscall holder */
4812 "sys_ni_syscall", /* old gtty syscall holder */
4813 "sys_access",
4814 "sys_nice",
4815 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4816 "sys_sync",
4817 "sys_kill",
4818 "sys_rename",
4819 "sys_mkdir",
4820 "sys_rmdir", /* 40 */
4821 "sys_dup",
4822 "sys_pipe",
4823 "sys_times",
4824 "sys_ni_syscall", /* old prof syscall holder */
4825 "sys_brk", /* 45 */
4826 "sys_setgid16",
4827 "sys_getgid16",
4828 "sys_signal",
4829 "sys_geteuid16",
4830 "sys_getegid16", /* 50 */
4831 "sys_acct",
4832 "sys_umount", /* recycled never used phys() */
4833 "sys_ni_syscall", /* old lock syscall holder */
4834 "sys_ioctl",
4835 "sys_fcntl", /* 55 */
4836 "sys_ni_syscall", /* old mpx syscall holder */
4837 "sys_setpgid",
4838 "sys_ni_syscall", /* old ulimit syscall holder */
4839 "sys_olduname",
4840 "sys_umask", /* 60 */
4841 "sys_chroot",
4842 "sys_ustat",
4843 "sys_dup2",
4844 "sys_getppid",
4845 "sys_getpgrp", /* 65 */
4846 "sys_setsid",
4847 "sys_sigaction",
4848 "sys_sgetmask",
4849 "sys_ssetmask",
4850 "sys_setreuid16", /* 70 */
4851 "sys_setregid16",
4852 "sys_sigsuspend",
4853 "sys_sigpending",
4854 "sys_sethostname",
4855 "sys_setrlimit", /* 75 */
4856 "sys_old_getrlimit",
4857 "sys_getrusage",
4858 "sys_gettimeofday",
4859 "sys_settimeofday",
4860 "sys_getgroups16", /* 80 */
4861 "sys_setgroups16",
4862 "old_select",
4863 "sys_symlink",
4864 "sys_lstat",
4865 "sys_readlink", /* 85 */
4866 "sys_uselib",
4867 "sys_swapon",
4868 "sys_reboot",
4869 "old_readdir",
4870 "old_mmap", /* 90 */
4871 "sys_munmap",
4872 "sys_truncate",
4873 "sys_ftruncate",
4874 "sys_fchmod",
4875 "sys_fchown16", /* 95 */
4876 "sys_getpriority",
4877 "sys_setpriority",
4878 "sys_ni_syscall", /* old profil syscall holder */
4879 "sys_statfs",
4880 "sys_fstatfs", /* 100 */
4881 "sys_ioperm",
4882 "sys_socketcall",
4883 "sys_syslog",
4884 "sys_setitimer",
4885 "sys_getitimer", /* 105 */
4886 "sys_newstat",
4887 "sys_newlstat",
4888 "sys_newfstat",
4889 "sys_uname",
4890 "sys_iopl", /* 110 */
4891 "sys_vhangup",
4892 "sys_ni_syscall", /* old "idle" system call */
4893 "sys_vm86old",
4894 "sys_wait4",
4895 "sys_swapoff", /* 115 */
4896 "sys_sysinfo",
4897 "sys_ipc",
4898 "sys_fsync",
4899 "sys_sigreturn",
4900 "sys_clone", /* 120 */
4901 "sys_setdomainname",
4902 "sys_newuname",
4903 "sys_modify_ldt",
4904 "sys_adjtimex",
4905 "sys_mprotect", /* 125 */
4906 "sys_sigprocmask",
4907 "sys_ni_syscall", /* old "create_module" */
4908 "sys_init_module",
4909 "sys_delete_module",
4910 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4911 "sys_quotactl",
4912 "sys_getpgid",
4913 "sys_fchdir",
4914 "sys_bdflush",
4915 "sys_sysfs", /* 135 */
4916 "sys_personality",
4917 "sys_ni_syscall", /* reserved for afs_syscall */
4918 "sys_setfsuid16",
4919 "sys_setfsgid16",
4920 "sys_llseek", /* 140 */
4921 "sys_getdents",
4922 "sys_select",
4923 "sys_flock",
4924 "sys_msync",
4925 "sys_readv", /* 145 */
4926 "sys_writev",
4927 "sys_getsid",
4928 "sys_fdatasync",
4929 "sys_sysctl",
4930 "sys_mlock", /* 150 */
4931 "sys_munlock",
4932 "sys_mlockall",
4933 "sys_munlockall",
4934 "sys_sched_setparam",
4935 "sys_sched_getparam", /* 155 */
4936 "sys_sched_setscheduler",
4937 "sys_sched_getscheduler",
4938 "sys_sched_yield",
4939 "sys_sched_get_priority_max",
4940 "sys_sched_get_priority_min", /* 160 */
4941 "sys_sched_rr_get_interval",
4942 "sys_nanosleep",
4943 "sys_mremap",
4944 "sys_setresuid16",
4945 "sys_getresuid16", /* 165 */
4946 "sys_vm86",
4947 "sys_ni_syscall", /* Old sys_query_module */
4948 "sys_poll",
4949 "sys_nfsservctl",
4950 "sys_setresgid16", /* 170 */
4951 "sys_getresgid16",
4952 "sys_prctl",
4953 "sys_rt_sigreturn",
4954 "sys_rt_sigaction",
4955 "sys_rt_sigprocmask", /* 175 */
4956 "sys_rt_sigpending",
4957 "sys_rt_sigtimedwait",
4958 "sys_rt_sigqueueinfo",
4959 "sys_rt_sigsuspend",
4960 "sys_pread64", /* 180 */
4961 "sys_pwrite64",
4962 "sys_chown16",
4963 "sys_getcwd",
4964 "sys_capget",
4965 "sys_capset", /* 185 */
4966 "sys_sigaltstack",
4967 "sys_sendfile",
4968 "sys_ni_syscall", /* reserved for streams1 */
4969 "sys_ni_syscall", /* reserved for streams2 */
4970 "sys_vfork", /* 190 */
4971 "sys_getrlimit",
4972 "sys_mmap2",
4973 "sys_truncate64",
4974 "sys_ftruncate64",
4975 "sys_stat64", /* 195 */
4976 "sys_lstat64",
4977 "sys_fstat64",
4978 "sys_lchown",
4979 "sys_getuid",
4980 "sys_getgid", /* 200 */
4981 "sys_geteuid",
4982 "sys_getegid",
4983 "sys_setreuid",
4984 "sys_setregid",
4985 "sys_getgroups", /* 205 */
4986 "sys_setgroups",
4987 "sys_fchown",
4988 "sys_setresuid",
4989 "sys_getresuid",
4990 "sys_setresgid", /* 210 */
4991 "sys_getresgid",
4992 "sys_chown",
4993 "sys_setuid",
4994 "sys_setgid",
4995 "sys_setfsuid", /* 215 */
4996 "sys_setfsgid",
4997 "sys_pivot_root",
4998 "sys_mincore",
4999 "sys_madvise",
5000 "sys_getdents64", /* 220 */
5001 "sys_fcntl64",
5002 "sys_ni_syscall", /* reserved for TUX */
5003 "sys_ni_syscall",
5004 "sys_gettid",
5005 "sys_readahead", /* 225 */
5006 "sys_setxattr",
5007 "sys_lsetxattr",
5008 "sys_fsetxattr",
5009 "sys_getxattr",
5010 "sys_lgetxattr", /* 230 */
5011 "sys_fgetxattr",
5012 "sys_listxattr",
5013 "sys_llistxattr",
5014 "sys_flistxattr",
5015 "sys_removexattr", /* 235 */
5016 "sys_lremovexattr",
5017 "sys_fremovexattr",
5018 "sys_tkill",
5019 "sys_sendfile64",
5020 "sys_futex", /* 240 */
5021 "sys_sched_setaffinity",
5022 "sys_sched_getaffinity",
5023 "sys_set_thread_area",
5024 "sys_get_thread_area",
5025 "sys_io_setup", /* 245 */
5026 "sys_io_destroy",
5027 "sys_io_getevents",
5028 "sys_io_submit",
5029 "sys_io_cancel",
5030 "sys_fadvise64", /* 250 */
5031 "sys_ni_syscall",
5032 "sys_exit_group",
5033 "sys_lookup_dcookie",
5034 "sys_epoll_create",
5035 "sys_epoll_ctl", /* 255 */
5036 "sys_epoll_wait",
5037 "sys_remap_file_pages",
5038 "sys_set_tid_address",
5039 "sys_timer_create",
5040 "sys_timer_settime", /* 260 */
5041 "sys_timer_gettime",
5042 "sys_timer_getoverrun",
5043 "sys_timer_delete",
5044 "sys_clock_settime",
5045 "sys_clock_gettime", /* 265 */
5046 "sys_clock_getres",
5047 "sys_clock_nanosleep",
5048 "sys_statfs64",
5049 "sys_fstatfs64",
5050 "sys_tgkill", /* 270 */
5051 "sys_utimes",
5052 "sys_fadvise64_64",
5053 "sys_ni_syscall" /* sys_vserver */
5054 };
5055
5056 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5057 switch (uEAX)
5058 {
5059 default:
5060 if (uEAX < RT_ELEMENTS(apsz))
5061 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5062 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5063 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5064 else
5065 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5066 break;
5067
5068 }
5069}
5070
5071
5072/**
5073 * Dumps an OpenBSD system call.
5074 * @param pVCpu VMCPU handle.
5075 */
5076void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5077{
5078 static const char *apsz[] =
5079 {
5080 "SYS_syscall", //0
5081 "SYS_exit", //1
5082 "SYS_fork", //2
5083 "SYS_read", //3
5084 "SYS_write", //4
5085 "SYS_open", //5
5086 "SYS_close", //6
5087 "SYS_wait4", //7
5088 "SYS_8",
5089 "SYS_link", //9
5090 "SYS_unlink", //10
5091 "SYS_11",
5092 "SYS_chdir", //12
5093 "SYS_fchdir", //13
5094 "SYS_mknod", //14
5095 "SYS_chmod", //15
5096 "SYS_chown", //16
5097 "SYS_break", //17
5098 "SYS_18",
5099 "SYS_19",
5100 "SYS_getpid", //20
5101 "SYS_mount", //21
5102 "SYS_unmount", //22
5103 "SYS_setuid", //23
5104 "SYS_getuid", //24
5105 "SYS_geteuid", //25
5106 "SYS_ptrace", //26
5107 "SYS_recvmsg", //27
5108 "SYS_sendmsg", //28
5109 "SYS_recvfrom", //29
5110 "SYS_accept", //30
5111 "SYS_getpeername", //31
5112 "SYS_getsockname", //32
5113 "SYS_access", //33
5114 "SYS_chflags", //34
5115 "SYS_fchflags", //35
5116 "SYS_sync", //36
5117 "SYS_kill", //37
5118 "SYS_38",
5119 "SYS_getppid", //39
5120 "SYS_40",
5121 "SYS_dup", //41
5122 "SYS_opipe", //42
5123 "SYS_getegid", //43
5124 "SYS_profil", //44
5125 "SYS_ktrace", //45
5126 "SYS_sigaction", //46
5127 "SYS_getgid", //47
5128 "SYS_sigprocmask", //48
5129 "SYS_getlogin", //49
5130 "SYS_setlogin", //50
5131 "SYS_acct", //51
5132 "SYS_sigpending", //52
5133 "SYS_osigaltstack", //53
5134 "SYS_ioctl", //54
5135 "SYS_reboot", //55
5136 "SYS_revoke", //56
5137 "SYS_symlink", //57
5138 "SYS_readlink", //58
5139 "SYS_execve", //59
5140 "SYS_umask", //60
5141 "SYS_chroot", //61
5142 "SYS_62",
5143 "SYS_63",
5144 "SYS_64",
5145 "SYS_65",
5146 "SYS_vfork", //66
5147 "SYS_67",
5148 "SYS_68",
5149 "SYS_sbrk", //69
5150 "SYS_sstk", //70
5151 "SYS_61",
5152 "SYS_vadvise", //72
5153 "SYS_munmap", //73
5154 "SYS_mprotect", //74
5155 "SYS_madvise", //75
5156 "SYS_76",
5157 "SYS_77",
5158 "SYS_mincore", //78
5159 "SYS_getgroups", //79
5160 "SYS_setgroups", //80
5161 "SYS_getpgrp", //81
5162 "SYS_setpgid", //82
5163 "SYS_setitimer", //83
5164 "SYS_84",
5165 "SYS_85",
5166 "SYS_getitimer", //86
5167 "SYS_87",
5168 "SYS_88",
5169 "SYS_89",
5170 "SYS_dup2", //90
5171 "SYS_91",
5172 "SYS_fcntl", //92
5173 "SYS_select", //93
5174 "SYS_94",
5175 "SYS_fsync", //95
5176 "SYS_setpriority", //96
5177 "SYS_socket", //97
5178 "SYS_connect", //98
5179 "SYS_99",
5180 "SYS_getpriority", //100
5181 "SYS_101",
5182 "SYS_102",
5183 "SYS_sigreturn", //103
5184 "SYS_bind", //104
5185 "SYS_setsockopt", //105
5186 "SYS_listen", //106
5187 "SYS_107",
5188 "SYS_108",
5189 "SYS_109",
5190 "SYS_110",
5191 "SYS_sigsuspend", //111
5192 "SYS_112",
5193 "SYS_113",
5194 "SYS_114",
5195 "SYS_115",
5196 "SYS_gettimeofday", //116
5197 "SYS_getrusage", //117
5198 "SYS_getsockopt", //118
5199 "SYS_119",
5200 "SYS_readv", //120
5201 "SYS_writev", //121
5202 "SYS_settimeofday", //122
5203 "SYS_fchown", //123
5204 "SYS_fchmod", //124
5205 "SYS_125",
5206 "SYS_setreuid", //126
5207 "SYS_setregid", //127
5208 "SYS_rename", //128
5209 "SYS_129",
5210 "SYS_130",
5211 "SYS_flock", //131
5212 "SYS_mkfifo", //132
5213 "SYS_sendto", //133
5214 "SYS_shutdown", //134
5215 "SYS_socketpair", //135
5216 "SYS_mkdir", //136
5217 "SYS_rmdir", //137
5218 "SYS_utimes", //138
5219 "SYS_139",
5220 "SYS_adjtime", //140
5221 "SYS_141",
5222 "SYS_142",
5223 "SYS_143",
5224 "SYS_144",
5225 "SYS_145",
5226 "SYS_146",
5227 "SYS_setsid", //147
5228 "SYS_quotactl", //148
5229 "SYS_149",
5230 "SYS_150",
5231 "SYS_151",
5232 "SYS_152",
5233 "SYS_153",
5234 "SYS_154",
5235 "SYS_nfssvc", //155
5236 "SYS_156",
5237 "SYS_157",
5238 "SYS_158",
5239 "SYS_159",
5240 "SYS_160",
5241 "SYS_getfh", //161
5242 "SYS_162",
5243 "SYS_163",
5244 "SYS_164",
5245 "SYS_sysarch", //165
5246 "SYS_166",
5247 "SYS_167",
5248 "SYS_168",
5249 "SYS_169",
5250 "SYS_170",
5251 "SYS_171",
5252 "SYS_172",
5253 "SYS_pread", //173
5254 "SYS_pwrite", //174
5255 "SYS_175",
5256 "SYS_176",
5257 "SYS_177",
5258 "SYS_178",
5259 "SYS_179",
5260 "SYS_180",
5261 "SYS_setgid", //181
5262 "SYS_setegid", //182
5263 "SYS_seteuid", //183
5264 "SYS_lfs_bmapv", //184
5265 "SYS_lfs_markv", //185
5266 "SYS_lfs_segclean", //186
5267 "SYS_lfs_segwait", //187
5268 "SYS_188",
5269 "SYS_189",
5270 "SYS_190",
5271 "SYS_pathconf", //191
5272 "SYS_fpathconf", //192
5273 "SYS_swapctl", //193
5274 "SYS_getrlimit", //194
5275 "SYS_setrlimit", //195
5276 "SYS_getdirentries", //196
5277 "SYS_mmap", //197
5278 "SYS___syscall", //198
5279 "SYS_lseek", //199
5280 "SYS_truncate", //200
5281 "SYS_ftruncate", //201
5282 "SYS___sysctl", //202
5283 "SYS_mlock", //203
5284 "SYS_munlock", //204
5285 "SYS_205",
5286 "SYS_futimes", //206
5287 "SYS_getpgid", //207
5288 "SYS_xfspioctl", //208
5289 "SYS_209",
5290 "SYS_210",
5291 "SYS_211",
5292 "SYS_212",
5293 "SYS_213",
5294 "SYS_214",
5295 "SYS_215",
5296 "SYS_216",
5297 "SYS_217",
5298 "SYS_218",
5299 "SYS_219",
5300 "SYS_220",
5301 "SYS_semget", //221
5302 "SYS_222",
5303 "SYS_223",
5304 "SYS_224",
5305 "SYS_msgget", //225
5306 "SYS_msgsnd", //226
5307 "SYS_msgrcv", //227
5308 "SYS_shmat", //228
5309 "SYS_229",
5310 "SYS_shmdt", //230
5311 "SYS_231",
5312 "SYS_clock_gettime", //232
5313 "SYS_clock_settime", //233
5314 "SYS_clock_getres", //234
5315 "SYS_235",
5316 "SYS_236",
5317 "SYS_237",
5318 "SYS_238",
5319 "SYS_239",
5320 "SYS_nanosleep", //240
5321 "SYS_241",
5322 "SYS_242",
5323 "SYS_243",
5324 "SYS_244",
5325 "SYS_245",
5326 "SYS_246",
5327 "SYS_247",
5328 "SYS_248",
5329 "SYS_249",
5330 "SYS_minherit", //250
5331 "SYS_rfork", //251
5332 "SYS_poll", //252
5333 "SYS_issetugid", //253
5334 "SYS_lchown", //254
5335 "SYS_getsid", //255
5336 "SYS_msync", //256
5337 "SYS_257",
5338 "SYS_258",
5339 "SYS_259",
5340 "SYS_getfsstat", //260
5341 "SYS_statfs", //261
5342 "SYS_fstatfs", //262
5343 "SYS_pipe", //263
5344 "SYS_fhopen", //264
5345 "SYS_265",
5346 "SYS_fhstatfs", //266
5347 "SYS_preadv", //267
5348 "SYS_pwritev", //268
5349 "SYS_kqueue", //269
5350 "SYS_kevent", //270
5351 "SYS_mlockall", //271
5352 "SYS_munlockall", //272
5353 "SYS_getpeereid", //273
5354 "SYS_274",
5355 "SYS_275",
5356 "SYS_276",
5357 "SYS_277",
5358 "SYS_278",
5359 "SYS_279",
5360 "SYS_280",
5361 "SYS_getresuid", //281
5362 "SYS_setresuid", //282
5363 "SYS_getresgid", //283
5364 "SYS_setresgid", //284
5365 "SYS_285",
5366 "SYS_mquery", //286
5367 "SYS_closefrom", //287
5368 "SYS_sigaltstack", //288
5369 "SYS_shmget", //289
5370 "SYS_semop", //290
5371 "SYS_stat", //291
5372 "SYS_fstat", //292
5373 "SYS_lstat", //293
5374 "SYS_fhstat", //294
5375 "SYS___semctl", //295
5376 "SYS_shmctl", //296
5377 "SYS_msgctl", //297
5378 "SYS_MAXSYSCALL", //298
5379 //299
5380 //300
5381 };
5382 uint32_t uEAX;
5383 if (!LogIsEnabled())
5384 return;
5385 uEAX = CPUMGetGuestEAX(pVCpu);
5386 switch (uEAX)
5387 {
5388 default:
5389 if (uEAX < RT_ELEMENTS(apsz))
5390 {
5391 uint32_t au32Args[8] = {0};
5392 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5393 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5394 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5395 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5396 }
5397 else
5398 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5399 break;
5400 }
5401}
5402
5403
5404#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5405/**
5406 * The Dll main entry point (stub).
5407 */
5408bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5409{
5410 return true;
5411}
5412
/**
 * Minimal memcpy replacement for the no-CRT build.
 *
 * Plain byte-by-byte copy; the regions must not overlap (standard memcpy
 * contract).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (read-only).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t        *pbDst = (uint8_t *)dst;
    const uint8_t  *pbSrc = (const uint8_t *)src; /* was non-const, silently dropping the const qualifier from src */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5420
5421#endif
5422
/**
 * QEMU callback hook for SMM state changes — intentionally a no-op here.
 * NOTE(review): presumably invoked by the recompiler core when env's SMM
 * state toggles; VBox has nothing to update on that event — confirm against
 * the QEMU side.
 */
void cpu_smm_update(CPUX86State *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette