VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@38838

Last change on this file since 38838 was 38838, checked in by vboxsync, 13 years ago:

VMM,++: Try to fix the async reset, suspend and power-off problems in PDM wrt conflicting VMM requests. Split them into priority requests and normal requests. The priority requests can safely be processed when PDM is doing async state change waits, the normal ones cannot. (The problem I bumped into was an unmap-chunk request from PGM being processed during PDMR3Reset, causing a recursive VMMR3EmtRendezvous deadlock.)
/* $Id: VBoxRecompiler.c 38838 2011-09-23 11:21:55Z vboxsync $ */
/** @file
 * VBox Recompiler - QEMU.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_REM
#include <stdio.h> /* FILE */
#include "osdep.h"
#include "config.h"
#include "cpu.h"
#include "exec-all.h"
#include "ioport.h"

#include <VBox/vmm/rem.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/dbg.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include "REMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/param.h>
#include <VBox/err.h>

#include <VBox/log.h>
#include <iprt/semaphore.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/thread.h>
#include <iprt/string.h>

/* Don't wanna include everything. */
extern void cpu_exec_init_all(unsigned long tb_size);
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUX86State *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
unsigned long get_phys_page_offset(target_ulong addr);
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static void     remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
static int      remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;

/*
 * Global stuff.
 */

/** MMIO read callbacks. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};


#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
216 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
217 "If no arguments show the current state."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/**
 * Initializes the REM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL; /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
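    /* Fetch the CPUID feature words the recompiler consults: standard leaf 1
       (ECX/EDX) and extended leaf 0x80000001. */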
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

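    /* Chain every record into the free list via idxNext; the pending list
       starts out empty (UINT32_MAX serves as the end-of-list sentinel). */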
    for (i = 0; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}


/**
 * Finalizes the REM initialization.
 *
 * This is called after all components, devices and drivers have
 * been initialized. Its main purpose is to finish the RAM related
 * initialization.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 */
REMR3DECL(int) REMR3InitFinalize(PVM pVM)
{
    int rc;

    /*
     * Ram size & dirty bit map.
     */
    Assert(!pVM->rem.s.fGCPhysLastRamFixed);
    pVM->rem.s.fGCPhysLastRamFixed = true;
#ifdef RT_STRICT
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
#else
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
#endif
    return rc;
}

/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

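    /* The dirty map uses one byte per guest page; hence the PAGE_SHIFT scaling below. */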
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up to the nearest 4GB RAM boundary and leave at least 64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

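        /* Shift the map pointer up so the in-use bytes end exactly where the
           inaccessible tail begins, making any overrun fault immediately. */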
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}


/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources; the VM
 * itself is at this point powered off or suspended.
538 *
539 * @returns VBox status code.
540 * @param pVM The VM to operate on.
541 */
542REMR3DECL(int) REMR3Term(PVM pVM)
543{
544#ifdef VBOX_WITH_STATISTICS
545 /*
546 * Statistics.
547 */
548 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
549 STAM_DEREG(pVM, &gStatCompilationQEmu);
550 STAM_DEREG(pVM, &gStatRunCodeQEmu);
551 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
552 STAM_DEREG(pVM, &gStatTimers);
553 STAM_DEREG(pVM, &gStatTBLookup);
554 STAM_DEREG(pVM, &gStatIRQ);
555 STAM_DEREG(pVM, &gStatRawCheck);
556 STAM_DEREG(pVM, &gStatMemRead);
557 STAM_DEREG(pVM, &gStatMemWrite);
558 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
559 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
560
561 STAM_DEREG(pVM, &gStatCpuGetTSC);
562
563 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
564 STAM_DEREG(pVM, &gStatRefuseVM86);
565 STAM_DEREG(pVM, &gStatRefusePaging);
566 STAM_DEREG(pVM, &gStatRefusePAE);
567 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
568 STAM_DEREG(pVM, &gStatRefuseIF0);
569 STAM_DEREG(pVM, &gStatRefuseCode16);
570 STAM_DEREG(pVM, &gStatRefuseWP0);
571 STAM_DEREG(pVM, &gStatRefuseRing1or2);
572 STAM_DEREG(pVM, &gStatRefuseCanExecute);
573 STAM_DEREG(pVM, &gStatFlushTBs);
574
575 STAM_DEREG(pVM, &gStatREMGDTChange);
576 STAM_DEREG(pVM, &gStatREMLDTRChange);
577 STAM_DEREG(pVM, &gStatREMIDTChange);
578 STAM_DEREG(pVM, &gStatREMTRChange);
579
580 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
581 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
582 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
583 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
584 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
585 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
586
587 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
588 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
589 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
590 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
591 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
592 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
593
594 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
595#endif /* VBOX_WITH_STATISTICS */
596
597 STAM_REL_DEREG(pVM, &tb_flush_count);
598 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
599 STAM_REL_DEREG(pVM, &tlb_flush_count);
600
601 return VINF_SUCCESS;
602}
603
604
605/**
606 * The VM is being reset.
607 *
 * For the REM component this means calling cpu_reset() and
 * reinitializing some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
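    /* Unit layout: hflags, ~0 separator, CPU_RAW_RING0 indicator, pending
       interrupt, ~0 terminator - matched by remR3Load below. */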
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0); /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0); /* terminator */
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int      rc;
    PREM     pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (   uVersion != REM_SAVED_STATE_VERSION
        && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}



#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM_RUN

/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enable single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, it has to be disabled before we start stepping.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves.
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}


/**
 * Set a breakpoint using the REM facilities.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
    {
        LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
    return VERR_REM_NO_MORE_BP_SLOTS;
}


/**
 * Clears a breakpoint set by REMR3BreakpointSet().
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
    {
        LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
    return VERR_REM_BP_NOT_FOUND;
}


/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint *pBP;
                    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}


/**
 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The Virtual CPU handle.
 */
static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    Assert(pVM->rem.s.fInREM);
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    cpu_single_step(&pVM->rem.s.Env, 1);
#else
    Assert(!pVM->rem.s.Env.singlestep_enabled);
#endif

    /*
     * Now we set the execute single instruction flag and enter the cpu_exec loop.
     */
    for (;;)
    {
        char szBuf[256];

        /*
         * Log the current registers state and instruction.
         */
        remR3StateUpdate(pVM, pVCpu);
        DBGFR3Info(pVM, "cpumguest", NULL, NULL);
        szBuf[0] = '\0';
        rc = DBGFR3DisasInstrEx(pVM,
                                pVCpu->idCpu,
                                0, /* Sel */
                                0, /* GCPtr */
                                DBGF_DISAS_FLAGS_CURRENT_GUEST
                                | DBGF_DISAS_FLAGS_DEFAULT_MODE
                                | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
                                szBuf,
                                sizeof(szBuf),
                                NULL);
        if (RT_FAILURE(rc))
            RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
        RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);

        /*
         * Execute the instruction.
         */
        TMNotifyStartOfExecution(pVCpu);

        if (   pVM->rem.s.Env.exception_index < 0
            || pVM->rem.s.Env.exception_index > 256)
            pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        pVM->rem.s.Env.interrupt_request = 0;
#else
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
#endif
        if (   VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
            || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
            pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1144 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1145 pVM->rem.s.Env.interrupt_request,
1146 pVM->rem.s.Env.halted,
1147 pVM->rem.s.Env.exception_index
1148 );

        rc = cpu_exec(&pVM->rem.s.Env);

        RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        TMNotifyEndOfExecution(pVCpu);

        switch (rc)
        {
#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
            /*
             * The normal exit.
             */
            case EXCP_SINGLE_INSTR:
                if (   !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
                    && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                    continue;
                RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                            pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                rc = VINF_SUCCESS;
                break;

#else
            /*
             * The normal exit, check for breakpoints at PC just to be sure.
             */
#endif
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint *pBP;
                    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
                if (rc == VINF_EM_DBG_STEPPED)
                {
                    if (   !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
                        && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                        continue;

                    RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                                pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                    rc = VINF_SUCCESS;
                }
#endif
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
                rc = VINF_SUCCESS;
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
                rc = VINF_EM_RESCHEDULE;
                break;
        }
        break;
    }

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
//    cpu_single_step(&pVM->rem.s.Env, 0);
#else
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
#endif
    return rc;
}


/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                CPUBreakpoint *pBP;
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}


/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env             The CPU env struct.
 * @param   eip             The EIP to check this for (might differ from env->eip).
 * @param   fFlags          hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException     Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    return false;
#endif

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * The simple check first...
         */
        if (!EMIsHwVirtExecutionEnabled(env->pVM))
            return false;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

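        /* QEMU keeps the raw descriptor attribute bits shifted left by 8 in the
           'flags' fields; ">> 8" recovers them and the 0xF0FF mask drops the
           limit 19:16 bits that share that word. */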
        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr = env->ldt.selector;
        Ctx.ldtrHid.u64Base = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bit protected mode code, with paging enabled, is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->breakpoints))
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->watchpoints))
    {
        //Log2(("raw mode refused: Watchpoints\n"));
        return false;
    }

    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that are not covered by the above checks.
1645 */
1646 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1647 {
1648 Log2(("raw mode refused: first scheduling\n"));
1649 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1650 return false;
1651 }
1652
1653 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1654 *piException = EXCP_EXECUTE_RAW;
1655 return true;
1656}
1657
1658
1659/**
1660 * Fetches a code byte.
1661 *
1662 * @returns Success indicator (bool) for ease of use.
1663 * @param env The CPU environment structure.
1664 * @param GCPtrInstr Where to fetch code.
1665 * @param pu8Byte Where to store the byte on success.
1666 */
1667bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1668{
1669 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1670 if (RT_SUCCESS(rc))
1671 return true;
1672 return false;
1673}
1674
1675
1676/**
1677 * Flush (or invalidate if you like) page table/dir entry.
1678 *
1679 * (invlpg instruction; tlb_flush_page)
1680 *
1681 * @param env Pointer to cpu environment.
1682 * @param GCPtr The virtual address whose page table/dir entry should be invalidated.
1683 */
1684void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1685{
1686 PVM pVM = env->pVM;
1687 PCPUMCTX pCtx;
1688 int rc;
1689
1690 Assert(EMRemIsLockOwner(env->pVM));
1691
1692 /*
1693 * When we're replaying invlpg instructions or restoring a saved
1694 * state we disable this path.
1695 */
1696 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1697 return;
1698 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1699 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1700
1701 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1702
1703 /*
1704 * Update the control registers before calling PGMFlushPage.
1705 */
1706 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1707 Assert(pCtx);
1708 pCtx->cr0 = env->cr[0];
1709 pCtx->cr3 = env->cr[3];
1710 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1711 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1712 pCtx->cr4 = env->cr[4];
1713
1714 /*
1715 * Let PGM do the rest.
1716 */
1717 Assert(env->pVCpu);
1718 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1719 if (RT_FAILURE(rc))
1720 {
1721 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1722 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1723 }
1724 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1725}
1726
1727
1728#ifndef REM_PHYS_ADDR_IN_TLB
1729/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
1730void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
1731{
1732 void *pv;
1733 int rc;
1734
1735 /* Address must be aligned enough to fiddle with lower bits */
1736 Assert((physAddr & 0x3) == 0);
1737
1738 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1739 Assert( rc == VINF_SUCCESS
1740 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1741 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1742 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
1743 if (RT_FAILURE(rc))
1744 return (void *)1;
1745 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1746 return (void *)((uintptr_t)pv | 2);
1747 return pv;
1748}
1749#endif /* REM_PHYS_ADDR_IN_TLB */
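/* A minimal consumer sketch for the pointer tagging above (the helper name is
 * illustrative and not part of the build): bit 0 marks an inaccessible page
 * (the (void *)1 error return), bit 1 a write-monitored page, and with both
 * bits clear the pointer is directly usable. */
#if 0
static bool remR3ExampleDecodeTlbPtr(void *pv, void **ppvReal, bool *pfCatchWrite)
{
    uintptr_t uPtr = (uintptr_t)pv;
    if (uPtr & 1)                       /* unassigned or all-access-trapped page */
        return false;
    *pfCatchWrite = RT_BOOL(uPtr & 2);  /* writes must be routed to the handler */
    *ppvReal      = (void *)(uPtr & ~(uintptr_t)3);
    return true;
}
#endif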
1750
1751
1752/**
1753 * Called from tlb_protect_code in order to write monitor a code page.
1754 *
1755 * @param env Pointer to the CPU environment.
1756 * @param GCPtr Code page to monitor
1757 */
1758void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1759{
1760#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1761 Assert(env->pVM->rem.s.fInREM);
1762 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1763 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1764 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1765 && !(env->eflags & VM_MASK) /* no V86 mode */
1766 && !HWACCMIsEnabled(env->pVM))
1767 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1768#endif
1769}
1770
1771
1772/**
1773 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1774 *
1775 * @param env Pointer to the CPU environment.
1776 * @param GCPtr Code page to monitor
1777 */
1778void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1779{
1780 Assert(env->pVM->rem.s.fInREM);
1781#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1782 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1783 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1784 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1785 && !(env->eflags & VM_MASK) /* no V86 mode */
1786 && !HWACCMIsEnabled(env->pVM))
1787 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1788#endif
1789}
1790
1791
1792/**
1793 * Called when the CPU is initialized, any of the CRx registers are changed or
1794 * when the A20 line is modified.
1795 *
1796 * @param env Pointer to the CPU environment.
1797 * @param fGlobal Set if the flush is global.
1798 */
1799void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1800{
1801 PVM pVM = env->pVM;
1802 PCPUMCTX pCtx;
1803 Assert(EMRemIsLockOwner(pVM));
1804
1805 /*
1806 * When we're replaying invlpg instructions or restoring a saved
1807 * state we disable this path.
1808 */
1809 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1810 return;
1811 Assert(pVM->rem.s.fInREM);
1812
1813 /*
1814 * The caller doesn't check cr4, so we have to do that for ourselves.
1815 */
1816 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1817 fGlobal = true;
1818 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1819
1820 /*
1821 * Update the control registers before calling PGMR3FlushTLB.
1822 */
1823 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1824 Assert(pCtx);
1825 pCtx->cr0 = env->cr[0];
1826 pCtx->cr3 = env->cr[3];
1827 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1828 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1829 pCtx->cr4 = env->cr[4];
1830
1831 /*
1832 * Let PGM do the rest.
1833 */
1834 Assert(env->pVCpu);
1835 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1836}
1837
1838
1839/**
1840 * Called when any of the cr0, cr4 or efer registers is updated.
1841 *
1842 * @param env Pointer to the CPU environment.
1843 */
1844void remR3ChangeCpuMode(CPUX86State *env)
1845{
1846 PVM pVM = env->pVM;
1847 uint64_t efer;
1848 PCPUMCTX pCtx;
1849 int rc;
1850
1851 /*
1852 * When we're replaying loads or restoring a saved
1853 * state this path is disabled.
1854 */
1855 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1856 return;
1857 Assert(pVM->rem.s.fInREM);
1858
1859 /*
1860 * Update the control registers before calling PGMChangeMode()
1861 * as it may need to map whatever cr3 is pointing to.
1862 */
1863 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1864 Assert(pCtx);
1865 pCtx->cr0 = env->cr[0];
1866 pCtx->cr3 = env->cr[3];
1867 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1868 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1869 pCtx->cr4 = env->cr[4];
1870#ifdef TARGET_X86_64
1871 efer = env->efer;
1872 pCtx->msrEFER = efer;
1873#else
1874 efer = 0;
1875#endif
1876 Assert(env->pVCpu);
1877 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1878 if (rc != VINF_SUCCESS)
1879 {
1880 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1881 {
1882 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1883 remR3RaiseRC(env->pVM, rc);
1884 }
1885 else
1886 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1887 }
1888}
1889
1890
1891/**
1892 * Called from compiled code to run dma.
1893 *
1894 * @param env Pointer to the CPU environment.
1895 */
1896void remR3DmaRun(CPUX86State *env)
1897{
1898 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1899 PDMR3DmaRun(env->pVM);
1900 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1901}
1902
1903
1904/**
1905 * Called from compiled code to schedule pending timers in VMM
1906 *
1907 * @param env Pointer to the CPU environment.
1908 */
1909void remR3TimersRun(CPUX86State *env)
1910{
1911 LogFlow(("remR3TimersRun:\n"));
1912 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1913 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1914 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1915 TMR3TimerQueuesDo(env->pVM);
1916 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1917 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1918}
1919
1920
1921/**
1922 * Record trap occurrence
1923 *
1924 * @returns VBox status code
1925 * @param env Pointer to the CPU environment.
1926 * @param uTrap Trap nr
1927 * @param uErrorCode Error code
1928 * @param pvNextEIP Next EIP
1929 */
1930int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1931{
1932 PVM pVM = env->pVM;
1933#ifdef VBOX_WITH_STATISTICS
1934 static STAMCOUNTER s_aStatTrap[255];
1935 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1936#endif
1937
1938#ifdef VBOX_WITH_STATISTICS
1939 if (uTrap < 255)
1940 {
1941 if (!s_aRegisters[uTrap])
1942 {
1943 char szStatName[64];
1944 s_aRegisters[uTrap] = true;
1945 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1946 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1947 }
1948 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1949 }
1950#endif
1951 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1952 if( uTrap < 0x20
1953 && (env->cr[0] & X86_CR0_PE)
1954 && !(env->eflags & X86_EFL_VM))
1955 {
1956#ifdef DEBUG
1957 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1958#endif
1959 if (pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1960 {
1961 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1962 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1963 return VERR_REM_TOO_MANY_TRAPS;
1964 }
1965 if (pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1966 pVM->rem.s.cPendingExceptions = 1;
1967 pVM->rem.s.uPendingException = uTrap;
1968 pVM->rem.s.uPendingExcptEIP = env->eip;
1969 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1970 }
1971 else
1972 {
1973 pVM->rem.s.cPendingExceptions = 0;
1974 pVM->rem.s.uPendingException = uTrap;
1975 pVM->rem.s.uPendingExcptEIP = env->eip;
1976 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1977 }
1978 return VINF_SUCCESS;
1979}
1980
1981
1982/**
1983 * Clear current active trap
1984 *
1985 * @param pVM VM Handle.
1986 */
1987void remR3TrapClear(PVM pVM)
1988{
1989 pVM->rem.s.cPendingExceptions = 0;
1990 pVM->rem.s.uPendingException = 0;
1991 pVM->rem.s.uPendingExcptEIP = 0;
1992 pVM->rem.s.uPendingExcptCR2 = 0;
1993}
1994
1995
1996/**
1997 * Record previous call instruction addresses
1998 *
1999 * @param env Pointer to the CPU environment.
2000 */
2001void remR3RecordCall(CPUX86State *env)
2002{
2003 CSAMR3RecordCallAddress(env->pVM, env->eip);
2004}
2005
2006
2007/**
2008 * Syncs the internal REM state with the VM.
2009 *
2010 * This must be called before REMR3Run() is invoked whenever the REM
2011 * state is not up to date. Calling it several times in a row is not
2012 * permitted.
2013 *
2014 * @returns VBox status code.
2015 *
2016 * @param pVM VM Handle.
2017 * @param pVCpu VMCPU Handle.
2018 *
2019 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2020 * not do this since the majority of the callers don't want any unnecessary events
2021 * pending that would immediately interrupt execution.
2022 */
2023REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2024{
2025 register const CPUMCTX *pCtx;
2026 register unsigned fFlags;
2027 bool fHiddenSelRegsValid;
2028 unsigned i;
2029 TRPMEVENT enmType;
2030 uint8_t u8TrapNo;
2031 uint32_t uCpl;
2032 int rc;
2033
2034 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2035 Log2(("REMR3State:\n"));
2036
2037 pVM->rem.s.Env.pVCpu = pVCpu;
2038 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2039 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
2040
2041 Assert(!pVM->rem.s.fInREM);
2042 pVM->rem.s.fInStateSync = true;
2043
2044 /*
2045 * If we have to flush TBs, do that immediately.
2046 */
2047 if (pVM->rem.s.fFlushTBs)
2048 {
2049 STAM_COUNTER_INC(&gStatFlushTBs);
2050 tb_flush(&pVM->rem.s.Env);
2051 pVM->rem.s.fFlushTBs = false;
2052 }
2053
2054 /*
2055 * Copy the registers which require no special handling.
2056 */
2057#ifdef TARGET_X86_64
2058 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2059 Assert(R_EAX == 0);
2060 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2061 Assert(R_ECX == 1);
2062 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2063 Assert(R_EDX == 2);
2064 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2065 Assert(R_EBX == 3);
2066 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2067 Assert(R_ESP == 4);
2068 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2069 Assert(R_EBP == 5);
2070 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2071 Assert(R_ESI == 6);
2072 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2073 Assert(R_EDI == 7);
2074 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2075 pVM->rem.s.Env.regs[8] = pCtx->r8;
2076 pVM->rem.s.Env.regs[9] = pCtx->r9;
2077 pVM->rem.s.Env.regs[10] = pCtx->r10;
2078 pVM->rem.s.Env.regs[11] = pCtx->r11;
2079 pVM->rem.s.Env.regs[12] = pCtx->r12;
2080 pVM->rem.s.Env.regs[13] = pCtx->r13;
2081 pVM->rem.s.Env.regs[14] = pCtx->r14;
2082 pVM->rem.s.Env.regs[15] = pCtx->r15;
2083
2084 pVM->rem.s.Env.eip = pCtx->rip;
2085
2086 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2087#else
2088 Assert(R_EAX == 0);
2089 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2090 Assert(R_ECX == 1);
2091 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2092 Assert(R_EDX == 2);
2093 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2094 Assert(R_EBX == 3);
2095 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2096 Assert(R_ESP == 4);
2097 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2098 Assert(R_EBP == 5);
2099 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2100 Assert(R_ESI == 6);
2101 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2102 Assert(R_EDI == 7);
2103 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2104 pVM->rem.s.Env.eip = pCtx->eip;
2105
2106 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2107#endif
2108
2109 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2110
2111 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2112 for (i = 0; i < 8; i++)
2113 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2114
2115#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2116 /*
2117 * Clear the halted hidden flag (the interrupt waking up the CPU may
2118 * have been dispatched in raw mode).
2119 */
2120 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2121#endif
2122
2123 /*
2124 * Replay invlpg? Only if we're not flushing the TLB.
2125 */
2126 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2127 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2128 if (pVM->rem.s.cInvalidatedPages)
2129 {
2130 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2131 {
2132 RTUINT i;
2133
2134 pVM->rem.s.fIgnoreCR3Load = true;
2135 pVM->rem.s.fIgnoreInvlPg = true;
2136 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2137 {
2138 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2139 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2140 }
2141 pVM->rem.s.fIgnoreInvlPg = false;
2142 pVM->rem.s.fIgnoreCR3Load = false;
2143 }
2144 pVM->rem.s.cInvalidatedPages = 0;
2145 }
2146
2147 /* Replay notification changes. */
2148 REMR3ReplayHandlerNotifications(pVM);
2149
2150 /* Update MSRs; before CRx registers! */
2151 pVM->rem.s.Env.efer = pCtx->msrEFER;
2152 pVM->rem.s.Env.star = pCtx->msrSTAR;
2153 pVM->rem.s.Env.pat = pCtx->msrPAT;
2154#ifdef TARGET_X86_64
2155 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2156 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2157 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2158 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2159
2160 /* Update the internal long mode activate flag according to the new EFER value. */
2161 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2162 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2163 else
2164 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2165#endif
2166
2167 /*
2168 * Sync the A20 gate.
2169 */
2170 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2171 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2172 {
2173 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2174 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2175 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2176 }
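    /* (cpu_x86_set_a20 sets a20_mask to all ones when the gate is enabled
     * and to ~RT_BIT(20) when disabled, hence the probe of bit 20 above.) */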
2177
2178 /*
2179 * Registers which are rarely changed and require special handling / order when changed.
2180 */
2181 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2182 | CPUM_CHANGED_CR4
2183 | CPUM_CHANGED_CR0
2184 | CPUM_CHANGED_CR3
2185 | CPUM_CHANGED_GDTR
2186 | CPUM_CHANGED_IDTR
2187 | CPUM_CHANGED_SYSENTER_MSR
2188 | CPUM_CHANGED_LDTR
2189 | CPUM_CHANGED_CPUID
2190 | CPUM_CHANGED_FPU_REM
2191 )
2192 )
2193 {
2194 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2195 {
2196 pVM->rem.s.fIgnoreCR3Load = true;
2197 tlb_flush(&pVM->rem.s.Env, true);
2198 pVM->rem.s.fIgnoreCR3Load = false;
2199 }
2200
2201 /* CR4 before CR0! */
2202 if (fFlags & CPUM_CHANGED_CR4)
2203 {
2204 pVM->rem.s.fIgnoreCR3Load = true;
2205 pVM->rem.s.fIgnoreCpuMode = true;
2206 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2207 pVM->rem.s.fIgnoreCpuMode = false;
2208 pVM->rem.s.fIgnoreCR3Load = false;
2209 }
2210
2211 if (fFlags & CPUM_CHANGED_CR0)
2212 {
2213 pVM->rem.s.fIgnoreCR3Load = true;
2214 pVM->rem.s.fIgnoreCpuMode = true;
2215 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2216 pVM->rem.s.fIgnoreCpuMode = false;
2217 pVM->rem.s.fIgnoreCR3Load = false;
2218 }
2219
2220 if (fFlags & CPUM_CHANGED_CR3)
2221 {
2222 pVM->rem.s.fIgnoreCR3Load = true;
2223 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2224 pVM->rem.s.fIgnoreCR3Load = false;
2225 }
2226
2227 if (fFlags & CPUM_CHANGED_GDTR)
2228 {
2229 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2230 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2231 }
2232
2233 if (fFlags & CPUM_CHANGED_IDTR)
2234 {
2235 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2236 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2237 }
2238
2239 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2240 {
2241 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2242 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2243 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2244 }
2245
2246 if (fFlags & CPUM_CHANGED_LDTR)
2247 {
2248 if (fHiddenSelRegsValid)
2249 {
2250 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
2251 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
2252 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2253 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2254 }
2255 else
2256 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2257 }
2258
2259 if (fFlags & CPUM_CHANGED_CPUID)
2260 {
2261 uint32_t u32Dummy;
2262
2263 /*
2264 * Get the CPUID features.
2265 */
2266 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2267 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2268 }
2269
2270 /* Sync FPU state after CR4, CPUID and EFER (!). */
2271 if (fFlags & CPUM_CHANGED_FPU_REM)
2272 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2273 }
2274
2275 /*
2276 * Sync TR unconditionally to make life simpler.
2277 */
2278 pVM->rem.s.Env.tr.selector = pCtx->tr;
2279 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2280 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2281 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2282 /* Note! do_interrupt will fault if the busy flag is still set... */
2283 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
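    /* (An available 386 TSS has type 0x9 and a busy one 0xB; DESC_TSS_BUSY_MASK
     * covers that distinguishing type bit in the QEmu flags layout.) */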
2284
2285 /*
2286 * Update selector registers.
2287 * This must be done *after* we've synced gdt, ldt and crX registers
2288 * since we're reading the GDT/LDT in sync_seg. This can happen with a
2289 * saved state which takes a quick dip into raw mode, for instance.
2290 */
2291 /*
2292 * Stack: note that we check this one first, as the CPL might have changed.
2293 * The wrong CPL can cause QEmu to raise an exception in sync_seg!
2294 */
2295
2296 if (fHiddenSelRegsValid)
2297 {
2298 /* The hidden selector registers are valid in the CPU context. */
2299 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
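        /* Worked example of the conversion (illustrative values): a flat 32-bit
         * ring-0 code segment has the VBox attribute word 0xc09b (type=0xb, S=1,
         * DPL=0, P=1, D=1, G=1); shifted left by 8 and masked with 0xFFFFFF it
         * becomes 0x00c09b00, i.e. the attribute bits in positions 8..23 of
         * descriptor dword 2 as QEmu keeps them in 'flags'. */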
2300
2301 /* Set current CPL */
2302 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2303
2304 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2305 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2306 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2307 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2308 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2309 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2310 }
2311 else
2312 {
2313 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2314 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2315 {
2316 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2317
2318 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2319 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2320#ifdef VBOX_WITH_STATISTICS
2321 if (pVM->rem.s.Env.segs[R_SS].newselector)
2322 {
2323 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2324 }
2325#endif
2326 }
2327 else
2328 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2329
2330 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2331 {
2332 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2333 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2334#ifdef VBOX_WITH_STATISTICS
2335 if (pVM->rem.s.Env.segs[R_ES].newselector)
2336 {
2337 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2338 }
2339#endif
2340 }
2341 else
2342 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2343
2344 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2345 {
2346 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2347 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2348#ifdef VBOX_WITH_STATISTICS
2349 if (pVM->rem.s.Env.segs[R_CS].newselector)
2350 {
2351 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2352 }
2353#endif
2354 }
2355 else
2356 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2357
2358 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2359 {
2360 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2361 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2362#ifdef VBOX_WITH_STATISTICS
2363 if (pVM->rem.s.Env.segs[R_DS].newselector)
2364 {
2365 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2366 }
2367#endif
2368 }
2369 else
2370 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2371
2372 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2373 * be the same but not the base/limit. */
2374 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2375 {
2376 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2377 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2378#ifdef VBOX_WITH_STATISTICS
2379 if (pVM->rem.s.Env.segs[R_FS].newselector)
2380 {
2381 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2382 }
2383#endif
2384 }
2385 else
2386 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2387
2388 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2389 {
2390 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2391 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2392#ifdef VBOX_WITH_STATISTICS
2393 if (pVM->rem.s.Env.segs[R_GS].newselector)
2394 {
2395 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2396 }
2397#endif
2398 }
2399 else
2400 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2401 }
2402
2403 /*
2404 * Check for traps.
2405 */
2406 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2407 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2408 if (RT_SUCCESS(rc))
2409 {
2410#ifdef DEBUG
2411 if (u8TrapNo == 0x80)
2412 {
2413 remR3DumpLnxSyscall(pVCpu);
2414 remR3DumpOBsdSyscall(pVCpu);
2415 }
2416#endif
2417
2418 pVM->rem.s.Env.exception_index = u8TrapNo;
2419 if (enmType != TRPM_SOFTWARE_INT)
2420 {
2421 pVM->rem.s.Env.exception_is_int = 0;
2422 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2423 }
2424 else
2425 {
2426 /*
2427 * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2428 * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2429 * for int03 and into.
2430 */
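            /* (Opcode reference for the checks below: int imm8 is 0xCD ib, two
             * bytes; int3 (0xCC) and into (0xCE) are single-byte forms.) */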
2431 pVM->rem.s.Env.exception_is_int = 1;
2432 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2433 /* int 3 may be generated by one-byte 0xcc */
2434 if (u8TrapNo == 3)
2435 {
2436 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2437 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2438 }
2439 /* int 4 may be generated by one-byte 0xce */
2440 else if (u8TrapNo == 4)
2441 {
2442 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2443 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2444 }
2445 }
2446
2447 /* get error code and cr2 if needed. */
2448 if (enmType == TRPM_TRAP)
2449 {
2450 switch (u8TrapNo)
2451 {
2452 case 0x0e:
2453 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2454 /* fallthru */
2455 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2456 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2457 break;
2458
2459 case 0x11: case 0x08:
2460 default:
2461 pVM->rem.s.Env.error_code = 0;
2462 break;
2463 }
2464 }
2465 else
2466 pVM->rem.s.Env.error_code = 0;
2467
2468 /*
2469 * We can now reset the active trap since the recompiler is gonna have a go at it.
2470 */
2471 rc = TRPMResetTrap(pVCpu);
2472 AssertRC(rc);
2473 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2474 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2475 }
2476
2477 /*
2478 * Clear old interrupt request flags; Check for pending hardware interrupts.
2479 * (See @remark for why we don't check for other FFs.)
2480 */
2481 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2482 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2483 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2484 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2485
2486 /*
2487 * We're now in REM mode.
2488 */
2489 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2490 pVM->rem.s.fInREM = true;
2491 pVM->rem.s.fInStateSync = false;
2492 pVM->rem.s.cCanExecuteRaw = 0;
2493 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2494 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2495 return VINF_SUCCESS;
2496}
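/* A hedged sketch of the cycle REMR3State belongs to, as driven by the
 * execution manager (simplified; FF checks and error handling omitted):
 */
#if 0
    int rc = REMR3State(pVM, pVCpu);       /* sync VMM -> REM, enter REM mode */
    if (RT_SUCCESS(rc))
    {
        int rc2;
        rc  = REMR3Run(pVM, pVCpu);        /* recompile and execute guest code */
        rc2 = REMR3StateBack(pVM, pVCpu);  /* sync REM -> VMM, leave REM mode */
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
#endif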
2497
2498
2499/**
2500 * Syncs back changes in the REM state to the VM state.
2501 *
2502 * This must be called after invoking REMR3Run().
2503 * Calling it several times in a row is not permitted.
2504 *
2505 * @returns VBox status code.
2506 *
2507 * @param pVM VM Handle.
2508 * @param pVCpu VMCPU Handle.
2509 */
2510REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2511{
2512 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2513 Assert(pCtx);
2514 unsigned i;
2515
2516 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2517 Log2(("REMR3StateBack:\n"));
2518 Assert(pVM->rem.s.fInREM);
2519
2520 /*
2521 * Copy back the registers.
2522 * This is done in the order they are declared in the CPUMCTX structure.
2523 */
2524
2525 /** @todo FOP */
2526 /** @todo FPUIP */
2527 /** @todo CS */
2528 /** @todo FPUDP */
2529 /** @todo DS */
2530
2531 /** @todo check if FPU/XMM was actually used in the recompiler */
2532 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2533//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2534
2535#ifdef TARGET_X86_64
2536 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2537 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2538 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2539 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2540 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2541 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2542 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2543 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2544 pCtx->r8 = pVM->rem.s.Env.regs[8];
2545 pCtx->r9 = pVM->rem.s.Env.regs[9];
2546 pCtx->r10 = pVM->rem.s.Env.regs[10];
2547 pCtx->r11 = pVM->rem.s.Env.regs[11];
2548 pCtx->r12 = pVM->rem.s.Env.regs[12];
2549 pCtx->r13 = pVM->rem.s.Env.regs[13];
2550 pCtx->r14 = pVM->rem.s.Env.regs[14];
2551 pCtx->r15 = pVM->rem.s.Env.regs[15];
2552
2553 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2554
2555#else
2556 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2557 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2558 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2559 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2560 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2561 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2562 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2563
2564 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2565#endif
2566
2567 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2568
2569#ifdef VBOX_WITH_STATISTICS
2570 if (pVM->rem.s.Env.segs[R_SS].newselector)
2571 {
2572 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2573 }
2574 if (pVM->rem.s.Env.segs[R_GS].newselector)
2575 {
2576 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2577 }
2578 if (pVM->rem.s.Env.segs[R_FS].newselector)
2579 {
2580 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2581 }
2582 if (pVM->rem.s.Env.segs[R_ES].newselector)
2583 {
2584 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2585 }
2586 if (pVM->rem.s.Env.segs[R_DS].newselector)
2587 {
2588 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2589 }
2590 if (pVM->rem.s.Env.segs[R_CS].newselector)
2591 {
2592 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2593 }
2594#endif
2595 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2596 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2597 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2598 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2599 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2600
2601#ifdef TARGET_X86_64
2602 pCtx->rip = pVM->rem.s.Env.eip;
2603 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2604#else
2605 pCtx->eip = pVM->rem.s.Env.eip;
2606 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2607#endif
2608
2609 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2610 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2611 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2612 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2613 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2614 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2615
2616 for (i = 0; i < 8; i++)
2617 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2618
2619 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2620 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2621 {
2622 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2623 STAM_COUNTER_INC(&gStatREMGDTChange);
2624 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2625 }
2626
2627 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2628 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2629 {
2630 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2631 STAM_COUNTER_INC(&gStatREMIDTChange);
2632 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2633 }
2634
2635 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2636 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2637 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2638 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2639 {
2640 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2641 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2642 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2643 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2644 STAM_COUNTER_INC(&gStatREMLDTRChange);
2645 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2646 }
2647
2648 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2649 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2650 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2651 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2652 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2653 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2654 : 0) )
2655 {
2656 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2657 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2658 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2659 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2660 pCtx->tr = pVM->rem.s.Env.tr.selector;
2661 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2662 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2663 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2664 if (pCtx->trHid.Attr.u)
2665 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2666 STAM_COUNTER_INC(&gStatREMTRChange);
2667 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2668 }
2669
2670 /** @todo These values could still be out of sync! */
2671 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2672 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2673 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2674 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2675
2676 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2677 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2678 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2679
2680 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2681 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2682 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2683
2684 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2685 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2686 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2687
2688 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2689 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2690 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2691
2692 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2693 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2694 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2695
2696 /* Sysenter MSR */
2697 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2698 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2699 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2700
2701 /* System MSRs. */
2702 pCtx->msrEFER = pVM->rem.s.Env.efer;
2703 pCtx->msrSTAR = pVM->rem.s.Env.star;
2704 pCtx->msrPAT = pVM->rem.s.Env.pat;
2705#ifdef TARGET_X86_64
2706 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2707 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2708 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2709 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2710#endif
2711
2712 remR3TrapClear(pVM);
2713
2714 /*
2715 * Check for traps.
2716 */
2717 if ( pVM->rem.s.Env.exception_index >= 0
2718 && pVM->rem.s.Env.exception_index < 256)
2719 {
2720 int rc;
2721
2722 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2723 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2724 AssertRC(rc);
2725 switch (pVM->rem.s.Env.exception_index)
2726 {
2727 case 0x0e:
2728 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2729 /* fallthru */
2730 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2731 case 0x11: case 0x08: /* 0 */
2732 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2733 break;
2734 }
2735
2736 }
2737
2738 /*
2739 * We're no longer in REM mode.
2740 */
2741 CPUMR3RemLeave(pVCpu,
2742 HWACCMIsEnabled(pVM)
2743 || ( pVM->rem.s.Env.segs[R_SS].newselector
2744 | pVM->rem.s.Env.segs[R_GS].newselector
2745 | pVM->rem.s.Env.segs[R_FS].newselector
2746 | pVM->rem.s.Env.segs[R_ES].newselector
2747 | pVM->rem.s.Env.segs[R_DS].newselector
2748 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2749 );
2750 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2751 pVM->rem.s.fInREM = false;
2752 pVM->rem.s.pCtx = NULL;
2753 pVM->rem.s.Env.pVCpu = NULL;
2754 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2755 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2756 return VINF_SUCCESS;
2757}
2758
2759
2760/**
2761 * This is called by the disassembler when it wants to update the cpu state
2762 * before, for instance, doing a register dump.
2763 */
2764static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2765{
2766 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2767 unsigned i;
2768
2769 Assert(pVM->rem.s.fInREM);
2770
2771 /*
2772 * Copy back the registers.
2773 * This is done in the order they are declared in the CPUMCTX structure.
2774 */
2775
2776 /** @todo FOP */
2777 /** @todo FPUIP */
2778 /** @todo CS */
2779 /** @todo FPUDP */
2780 /** @todo DS */
2781 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2782 pCtx->fpu.MXCSR = 0;
2783 pCtx->fpu.MXCSR_MASK = 0;
2784
2785 /** @todo check if FPU/XMM was actually used in the recompiler */
2786 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2787//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2788
2789#ifdef TARGET_X86_64
2790 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2791 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2792 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2793 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2794 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2795 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2796 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2797 pCtx->r8 = pVM->rem.s.Env.regs[8];
2798 pCtx->r9 = pVM->rem.s.Env.regs[9];
2799 pCtx->r10 = pVM->rem.s.Env.regs[10];
2800 pCtx->r11 = pVM->rem.s.Env.regs[11];
2801 pCtx->r12 = pVM->rem.s.Env.regs[12];
2802 pCtx->r13 = pVM->rem.s.Env.regs[13];
2803 pCtx->r14 = pVM->rem.s.Env.regs[14];
2804 pCtx->r15 = pVM->rem.s.Env.regs[15];
2805
2806 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2807#else
2808 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2809 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2810 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2811 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2812 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2813 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2814 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2815
2816 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2817#endif
2818
2819 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2820
2821 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2822 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2823 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2824 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2825 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2826
2827#ifdef TARGET_X86_64
2828 pCtx->rip = pVM->rem.s.Env.eip;
2829 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2830#else
2831 pCtx->eip = pVM->rem.s.Env.eip;
2832 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2833#endif
2834
2835 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2836 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2837 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2838 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2839 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2840 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2841
2842 for (i = 0; i < 8; i++)
2843 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2844
2845 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2846 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2847 {
2848 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2849 STAM_COUNTER_INC(&gStatREMGDTChange);
2850 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2851 }
2852
2853 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2854 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2855 {
2856 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2857 STAM_COUNTER_INC(&gStatREMIDTChange);
2858 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2859 }
2860
2861 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2862 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2863 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2864 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2865 {
2866 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2867 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2868 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2869 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF; /* mask must match the check above */
2870 STAM_COUNTER_INC(&gStatREMLDTRChange);
2871 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2872 }
2873
2874 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2875 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2876 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2877 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2878 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2879 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2880 : 0) )
2881 {
2882 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2883 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2884 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2885 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2886 pCtx->tr = pVM->rem.s.Env.tr.selector;
2887 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2888 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2889 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2890 if (pCtx->trHid.Attr.u)
2891 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2892 STAM_COUNTER_INC(&gStatREMTRChange);
2893 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2894 }
2895
2896 /** @todo These values could still be out of sync! */
2897 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2898 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2899 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2900 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2901
2902 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2903 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2904 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2905
2906 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2907 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2908 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2909
2910 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2911 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2912 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2913
2914 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2915 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2916 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2917
2918 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2919 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2920 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2921
2922 /* Sysenter MSR */
2923 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2924 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2925 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2926
2927 /* System MSRs. */
2928 pCtx->msrEFER = pVM->rem.s.Env.efer;
2929 pCtx->msrSTAR = pVM->rem.s.Env.star;
2930 pCtx->msrPAT = pVM->rem.s.Env.pat;
2931#ifdef TARGET_X86_64
2932 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2933 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2934 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2935 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2936#endif
2937
2938}
2939
2940
2941/**
2942 * Update the VMM state information if we're currently in REM.
2943 *
2944 * This method is used by DBGF and PDM devices when there is any uncertainty about whether
2945 * we're currently executing in REM while the VMM state is invalid. This method will of
2946 * course check that we're executing in REM before syncing any data over to the VMM.
2947 *
2948 * @param pVM The VM handle.
2949 * @param pVCpu The VMCPU handle.
2950 */
2951REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2952{
2953 if (pVM->rem.s.fInREM)
2954 remR3StateUpdate(pVM, pVCpu);
2955}
2956
2957
2958#undef LOG_GROUP
2959#define LOG_GROUP LOG_GROUP_REM
2960
2961
2962/**
2963 * Notify the recompiler about Address Gate 20 state change.
2964 *
2965 * This notification is required since A20 gate changes are
2966 * initiated from a device driver and the VM might just as
2967 * well be in REM mode as in RAW mode.
2968 *
2969 * @param pVM VM handle.
2970 * @param pVCpu VMCPU handle.
2971 * @param fEnable True if the gate should be enabled.
2972 * False if the gate should be disabled.
2973 */
2974REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2975{
2976 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2977 VM_ASSERT_EMT(pVM);
2978
2979 /** @todo SMP and the A20 gate... */
2980 if (pVM->rem.s.Env.pVCpu == pVCpu)
2981 {
2982 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2983 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2984 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2985 }
2986}
2987
2988
2989/**
2990 * Replays the handler notification changes.
2991 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2992 *
2993 * @param pVM VM handle.
2994 */
2995REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2996{
2997 /*
2998 * Replay the flushes.
2999 */
3000 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3001 VM_ASSERT_EMT(pVM);
3002
3003 /** @todo this isn't ensuring correct replay order. */
3004 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3005 {
3006 uint32_t idxNext;
3007 uint32_t idxRevHead;
3008 uint32_t idxHead;
3009#ifdef VBOX_STRICT
3010 int32_t c = 0;
3011#endif
3012
3013 /* Lockless purging of pending notifications. */
3014 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3015 if (idxHead == UINT32_MAX)
3016 return;
3017 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3018
3019 /*
3020 * Reverse the list to process it in FIFO order.
3021 */
3022 idxRevHead = UINT32_MAX;
3023 do
3024 {
3025 /* Save the index of the next rec. */
3026 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3027 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3028 /* Push the record onto the reversed list. */
3029 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3030 idxRevHead = idxHead;
3031 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3032 /* Advance. */
3033 idxHead = idxNext;
3034 } while (idxHead != UINT32_MAX);
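        /* Example (illustrative indices): records queued as A, then B, then C
         * arrive here as the LIFO chain C->B->A; the reversal above turns that
         * into A->B->C, so notifications replay in the order they were queued. */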
3035
3036 /*
3037 * Loop thru the list, reinserting the records into the free list as they are
3038 * processed, to avoid having other EMTs run out of entries while we're flushing.
3039 */
3040 idxHead = idxRevHead;
3041 do
3042 {
3043 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3044 uint32_t idxCur;
3045 Assert(--c >= 0);
3046
3047 switch (pCur->enmKind)
3048 {
3049 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3050 remR3NotifyHandlerPhysicalRegister(pVM,
3051 pCur->u.PhysicalRegister.enmType,
3052 pCur->u.PhysicalRegister.GCPhys,
3053 pCur->u.PhysicalRegister.cb,
3054 pCur->u.PhysicalRegister.fHasHCHandler);
3055 break;
3056
3057 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3058 remR3NotifyHandlerPhysicalDeregister(pVM,
3059 pCur->u.PhysicalDeregister.enmType,
3060 pCur->u.PhysicalDeregister.GCPhys,
3061 pCur->u.PhysicalDeregister.cb,
3062 pCur->u.PhysicalDeregister.fHasHCHandler,
3063 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3064 break;
3065
3066 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3067 remR3NotifyHandlerPhysicalModify(pVM,
3068 pCur->u.PhysicalModify.enmType,
3069 pCur->u.PhysicalModify.GCPhysOld,
3070 pCur->u.PhysicalModify.GCPhysNew,
3071 pCur->u.PhysicalModify.cb,
3072 pCur->u.PhysicalModify.fHasHCHandler,
3073 pCur->u.PhysicalModify.fRestoreAsRAM);
3074 break;
3075
3076 default:
3077 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3078 break;
3079 }
3080
3081 /*
3082 * Advance idxHead.
3083 */
3084 idxCur = idxHead;
3085 idxHead = pCur->idxNext;
3086 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3087
3088 /*
3089 * Put the record back into the free list.
3090 */
3091 do
3092 {
3093 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3094 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3095 ASMCompilerBarrier();
3096 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3097 } while (idxHead != UINT32_MAX);
3098
3099#ifdef VBOX_STRICT
3100 if (pVM->cCpus == 1)
3101 {
3102 unsigned c;
3103 /* Check that all records are now on the free list. */
3104 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3105 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3106 c++;
3107 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3108 }
3109#endif
3110 }
3111}
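/* For contrast, a hedged sketch of the matching lockless pop a producer would
 * use to grab a free record before queuing a notification (the real producer
 * lives elsewhere in REM; the helper name is illustrative and ABA concerns
 * are ignored for brevity):
 */
#if 0
static PREMHANDLERNOTIFICATION remR3ExampleAllocRecord(PVM pVM)
{
    uint32_t idxHead, idxNext;
    do
    {
        idxHead = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
        if (idxHead == UINT32_MAX)
            return NULL;                /* no free records left */
        idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
    } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxNext, idxHead));
    return &pVM->rem.s.aHandlerNotifications[idxHead];
}
#endif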
3112
3113
3114/**
3115 * Notify REM about changed code page.
3116 *
3117 * @returns VBox status code.
3118 * @param pVM VM handle.
3119 * @param pVCpu VMCPU handle.
3120 * @param pvCodePage Code page address
3121 */
3122REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3123{
3124#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3125 int rc;
3126 RTGCPHYS PhysGC;
3127 uint64_t flags;
3128
3129 VM_ASSERT_EMT(pVM);
3130
3131 /*
3132 * Get the physical page address.
3133 */
3134 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3135 if (rc == VINF_SUCCESS)
3136 {
3137 /*
3138 * Sync the required registers and flush the whole page.
3139 * (It is easier to flush the whole page than to notify about each
3140 * physical byte that was changed.)
3141 */
3142 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3143 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3144 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3145 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3146
3147 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3148 }
3149#endif
3150 return VINF_SUCCESS;
3151}
3152
3153
3154/**
3155 * Notification about a successful MMR3PhysRegister() call.
3156 *
3157 * @param pVM VM handle.
3158 * @param GCPhys The physical address of the RAM.
3159 * @param cb Size of the memory.
3160 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3161 */
3162REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3163{
3164 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3165 VM_ASSERT_EMT(pVM);
3166
3167 /*
3168 * Validate input - we trust the caller.
3169 */
3170 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3171 Assert(cb);
3172 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3173 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3174
3175 /*
3176 * Base ram? Update GCPhysLastRam.
3177 */
3178 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3179 {
3180 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3181 {
3182 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3183 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3184 }
3185 }
3186
3187 /*
3188 * Register the ram.
3189 */
3190 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3191
3192 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3193 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3194 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3195
3196 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3197}
3198
3199
3200/**
3201 * Notification about a successful MMR3PhysRomRegister() call.
3202 *
3203 * @param pVM VM handle.
3204 * @param GCPhys The physical address of the ROM.
3205 * @param cb The size of the ROM.
3206 * @param pvCopy Pointer to the ROM copy.
3207 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3208 * This function will be called whenever the protection of the
3209 * shadow ROM changes (at reset and end of POST).
3210 */
3211REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3212{
3213 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3214 VM_ASSERT_EMT(pVM);
3215
3216 /*
3217 * Validate input - we trust the caller.
3218 */
3219 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3220 Assert(cb);
3221 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3222
3223 /*
3224 * Register the rom.
3225 */
3226 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3227
3228 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3229 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3230 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3231
3232 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3233}
3234
3235
3236/**
3237 * Notification about a successful memory deregistration or reservation.
3238 *
3239 * @param pVM VM Handle.
3240 * @param GCPhys Start physical address.
3241 * @param cb The size of the range.
3242 */
3243REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3244{
3245 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3246 VM_ASSERT_EMT(pVM);
3247
3248 /*
3249 * Validate input - we trust the caller.
3250 */
3251 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3252 Assert(cb);
3253 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3254
3255 /*
3256 * Unassign the memory.
3257 */
3258 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3259
3260 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3261 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3262 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3263
3264 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3265}
3266
3267
3268/**
3269 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3270 *
3271 * @param pVM VM Handle.
3272 * @param enmType Handler type.
3273 * @param GCPhys Handler range address.
3274 * @param cb Size of the handler range.
3275 * @param fHasHCHandler Set if the handler has a HC callback function.
3276 *
3277 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3278 * Handler memory type to memory which has no HC handler.
3279 */
3280static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3281{
3282 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3283 enmType, GCPhys, cb, fHasHCHandler));
3284
3285 VM_ASSERT_EMT(pVM);
3286 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3287 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3288
3289
3290 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3291
3292 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3293 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3294 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3295 else if (fHasHCHandler)
3296 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3297 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3298
3299 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3300}
3301
3302/**
3303 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3304 *
3305 * @param pVM VM Handle.
3306 * @param enmType Handler type.
3307 * @param GCPhys Handler range address.
3308 * @param cb Size of the handler range.
3309 * @param fHasHCHandler Set if the handler has a HC callback function.
3310 *
3311 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3312 * Handler memory type to memory which has no HC handler.
3313 */
3314REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3315{
3316 REMR3ReplayHandlerNotifications(pVM);
3317
3318 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3319}
3320
3321/**
3322 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3323 *
3324 * @param pVM VM Handle.
3325 * @param enmType Handler type.
3326 * @param GCPhys Handler range address.
3327 * @param cb Size of the handler range.
3328 * @param fHasHCHandler Set if the handler has a HC callback function.
3329 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3330 */
3331static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3332{
3333 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3334 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3335 VM_ASSERT_EMT(pVM);
3336
3337
3338 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3339
3340 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3341 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3342 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3343 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3344 else if (fHasHCHandler)
3345 {
3346 if (!fRestoreAsRAM)
3347 {
3348 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3349 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3350 }
3351 else
3352 {
3353 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3354 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3355 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3356 }
3357 }
3358 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3359
3360 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3361}
3362
3363/**
3364 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3365 *
3366 * @param pVM VM Handle.
3367 * @param enmType Handler type.
3368 * @param GCPhys Handler range address.
3369 * @param cb Size of the handler range.
3370 * @param fHasHCHandler Set if the handler has a HC callback function.
3371 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3372 */
3373REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3374{
3375 REMR3ReplayHandlerNotifications(pVM);
3376 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3377}
3378
3379
3380/**
3381 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3382 *
3383 * @param pVM VM Handle.
3384 * @param enmType Handler type.
3385 * @param GCPhysOld Old handler range address.
3386 * @param GCPhysNew New handler range address.
3387 * @param cb Size of the handler range.
3388 * @param fHasHCHandler Set if the handler has a HC callback function.
3389 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3390 */
3391static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3392{
3393 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3394 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3395 VM_ASSERT_EMT(pVM);
3396 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3397
3398 if (fHasHCHandler)
3399 {
3400 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3401
3402 /*
3403 * Reset the old page.
3404 */
3405 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3406 if (!fRestoreAsRAM)
3407 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3408 else
3409 {
3410 /* This is not perfect, but it'll do for PD monitoring... */
3411 Assert(cb == PAGE_SIZE);
3412 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3413 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3414 }
3415
3416 /*
3417 * Update the new page.
3418 */
3419 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3420 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3421 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3422 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3423
3424 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3425 }
3426}
3427
3428/**
3429 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3430 *
3431 * @param pVM VM Handle.
3432 * @param enmType Handler type.
3433 * @param GCPhysOld Old handler range address.
3434 * @param GCPhysNew New handler range address.
3435 * @param cb Size of the handler range.
3436 * @param fHasHCHandler Set if the handler has a HC callback function.
3437 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3438 */
3439REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3440{
3441 REMR3ReplayHandlerNotifications(pVM);
3442
3443 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3444}
3445
3446/**
3447 * Checks if we're handling access to this page or not.
3448 *
3449 * @returns true if we're trapping access.
3450 * @returns false if we aren't.
3451 * @param pVM The VM handle.
3452 * @param GCPhys The physical address.
3453 *
3454 * @remark This function will only work correctly in VBOX_STRICT builds!
3455 */
3456REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3457{
3458#ifdef VBOX_STRICT
3459 unsigned long off;
3460 REMR3ReplayHandlerNotifications(pVM);
3461
3462 off = get_phys_page_offset(GCPhys);
3463 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3464 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3465 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3466#else
3467 return false;
3468#endif
3469}
3470
3471
3472/**
3473 * Deals with a rare case in get_phys_addr_code where the code
3474 * is being monitored.
3475 *
3476 * It could also be an MMIO page, in which case we will raise a fatal error.
3477 *
3478 * @returns The physical address corresponding to addr.
3479 * @param env The cpu environment.
3480 * @param addr The virtual address.
3481 * @param pTLBEntry The TLB entry.
 * @param ioTLBEntry The I/O TLB entry (the memory type is in the low bits).
3482 */
3483target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3484 target_ulong addr,
3485 CPUTLBEntry *pTLBEntry,
3486 target_phys_addr_t ioTLBEntry)
3487{
3488 PVM pVM = env->pVM;
3489
3490 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3491 {
3492 /* If the code memory is being monitored, the corresponding IOTLB entry will
3493 have the handler IO type and its addend will supply the real physical
3494 address; handlers are always passed a PA, whether or not we store a VA in the TLB. */
3495 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3496 return ret;
3497 }
3498 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3499 "*** handlers\n",
3500 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3501 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3502 LogRel(("*** mmio\n"));
3503 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3504 LogRel(("*** phys\n"));
3505 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3506 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3507 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3508 AssertFatalFailed();
3509}
3510
3511/**
3512 * Read guest RAM and ROM.
3513 *
3514 * @param SrcGCPhys The source address (guest physical).
3515 * @param pvDst The destination address.
3516 * @param cb Number of bytes to read.
3517 */
3518void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3519{
3520 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3521 VBOX_CHECK_ADDR(SrcGCPhys);
3522 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3523#ifdef VBOX_DEBUG_PHYS
3524 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3525#endif
3526 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3527}
3528
3529
3530/**
3531 * Read guest RAM and ROM, unsigned 8-bit.
3532 *
3533 * @param SrcGCPhys The source address (guest physical).
3534 */
3535RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3536{
3537 uint8_t val;
3538 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3539 VBOX_CHECK_ADDR(SrcGCPhys);
3540 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3541 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3542#ifdef VBOX_DEBUG_PHYS
3543 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3544#endif
3545 return val;
3546}
3547
3548
3549/**
3550 * Read guest RAM and ROM, signed 8-bit.
3551 *
3552 * @param SrcGCPhys The source address (guest physical).
3553 */
3554RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3555{
3556 int8_t val;
3557 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3558 VBOX_CHECK_ADDR(SrcGCPhys);
3559 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3560 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3561#ifdef VBOX_DEBUG_PHYS
3562 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3563#endif
3564 return val;
3565}
3566
3567
3568/**
3569 * Read guest RAM and ROM, unsigned 16-bit.
3570 *
3571 * @param SrcGCPhys The source address (guest physical).
3572 */
3573RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3574{
3575 uint16_t val;
3576 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3577 VBOX_CHECK_ADDR(SrcGCPhys);
3578 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3579 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3580#ifdef VBOX_DEBUG_PHYS
3581 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3582#endif
3583 return val;
3584}
3585
3586
3587/**
3588 * Read guest RAM and ROM, signed 16-bit.
3589 *
3590 * @param SrcGCPhys The source address (guest physical).
3591 */
3592RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3593{
3594 int16_t val;
3595 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3596 VBOX_CHECK_ADDR(SrcGCPhys);
3597 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3598 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3599#ifdef VBOX_DEBUG_PHYS
3600 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3601#endif
3602 return val;
3603}
3604
3605
3606/**
3607 * Read guest RAM and ROM, unsigned 32-bit.
3608 *
3609 * @param SrcGCPhys The source address (guest physical).
3610 */
3611RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3612{
3613 uint32_t val;
3614 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3615 VBOX_CHECK_ADDR(SrcGCPhys);
3616 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3617 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3618#ifdef VBOX_DEBUG_PHYS
3619 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3620#endif
3621 return val;
3622}
3623
3624
3625/**
3626 * Read guest RAM and ROM, signed 32-bit.
3627 *
3628 * @param SrcGCPhys The source address (guest physical).
3629 */
3630RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3631{
3632 int32_t val;
3633 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3634 VBOX_CHECK_ADDR(SrcGCPhys);
3635 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3636 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3637#ifdef VBOX_DEBUG_PHYS
3638 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3639#endif
3640 return val;
3641}
3642
3643
3644/**
3645 * Read guest RAM and ROM, unsigned 64-bit.
3646 *
3647 * @param SrcGCPhys The source address (guest physical).
3648 */
3649uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3650{
3651 uint64_t val;
3652 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3653 VBOX_CHECK_ADDR(SrcGCPhys);
3654 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3655 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3656#ifdef VBOX_DEBUG_PHYS
3657 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3658#endif
3659 return val;
3660}
3661
3662
3663/**
3664 * Read guest RAM and ROM, signed 64-bit.
3665 *
3666 * @param SrcGCPhys The source address (guest physical).
3667 */
3668int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3669{
3670 int64_t val;
3671 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3672 VBOX_CHECK_ADDR(SrcGCPhys);
3673 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3674 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3675#ifdef VBOX_DEBUG_PHYS
3676 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3677#endif
3678 return val;
3679}
3680
3681
3682/**
3683 * Write guest RAM.
3684 *
3685 * @param DstGCPhys The destination address (guest physical).
3686 * @param pvSrc The source address.
3687 * @param cb Number of bytes to write
3688 */
3689void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3690{
3691 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3692 VBOX_CHECK_ADDR(DstGCPhys);
3693 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3694 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3695#ifdef VBOX_DEBUG_PHYS
3696 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3697#endif
3698}
3699
3700
3701/**
3702 * Write guest RAM, unsigned 8-bit.
3703 *
3704 * @param DstGCPhys The destination address (guest physical).
3705 * @param val The value to write.
3706 */
3707void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3708{
3709 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3710 VBOX_CHECK_ADDR(DstGCPhys);
3711 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3712 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3713#ifdef VBOX_DEBUG_PHYS
3714 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3715#endif
3716}
3717
3718
3719/**
3720 * Write guest RAM, unsigned 16-bit.
3721 *
3722 * @param DstGCPhys The destination address (guest physical).
3723 * @param val The value to write.
3724 */
3725void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3726{
3727 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3728 VBOX_CHECK_ADDR(DstGCPhys);
3729 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3730 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3731#ifdef VBOX_DEBUG_PHYS
3732 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3733#endif
3734}
3735
3736
3737/**
3738 * Write guest RAM, unsigned 32-bit.
3739 *
3740 * @param DstGCPhys The destination address (guest physical).
3741 * @param val The value to write.
3742 */
3743void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3744{
3745 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3746 VBOX_CHECK_ADDR(DstGCPhys);
3747 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3748 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3749#ifdef VBOX_DEBUG_PHYS
3750 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3751#endif
3752}
3753
3754
3755/**
3756 * Write guest RAM, unsigned 64-bit.
3757 *
3758 * @param DstGCPhys The destination address (guest physical).
3759 * @param val The value to write.
3760 */
3761void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3762{
3763 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3764 VBOX_CHECK_ADDR(DstGCPhys);
3765 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3766 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3767#ifdef VBOX_DEBUG_PHYS
3768 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3769#endif
3770}
3771
3772#undef LOG_GROUP
3773#define LOG_GROUP LOG_GROUP_REM_MMIO
3774
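/*
 * QEMU-side access callbacks for the REM MMIO memory type (iMMIOMemType, see
 * the handler registration notifications above); each access is forwarded to
 * IOM and unexpected status codes trigger an assertion.
 */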
3775/** Read MMIO memory. */
3776static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3777{
3778 uint32_t u32 = 0;
3779 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3780 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3781 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3782 return u32;
3783}
3784
3785/** Read MMIO memory. */
3786static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3787{
3788 uint32_t u32 = 0;
3789 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3790 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3791 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3792 return u32;
3793}
3794
3795/** Read MMIO memory. */
3796static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3797{
3798 uint32_t u32 = 0;
3799 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3800 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3801 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3802 return u32;
3803}
3804
3805/** Write to MMIO memory. */
3806static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3807{
3808 int rc;
3809 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3810 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3811 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3812}
3813
3814/** Write to MMIO memory. */
3815static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3816{
3817 int rc;
3818 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3819 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3820 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3821}
3822
3823/** Write to MMIO memory. */
3824static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3825{
3826 int rc;
3827 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3828 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3829 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3830}
3831
3832
3833#undef LOG_GROUP
3834#define LOG_GROUP LOG_GROUP_REM_HANDLER
3835
3836/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3837
3838static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3839{
3840 uint8_t u8;
3841 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3842 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3843 return u8;
3844}
3845
3846static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3847{
3848 uint16_t u16;
3849 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3850 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3851 return u16;
3852}
3853
3854static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3855{
3856 uint32_t u32;
3857 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3858 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3859 return u32;
3860}
3861
3862static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3863{
3864 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3865 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3866}
3867
3868static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3869{
3870 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3871 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3872}
3873
3874static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3875{
3876 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3877 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3878}
3879
3880/* -+- disassembly -+- */
3881
3882#undef LOG_GROUP
3883#define LOG_GROUP LOG_GROUP_REM_DISAS
3884
3885
3886/**
3887 * Enables or disables single-stepped disassembly.
3888 *
3889 * @returns VBox status code.
3890 * @param pVM VM handle.
3891 * @param fEnable To enable set this flag, to disable clear it.
3892 */
3893static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3894{
3895 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3896 VM_ASSERT_EMT(pVM);
3897
3898 if (fEnable)
3899 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3900 else
3901 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3902#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3903 cpu_single_step(&pVM->rem.s.Env, fEnable);
3904#endif
3905 return VINF_SUCCESS;
3906}
3907
3908
3909/**
3910 * Enables or disables single-stepped disassembly.
3911 *
3912 * @returns VBox status code.
3913 * @param pVM VM handle.
3914 * @param fEnable To enable set this flag, to disable clear it.
3915 */
3916REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3917{
3918 int rc;
3919
3920 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3921 if (VM_IS_EMT(pVM))
3922 return remR3DisasEnableStepping(pVM, fEnable);
3923
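    /* Not on the EMT: dispatch the call to it and wait, since the REM state
       must only be touched by the EMT (see VM_ASSERT_EMT in the worker). */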
3924 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3925 AssertRC(rc);
3926 return rc;
3927}
3928
3929
3930#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3931/**
3932 * External Debugger Command: .remstep [on|off|1|0]
3933 */
3934static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3935{
3936 int rc;
3937
3938 if (cArgs == 0)
3939 /*
3940 * Print the current status.
3941 */
3942 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3943 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3944 else
3945 {
3946 /*
3947 * Convert the argument and change the mode.
3948 */
3949 bool fEnable;
3950 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3951 if (RT_SUCCESS(rc))
3952 {
3953 rc = REMR3DisasEnableStepping(pVM, fEnable);
3954 if (RT_SUCCESS(rc))
3955 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3956 else
3957 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3958 }
3959 else
3960 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3961 }
3962 return rc;
3963}
3964#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3965
3966
3967/**
3968 * Disassembles one instruction and prints it to the log.
3969 *
3970 * @returns Success indicator.
3971 * @param env Pointer to the recompiler CPU structure.
3972 * @param f32BitCode Whether the code should be disassembled as 16 or
3973 * 32 bit code. If -1, the CS selector will be
3974 * inspected.
3975 * @param pszPrefix Optional prefix to print before each log line.
3976 */
3977bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3978{
3979 PVM pVM = env->pVM;
3980 const bool fLog = LogIsEnabled();
3981 const bool fLog2 = LogIs2Enabled();
3982 int rc = VINF_SUCCESS;
3983
3984 /*
3985 * Don't bother if there ain't any log output to do.
3986 */
3987 if (!fLog && !fLog2)
3988 return true;
3989
3990 /*
3991 * Update the state so DBGF reads the correct register values.
3992 */
3993 remR3StateUpdate(pVM, env->pVCpu);
3994
3995 /*
3996 * Log registers if requested.
3997 */
3998 if (fLog2)
3999 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
4000
4001 /*
4002 * Disassemble to log.
4003 */
4004 if (fLog)
4005 {
4006 PVMCPU pVCpu = VMMGetCpu(pVM);
4007 char szBuf[256];
4008 szBuf[0] = '\0';
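        /* Note: this rc shadows the outer one, so a disassembly failure is
           reported via szBuf below and never changes the function's return value. */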
4009 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
4010 pVCpu->idCpu,
4011 0, /* Sel */
4012 0, /* GCPtr */
4013 DBGF_DISAS_FLAGS_CURRENT_GUEST
4014 | DBGF_DISAS_FLAGS_DEFAULT_MODE
4015 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
4016 szBuf,
4017 sizeof(szBuf),
4018 NULL);
4019 if (RT_FAILURE(rc))
4020 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4021 if (pszPrefix && *pszPrefix)
4022 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4023 else
4024 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4025 }
4026
4027 return RT_SUCCESS(rc);
4028}
4029
4030
4031/**
4032 * Disassemble recompiled code.
4033 *
4034 * @param phFile Ignored; usually the logfile.
4035 * @param pvCode Pointer to the code block.
4036 * @param cb Size of the code block.
4037 */
4038void disas(FILE *phFile, void *pvCode, unsigned long cb)
4039{
4040 if (LogIs2Enabled())
4041 {
4042 unsigned off = 0;
4043 char szOutput[256];
4044 DISCPUSTATE Cpu;
4045
4046 memset(&Cpu, 0, sizeof(Cpu));
4047#ifdef RT_ARCH_X86
4048 Cpu.mode = CPUMODE_32BIT;
4049#else
4050 Cpu.mode = CPUMODE_64BIT;
4051#endif
4052
4053 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4054 while (off < cb)
4055 {
4056 uint32_t cbInstr;
4057 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4058 RTLogPrintf("%s", szOutput);
4059 else
4060 {
4061 RTLogPrintf("disas error\n");
4062 cbInstr = 1;
4063#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4064 break;
4065#endif
4066 }
4067 off += cbInstr;
4068 }
4069 }
4070}
4071
4072
4073/**
4074 * Disassemble guest code.
4075 *
4076 * @param phFile Ignored; usually the logfile.
4077 * @param uCode The guest address of the code to disassemble. (flat?)
4078 * @param cb Number of bytes to disassemble.
4079 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4080 */
4081void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4082{
4083 if (LogIs2Enabled())
4084 {
4085 PVM pVM = cpu_single_env->pVM;
4086 PVMCPU pVCpu = cpu_single_env->pVCpu;
4087 RTSEL cs;
4088 RTGCUINTPTR eip;
4089
4090 Assert(pVCpu);
4091
4092 /*
4093 * Update the state so DBGF reads the correct register values (flags).
4094 */
4095 remR3StateUpdate(pVM, pVCpu);
4096
4097 /*
4098 * Do the disassembling.
4099 */
4100 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4101 cs = cpu_single_env->segs[R_CS].selector;
4102 eip = uCode - cpu_single_env->segs[R_CS].base;
4103 for (;;)
4104 {
4105 char szBuf[256];
4106 uint32_t cbInstr;
4107 int rc = DBGFR3DisasInstrEx(pVM,
4108 pVCpu->idCpu,
4109 cs,
4110 eip,
4111 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4112 szBuf, sizeof(szBuf),
4113 &cbInstr);
4114 if (RT_SUCCESS(rc))
4115 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4116 else
4117 {
4118 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4119 cbInstr = 1;
4120 }
4121
4122 /* next */
4123 if (cb <= cbInstr)
4124 break;
4125 cb -= cbInstr;
4126 uCode += cbInstr;
4127 eip += cbInstr;
4128 }
4129 }
4130}
4131
4132
4133/**
4134 * Looks up a guest symbol.
4135 *
4136 * @returns Pointer to symbol name. This is a static buffer.
4137 * @param orig_addr The address in question.
4138 */
4139const char *lookup_symbol(target_ulong orig_addr)
4140{
4141 PVM pVM = cpu_single_env->pVM;
4142 RTGCINTPTR off = 0;
4143 RTDBGSYMBOL Sym;
4144 DBGFADDRESS Addr;
4145
4146 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4147 if (RT_SUCCESS(rc))
4148 {
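        /* The returned pointer refers to this static buffer; it is only valid
           until the next call and the function is not thread safe. */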
4149 static char szSym[sizeof(Sym.szName) + 48];
4150 if (!off)
4151 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4152 else if (off > 0)
4153 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4154 else
4155 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4156 return szSym;
4157 }
4158 return "<N/A>";
4159}
4160
4161
4162#undef LOG_GROUP
4163#define LOG_GROUP LOG_GROUP_REM
4164
4165
4166/* -+- FF notifications -+- */
4167
4168
4169/**
4170 * Notification about a pending interrupt.
4171 *
4172 * @param pVM VM Handle.
4173 * @param pVCpu VMCPU Handle.
4174 * @param u8Interrupt The interrupt number.
4175 * @thread The emulation thread.
4176 */
4177REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4178{
4179 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4180 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4181}
4182
4183/**
4184 * Queries the pending interrupt.
4185 *
4186 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4187 * @param pVM VM Handle.
4188 * @param pVCpu VMCPU Handle.
4189 * @thread The emulation thread.
4190 */
4191REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4192{
4193 return pVM->rem.s.u32PendingInterrupt;
4194}
4195
4196/**
4197 * Notification about the interrupt FF being set.
4198 *
4199 * @param pVM VM Handle.
4200 * @param pVCpu VMCPU Handle.
4201 * @thread The emulation thread.
4202 */
4203REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4204{
4205#ifndef IEM_VERIFICATION_MODE
4206 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4207 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4208 if (pVM->rem.s.fInREM)
4209 {
4210 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4211 CPU_INTERRUPT_EXTERNAL_HARD);
4212 }
4213#endif
4214}
4215
4216
4217/**
4218 * Notification about the interrupt FF being cleared.
4219 *
4220 * @param pVM VM Handle.
4221 * @param pVCpu VMCPU Handle.
4222 * @thread Any.
4223 */
4224REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4225{
4226 LogFlow(("REMR3NotifyInterruptClear:\n"));
4227 if (pVM->rem.s.fInREM)
4228 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4229}
4230
4231
4232/**
4233 * Notification about pending timer(s).
4234 *
4235 * @param pVM VM Handle.
4236 * @param pVCpuDst The target cpu for this notification.
4237 * TM will not broadcast pending timer events, but use
4238 * a dedicated EMT for them. So, only interrupt REM
4239 * execution if the given CPU is executing in REM.
4240 * @thread Any.
4241 */
4242REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4243{
4244#ifndef IEM_VERIFICATION_MODE
4245#ifndef DEBUG_bird
4246 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4247#endif
4248 if (pVM->rem.s.fInREM)
4249 {
4250 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4251 {
4252 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4253 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4254 CPU_INTERRUPT_EXTERNAL_TIMER);
4255 }
4256 else
4257 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4258 }
4259 else
4260 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4261#endif
4262}
4263
4264
4265/**
4266 * Notification about pending DMA transfers.
4267 *
4268 * @param pVM VM Handle.
4269 * @thread Any.
4270 */
4271REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4272{
4273#ifndef IEM_VERIFICATION_MODE
4274 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4275 if (pVM->rem.s.fInREM)
4276 {
4277 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4278 CPU_INTERRUPT_EXTERNAL_DMA);
4279 }
4280#endif
4281}
4282
4283
4284/**
4285 * Notification about pending queues.
4286 *
4287 * @param pVM VM Handle.
4288 * @thread Any.
4289 */
4290REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4291{
4292#ifndef IEM_VERIFICATION_MODE
4293 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4294 if (pVM->rem.s.fInREM)
4295 {
4296 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4297 CPU_INTERRUPT_EXTERNAL_EXIT);
4298 }
4299#endif
4300}
4301
4302
4303/**
4304 * Notification about pending FF set by an external thread.
4305 *
4306 * @param pVM VM handle.
4307 * @thread Any.
4308 */
4309REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4310{
4311#ifndef IEM_VERIFICATION_MODE
4312 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4313 if (pVM->rem.s.fInREM)
4314 {
4315 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4316 CPU_INTERRUPT_EXTERNAL_EXIT);
4317 }
4318#endif
4319}
4320
4321
4322#ifdef VBOX_WITH_STATISTICS
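/* Note: keep the statcode mapping in remR3ProfileStart and remR3ProfileStop
   below in sync with each other. */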
4323void remR3ProfileStart(int statcode)
4324{
4325 STAMPROFILEADV *pStat;
4326 switch(statcode)
4327 {
4328 case STATS_EMULATE_SINGLE_INSTR:
4329 pStat = &gStatExecuteSingleInstr;
4330 break;
4331 case STATS_QEMU_COMPILATION:
4332 pStat = &gStatCompilationQEmu;
4333 break;
4334 case STATS_QEMU_RUN_EMULATED_CODE:
4335 pStat = &gStatRunCodeQEmu;
4336 break;
4337 case STATS_QEMU_TOTAL:
4338 pStat = &gStatTotalTimeQEmu;
4339 break;
4340 case STATS_QEMU_RUN_TIMERS:
4341 pStat = &gStatTimers;
4342 break;
4343 case STATS_TLB_LOOKUP:
4344 pStat = &gStatTBLookup;
4345 break;
4346 case STATS_IRQ_HANDLING:
4347 pStat = &gStatIRQ;
4348 break;
4349 case STATS_RAW_CHECK:
4350 pStat = &gStatRawCheck;
4351 break;
4352
4353 default:
4354 AssertMsgFailed(("unknown stat %d\n", statcode));
4355 return;
4356 }
4357 STAM_PROFILE_ADV_START(pStat, a);
4358}
4359
4360
4361void remR3ProfileStop(int statcode)
4362{
4363 STAMPROFILEADV *pStat;
4364 switch(statcode)
4365 {
4366 case STATS_EMULATE_SINGLE_INSTR:
4367 pStat = &gStatExecuteSingleInstr;
4368 break;
4369 case STATS_QEMU_COMPILATION:
4370 pStat = &gStatCompilationQEmu;
4371 break;
4372 case STATS_QEMU_RUN_EMULATED_CODE:
4373 pStat = &gStatRunCodeQEmu;
4374 break;
4375 case STATS_QEMU_TOTAL:
4376 pStat = &gStatTotalTimeQEmu;
4377 break;
4378 case STATS_QEMU_RUN_TIMERS:
4379 pStat = &gStatTimers;
4380 break;
4381 case STATS_TLB_LOOKUP:
4382 pStat = &gStatTBLookup;
4383 break;
4384 case STATS_IRQ_HANDLING:
4385 pStat = &gStatIRQ;
4386 break;
4387 case STATS_RAW_CHECK:
4388 pStat = &gStatRawCheck;
4389 break;
4390 default:
4391 AssertMsgFailed(("unknown stat %d\n", statcode));
4392 return;
4393 }
4394 STAM_PROFILE_ADV_STOP(pStat, a);
4395}
4396#endif
4397
4398/**
4399 * Raise an RC, force rem exit.
4400 *
4401 * @param pVM VM handle.
4402 * @param rc The rc.
4403 */
4404void remR3RaiseRC(PVM pVM, int rc)
4405{
4406 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4407 Assert(pVM->rem.s.fInREM);
4408 VM_ASSERT_EMT(pVM);
4409 pVM->rem.s.rc = rc;
4410 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4411}
4412
4413
4414/* -+- timers -+- */
4415
4416uint64_t cpu_get_tsc(CPUX86State *env)
4417{
4418 STAM_COUNTER_INC(&gStatCpuGetTSC);
4419 return TMCpuTickGet(env->pVCpu);
4420}
4421
4422
4423/* -+- interrupts -+- */
4424
4425void cpu_set_ferr(CPUX86State *env)
4426{
4427 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4428 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4429}
4430
4431int cpu_get_pic_interrupt(CPUX86State *env)
4432{
4433 uint8_t u8Interrupt;
4434 int rc;
4435
4436 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4437 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4438 * with the (a)pic.
4439 */
4440 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4441 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4442 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4443 * remove this kludge. */
4444 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4445 {
4446 rc = VINF_SUCCESS;
4447 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4448 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4449 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4450 }
4451 else
4452 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4453
4454 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4455 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4456 if (RT_SUCCESS(rc))
4457 {
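        /* If more interrupts are pending, keep the hard interrupt request set
           so the recompiler asks us again. */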
4458 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4459 env->interrupt_request |= CPU_INTERRUPT_HARD;
4460 return u8Interrupt;
4461 }
4462 return -1;
4463}
4464
4465
4466/* -+- local apic -+- */
4467
4468#if 0 /* CPUMSetGuestMsr does this now. */
4469void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4470{
4471 int rc = PDMApicSetBase(env->pVM, val);
4472 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4473}
4474#endif
4475
4476uint64_t cpu_get_apic_base(CPUX86State *env)
4477{
4478 uint64_t u64;
4479 int rc = PDMApicGetBase(env->pVM, &u64);
4480 if (RT_SUCCESS(rc))
4481 {
4482 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4483 return u64;
4484 }
4485 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4486 return 0;
4487}
4488
4489void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4490{
4491 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4492 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4493}
4494
4495uint8_t cpu_get_apic_tpr(CPUX86State *env)
4496{
4497 uint8_t u8;
4498 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4499 if (RT_SUCCESS(rc))
4500 {
4501 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4502 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4503 }
4504 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4505 return 0;
4506}
4507
4508/**
4509 * Read an MSR.
4510 *
4511 * @retval 0 success.
4512 * @retval -1 failure, raise \#GP(0).
4513 * @param env The cpu state.
4514 * @param idMsr The MSR to read.
4515 * @param puValue Where to return the value.
4516 */
4517int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4518{
4519 Assert(env->pVCpu);
4520 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4521}
4522
4523/**
4524 * Write to an MSR.
4525 *
4526 * @retval 0 success.
4527 * @retval -1 failure, raise \#GP(0).
4528 * @param env The cpu state.
4529 * @param idMsr The MSR to write.
4530 * @param uValue The value to write.
4531 */
4532int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4533{
4534 Assert(env->pVCpu);
4535 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4536}
4537
4538/* -+- I/O Ports -+- */
4539
4540#undef LOG_GROUP
4541#define LOG_GROUP LOG_GROUP_REM_IOPORT
4542
4543void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4544{
4545 int rc;
4546
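    /* Skip the noisy ports: 0x80 (POST/delay), 0x70 (CMOS index) and
       0x61 (NMI status/speaker control). */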
4547 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4548 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4549
4550 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4551 if (RT_LIKELY(rc == VINF_SUCCESS))
4552 return;
4553 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4554 {
4555 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4556 remR3RaiseRC(env->pVM, rc);
4557 return;
4558 }
4559 remAbort(rc, __FUNCTION__);
4560}
4561
4562void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4563{
4564 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4565 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4566 if (RT_LIKELY(rc == VINF_SUCCESS))
4567 return;
4568 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4569 {
4570 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4571 remR3RaiseRC(env->pVM, rc);
4572 return;
4573 }
4574 remAbort(rc, __FUNCTION__);
4575}
4576
4577void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4578{
4579 int rc;
4580 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4581 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4582 if (RT_LIKELY(rc == VINF_SUCCESS))
4583 return;
4584 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4585 {
4586 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4587 remR3RaiseRC(env->pVM, rc);
4588 return;
4589 }
4590 remAbort(rc, __FUNCTION__);
4591}
4592
4593uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4594{
4595 uint32_t u32 = 0;
4596 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4597 if (RT_LIKELY(rc == VINF_SUCCESS))
4598 {
4599 if (/*addr != 0x61 && */addr != 0x71)
4600 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4601 return (uint8_t)u32;
4602 }
4603 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4604 {
4605 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4606 remR3RaiseRC(env->pVM, rc);
4607 return (uint8_t)u32;
4608 }
4609 remAbort(rc, __FUNCTION__);
4610 return UINT8_C(0xff);
4611}
4612
4613uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4614{
4615 uint32_t u32 = 0;
4616 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4617 if (RT_LIKELY(rc == VINF_SUCCESS))
4618 {
4619 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4620 return (uint16_t)u32;
4621 }
4622 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4623 {
4624 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4625 remR3RaiseRC(env->pVM, rc);
4626 return (uint16_t)u32;
4627 }
4628 remAbort(rc, __FUNCTION__);
4629 return UINT16_C(0xffff);
4630}
4631
4632uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4633{
4634 uint32_t u32 = 0;
4635 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4636 if (RT_LIKELY(rc == VINF_SUCCESS))
4637 {
4638//if (addr==0x01f0 && u32 == 0x6b6d)
4639// loglevel = ~0;
4640 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4641 return u32;
4642 }
4643 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4644 {
4645 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4646 remR3RaiseRC(env->pVM, rc);
4647 return u32;
4648 }
4649 remAbort(rc, __FUNCTION__);
4650 return UINT32_C(0xffffffff);
4651}
4652
4653#undef LOG_GROUP
4654#define LOG_GROUP LOG_GROUP_REM
4655
4656
4657/* -+- helpers and misc other interfaces -+- */
4658
4659/**
4660 * Perform the CPUID instruction.
4661 *
4662 * @param env Pointer to the recompiler CPU structure.
4663 * @param idx The CPUID leaf (eax).
4664 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4665 * @param pEAX Where to store eax.
4666 * @param pEBX Where to store ebx.
4667 * @param pECX Where to store ecx.
4668 * @param pEDX Where to store edx.
4669 */
4670void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4671 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4672{
4673 NOREF(idxSub);
4674 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4675}
4676
4677
4678#if 0 /* not used */
4679/**
4680 * Interface for qemu hardware to report back fatal errors.
4681 */
4682void hw_error(const char *pszFormat, ...)
4683{
4684 /*
4685 * Bitch about it.
4686 */
4687 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4688 * this in my Odin32 tree at home! */
4689 va_list args;
4690 va_start(args, pszFormat);
4691 RTLogPrintf("fatal error in virtual hardware:");
4692 RTLogPrintfV(pszFormat, args);
4693 va_end(args);
4694 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4695
4696 /*
4697 * If we're in REM context we'll sync back the state before 'jumping' to
4698 * the EMs failure handling.
4699 */
4700 PVM pVM = cpu_single_env->pVM;
4701 if (pVM->rem.s.fInREM)
4702 REMR3StateBack(pVM);
4703 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4704 AssertMsgFailed(("EMR3FatalError returned!\n"));
4705}
4706#endif
4707
4708/**
4709 * Interface for the qemu cpu to report unhandled situation
4710 * raising a fatal VM error.
4711 */
4712void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4713{
4714 va_list va;
4715 PVM pVM;
4716 PVMCPU pVCpu;
4717 char szMsg[256];
4718
4719 /*
4720 * Bitch about it.
4721 */
4722 RTLogFlags(NULL, "nodisabled nobuffered");
4723 RTLogFlush(NULL);
4724
4725 va_start(va, pszFormat);
4726#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4727 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4728 unsigned cArgs = 0;
4729 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4730 const char *psz = strchr(pszFormat, '%');
4731 while (psz && cArgs < 6)
4732 {
4733 auArgs[cArgs++] = va_arg(va, uintptr_t);
4734 psz = strchr(psz + 1, '%');
4735 }
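    /* Note: the scan above counts every '%', including "%%" escapes, so it may
       fetch more arguments than the format consumes; the surplus is ignored
       when formatting. */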
4736 switch (cArgs)
4737 {
4738 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4739 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4740 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4741 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4742 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4743 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4744 default:
4745 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4746 }
4747#else
4748 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4749#endif
4750 va_end(va);
4751
4752 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4753 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4754
4755 /*
4756 * If we're in REM context we'll sync back the state before 'jumping' to
4757 * the EMs failure handling.
4758 */
4759 pVM = cpu_single_env->pVM;
4760 pVCpu = cpu_single_env->pVCpu;
4761 Assert(pVCpu);
4762
4763 if (pVM->rem.s.fInREM)
4764 REMR3StateBack(pVM, pVCpu);
4765 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4766 AssertMsgFailed(("EMR3FatalError returned!\n"));
4767}
4768
4769
4770/**
4771 * Aborts the VM.
4772 *
4773 * @param rc VBox error code.
4774 * @param pszTip Hint about why/when this happened.
4775 */
4776void remAbort(int rc, const char *pszTip)
4777{
4778 PVM pVM;
4779 PVMCPU pVCpu;
4780
4781 /*
4782 * Bitch about it.
4783 */
4784 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4785 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4786
4787 /*
4788 * Jump back to where we entered the recompiler.
4789 */
4790 pVM = cpu_single_env->pVM;
4791 pVCpu = cpu_single_env->pVCpu;
4792 Assert(pVCpu);
4793
4794 if (pVM->rem.s.fInREM)
4795 REMR3StateBack(pVM, pVCpu);
4796
4797 EMR3FatalError(pVCpu, rc);
4798 AssertMsgFailed(("EMR3FatalError returned!\n"));
4799}
4800
4801
4802/**
4803 * Dumps a linux system call.
4804 * @param pVCpu VMCPU handle.
4805 */
4806void remR3DumpLnxSyscall(PVMCPU pVCpu)
4807{
4808 static const char *apsz[] =
4809 {
4810 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4811 "sys_exit",
4812 "sys_fork",
4813 "sys_read",
4814 "sys_write",
4815 "sys_open", /* 5 */
4816 "sys_close",
4817 "sys_waitpid",
4818 "sys_creat",
4819 "sys_link",
4820 "sys_unlink", /* 10 */
4821 "sys_execve",
4822 "sys_chdir",
4823 "sys_time",
4824 "sys_mknod",
4825 "sys_chmod", /* 15 */
4826 "sys_lchown16",
4827 "sys_ni_syscall", /* old break syscall holder */
4828 "sys_stat",
4829 "sys_lseek",
4830 "sys_getpid", /* 20 */
4831 "sys_mount",
4832 "sys_oldumount",
4833 "sys_setuid16",
4834 "sys_getuid16",
4835 "sys_stime", /* 25 */
4836 "sys_ptrace",
4837 "sys_alarm",
4838 "sys_fstat",
4839 "sys_pause",
4840 "sys_utime", /* 30 */
4841 "sys_ni_syscall", /* old stty syscall holder */
4842 "sys_ni_syscall", /* old gtty syscall holder */
4843 "sys_access",
4844 "sys_nice",
4845 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4846 "sys_sync",
4847 "sys_kill",
4848 "sys_rename",
4849 "sys_mkdir",
4850 "sys_rmdir", /* 40 */
4851 "sys_dup",
4852 "sys_pipe",
4853 "sys_times",
4854 "sys_ni_syscall", /* old prof syscall holder */
4855 "sys_brk", /* 45 */
4856 "sys_setgid16",
4857 "sys_getgid16",
4858 "sys_signal",
4859 "sys_geteuid16",
4860 "sys_getegid16", /* 50 */
4861 "sys_acct",
4862 "sys_umount", /* recycled never used phys() */
4863 "sys_ni_syscall", /* old lock syscall holder */
4864 "sys_ioctl",
4865 "sys_fcntl", /* 55 */
4866 "sys_ni_syscall", /* old mpx syscall holder */
4867 "sys_setpgid",
4868 "sys_ni_syscall", /* old ulimit syscall holder */
4869 "sys_olduname",
4870 "sys_umask", /* 60 */
4871 "sys_chroot",
4872 "sys_ustat",
4873 "sys_dup2",
4874 "sys_getppid",
4875 "sys_getpgrp", /* 65 */
4876 "sys_setsid",
4877 "sys_sigaction",
4878 "sys_sgetmask",
4879 "sys_ssetmask",
4880 "sys_setreuid16", /* 70 */
4881 "sys_setregid16",
4882 "sys_sigsuspend",
4883 "sys_sigpending",
4884 "sys_sethostname",
4885 "sys_setrlimit", /* 75 */
4886 "sys_old_getrlimit",
4887 "sys_getrusage",
4888 "sys_gettimeofday",
4889 "sys_settimeofday",
4890 "sys_getgroups16", /* 80 */
4891 "sys_setgroups16",
4892 "old_select",
4893 "sys_symlink",
4894 "sys_lstat",
4895 "sys_readlink", /* 85 */
4896 "sys_uselib",
4897 "sys_swapon",
4898 "sys_reboot",
4899 "old_readdir",
4900 "old_mmap", /* 90 */
4901 "sys_munmap",
4902 "sys_truncate",
4903 "sys_ftruncate",
4904 "sys_fchmod",
4905 "sys_fchown16", /* 95 */
4906 "sys_getpriority",
4907 "sys_setpriority",
4908 "sys_ni_syscall", /* old profil syscall holder */
4909 "sys_statfs",
4910 "sys_fstatfs", /* 100 */
4911 "sys_ioperm",
4912 "sys_socketcall",
4913 "sys_syslog",
4914 "sys_setitimer",
4915 "sys_getitimer", /* 105 */
4916 "sys_newstat",
4917 "sys_newlstat",
4918 "sys_newfstat",
4919 "sys_uname",
4920 "sys_iopl", /* 110 */
4921 "sys_vhangup",
4922 "sys_ni_syscall", /* old "idle" system call */
4923 "sys_vm86old",
4924 "sys_wait4",
4925 "sys_swapoff", /* 115 */
4926 "sys_sysinfo",
4927 "sys_ipc",
4928 "sys_fsync",
4929 "sys_sigreturn",
4930 "sys_clone", /* 120 */
4931 "sys_setdomainname",
4932 "sys_newuname",
4933 "sys_modify_ldt",
4934 "sys_adjtimex",
4935 "sys_mprotect", /* 125 */
4936 "sys_sigprocmask",
4937 "sys_ni_syscall", /* old "create_module" */
4938 "sys_init_module",
4939 "sys_delete_module",
4940 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4941 "sys_quotactl",
4942 "sys_getpgid",
4943 "sys_fchdir",
4944 "sys_bdflush",
4945 "sys_sysfs", /* 135 */
4946 "sys_personality",
4947 "sys_ni_syscall", /* reserved for afs_syscall */
4948 "sys_setfsuid16",
4949 "sys_setfsgid16",
4950 "sys_llseek", /* 140 */
4951 "sys_getdents",
4952 "sys_select",
4953 "sys_flock",
4954 "sys_msync",
4955 "sys_readv", /* 145 */
4956 "sys_writev",
4957 "sys_getsid",
4958 "sys_fdatasync",
4959 "sys_sysctl",
4960 "sys_mlock", /* 150 */
4961 "sys_munlock",
4962 "sys_mlockall",
4963 "sys_munlockall",
4964 "sys_sched_setparam",
4965 "sys_sched_getparam", /* 155 */
4966 "sys_sched_setscheduler",
4967 "sys_sched_getscheduler",
4968 "sys_sched_yield",
4969 "sys_sched_get_priority_max",
4970 "sys_sched_get_priority_min", /* 160 */
4971 "sys_sched_rr_get_interval",
4972 "sys_nanosleep",
4973 "sys_mremap",
4974 "sys_setresuid16",
4975 "sys_getresuid16", /* 165 */
4976 "sys_vm86",
4977 "sys_ni_syscall", /* Old sys_query_module */
4978 "sys_poll",
4979 "sys_nfsservctl",
4980 "sys_setresgid16", /* 170 */
4981 "sys_getresgid16",
4982 "sys_prctl",
4983 "sys_rt_sigreturn",
4984 "sys_rt_sigaction",
4985 "sys_rt_sigprocmask", /* 175 */
4986 "sys_rt_sigpending",
4987 "sys_rt_sigtimedwait",
4988 "sys_rt_sigqueueinfo",
4989 "sys_rt_sigsuspend",
4990 "sys_pread64", /* 180 */
4991 "sys_pwrite64",
4992 "sys_chown16",
4993 "sys_getcwd",
4994 "sys_capget",
4995 "sys_capset", /* 185 */
4996 "sys_sigaltstack",
4997 "sys_sendfile",
4998 "sys_ni_syscall", /* reserved for streams1 */
4999 "sys_ni_syscall", /* reserved for streams2 */
5000 "sys_vfork", /* 190 */
5001 "sys_getrlimit",
5002 "sys_mmap2",
5003 "sys_truncate64",
5004 "sys_ftruncate64",
5005 "sys_stat64", /* 195 */
5006 "sys_lstat64",
5007 "sys_fstat64",
5008 "sys_lchown",
5009 "sys_getuid",
5010 "sys_getgid", /* 200 */
5011 "sys_geteuid",
5012 "sys_getegid",
5013 "sys_setreuid",
5014 "sys_setregid",
5015 "sys_getgroups", /* 205 */
5016 "sys_setgroups",
5017 "sys_fchown",
5018 "sys_setresuid",
5019 "sys_getresuid",
5020 "sys_setresgid", /* 210 */
5021 "sys_getresgid",
5022 "sys_chown",
5023 "sys_setuid",
5024 "sys_setgid",
5025 "sys_setfsuid", /* 215 */
5026 "sys_setfsgid",
5027 "sys_pivot_root",
5028 "sys_mincore",
5029 "sys_madvise",
5030 "sys_getdents64", /* 220 */
5031 "sys_fcntl64",
5032 "sys_ni_syscall", /* reserved for TUX */
5033 "sys_ni_syscall",
5034 "sys_gettid",
5035 "sys_readahead", /* 225 */
5036 "sys_setxattr",
5037 "sys_lsetxattr",
5038 "sys_fsetxattr",
5039 "sys_getxattr",
5040 "sys_lgetxattr", /* 230 */
5041 "sys_fgetxattr",
5042 "sys_listxattr",
5043 "sys_llistxattr",
5044 "sys_flistxattr",
5045 "sys_removexattr", /* 235 */
5046 "sys_lremovexattr",
5047 "sys_fremovexattr",
5048 "sys_tkill",
5049 "sys_sendfile64",
5050 "sys_futex", /* 240 */
5051 "sys_sched_setaffinity",
5052 "sys_sched_getaffinity",
5053 "sys_set_thread_area",
5054 "sys_get_thread_area",
5055 "sys_io_setup", /* 245 */
5056 "sys_io_destroy",
5057 "sys_io_getevents",
5058 "sys_io_submit",
5059 "sys_io_cancel",
5060 "sys_fadvise64", /* 250 */
5061 "sys_ni_syscall",
5062 "sys_exit_group",
5063 "sys_lookup_dcookie",
5064 "sys_epoll_create",
5065 "sys_epoll_ctl", /* 255 */
5066 "sys_epoll_wait",
5067 "sys_remap_file_pages",
5068 "sys_set_tid_address",
5069 "sys_timer_create",
5070 "sys_timer_settime", /* 260 */
5071 "sys_timer_gettime",
5072 "sys_timer_getoverrun",
5073 "sys_timer_delete",
5074 "sys_clock_settime",
5075 "sys_clock_gettime", /* 265 */
5076 "sys_clock_getres",
5077 "sys_clock_nanosleep",
5078 "sys_statfs64",
5079 "sys_fstatfs64",
5080 "sys_tgkill", /* 270 */
5081 "sys_utimes",
5082 "sys_fadvise64_64",
5083 "sys_ni_syscall" /* sys_vserver */
5084 };
5085
5086 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5087 switch (uEAX)
5088 {
5089 default:
5090 if (uEAX < RT_ELEMENTS(apsz))
5091 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5092 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5093 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5094 else
5095 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5096 break;
5097
5098 }
5099}
5100
5101
5102/**
5103 * Dumps an OpenBSD system call.
5104 * @param pVCpu VMCPU handle.
5105 */
5106void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5107{
5108 static const char *apsz[] =
5109 {
5110 "SYS_syscall", //0
5111 "SYS_exit", //1
5112 "SYS_fork", //2
5113 "SYS_read", //3
5114 "SYS_write", //4
5115 "SYS_open", //5
5116 "SYS_close", //6
5117 "SYS_wait4", //7
5118 "SYS_8",
5119 "SYS_link", //9
5120 "SYS_unlink", //10
5121 "SYS_11",
5122 "SYS_chdir", //12
5123 "SYS_fchdir", //13
5124 "SYS_mknod", //14
5125 "SYS_chmod", //15
5126 "SYS_chown", //16
5127 "SYS_break", //17
5128 "SYS_18",
5129 "SYS_19",
5130 "SYS_getpid", //20
5131 "SYS_mount", //21
5132 "SYS_unmount", //22
5133 "SYS_setuid", //23
5134 "SYS_getuid", //24
5135 "SYS_geteuid", //25
5136 "SYS_ptrace", //26
5137 "SYS_recvmsg", //27
5138 "SYS_sendmsg", //28
5139 "SYS_recvfrom", //29
5140 "SYS_accept", //30
5141 "SYS_getpeername", //31
5142 "SYS_getsockname", //32
5143 "SYS_access", //33
5144 "SYS_chflags", //34
5145 "SYS_fchflags", //35
5146 "SYS_sync", //36
5147 "SYS_kill", //37
5148 "SYS_38",
5149 "SYS_getppid", //39
5150 "SYS_40",
5151 "SYS_dup", //41
5152 "SYS_opipe", //42
5153 "SYS_getegid", //43
5154 "SYS_profil", //44
5155 "SYS_ktrace", //45
5156 "SYS_sigaction", //46
5157 "SYS_getgid", //47
5158 "SYS_sigprocmask", //48
5159 "SYS_getlogin", //49
5160 "SYS_setlogin", //50
5161 "SYS_acct", //51
5162 "SYS_sigpending", //52
5163 "SYS_osigaltstack", //53
5164 "SYS_ioctl", //54
5165 "SYS_reboot", //55
5166 "SYS_revoke", //56
5167 "SYS_symlink", //57
5168 "SYS_readlink", //58
5169 "SYS_execve", //59
5170 "SYS_umask", //60
5171 "SYS_chroot", //61
5172 "SYS_62",
5173 "SYS_63",
5174 "SYS_64",
5175 "SYS_65",
5176 "SYS_vfork", //66
5177 "SYS_67",
5178 "SYS_68",
5179 "SYS_sbrk", //69
5180 "SYS_sstk", //70
5181 "SYS_61",
5182 "SYS_vadvise", //72
5183 "SYS_munmap", //73
5184 "SYS_mprotect", //74
5185 "SYS_madvise", //75
5186 "SYS_76",
5187 "SYS_77",
5188 "SYS_mincore", //78
5189 "SYS_getgroups", //79
5190 "SYS_setgroups", //80
5191 "SYS_getpgrp", //81
5192 "SYS_setpgid", //82
5193 "SYS_setitimer", //83
5194 "SYS_84",
5195 "SYS_85",
5196 "SYS_getitimer", //86
5197 "SYS_87",
5198 "SYS_88",
5199 "SYS_89",
5200 "SYS_dup2", //90
5201 "SYS_91",
5202 "SYS_fcntl", //92
5203 "SYS_select", //93
5204 "SYS_94",
5205 "SYS_fsync", //95
5206 "SYS_setpriority", //96
5207 "SYS_socket", //97
5208 "SYS_connect", //98
5209 "SYS_99",
5210 "SYS_getpriority", //100
5211 "SYS_101",
5212 "SYS_102",
5213 "SYS_sigreturn", //103
5214 "SYS_bind", //104
5215 "SYS_setsockopt", //105
5216 "SYS_listen", //106
5217 "SYS_107",
5218 "SYS_108",
5219 "SYS_109",
5220 "SYS_110",
5221 "SYS_sigsuspend", //111
5222 "SYS_112",
5223 "SYS_113",
5224 "SYS_114",
5225 "SYS_115",
5226 "SYS_gettimeofday", //116
5227 "SYS_getrusage", //117
5228 "SYS_getsockopt", //118
5229 "SYS_119",
5230 "SYS_readv", //120
5231 "SYS_writev", //121
5232 "SYS_settimeofday", //122
5233 "SYS_fchown", //123
5234 "SYS_fchmod", //124
5235 "SYS_125",
5236 "SYS_setreuid", //126
5237 "SYS_setregid", //127
5238 "SYS_rename", //128
5239 "SYS_129",
5240 "SYS_130",
5241 "SYS_flock", //131
5242 "SYS_mkfifo", //132
5243 "SYS_sendto", //133
5244 "SYS_shutdown", //134
5245 "SYS_socketpair", //135
5246 "SYS_mkdir", //136
5247 "SYS_rmdir", //137
5248 "SYS_utimes", //138
5249 "SYS_139",
5250 "SYS_adjtime", //140
5251 "SYS_141",
5252 "SYS_142",
5253 "SYS_143",
5254 "SYS_144",
5255 "SYS_145",
5256 "SYS_146",
5257 "SYS_setsid", //147
5258 "SYS_quotactl", //148
5259 "SYS_149",
5260 "SYS_150",
5261 "SYS_151",
5262 "SYS_152",
5263 "SYS_153",
5264 "SYS_154",
5265 "SYS_nfssvc", //155
5266 "SYS_156",
5267 "SYS_157",
5268 "SYS_158",
5269 "SYS_159",
5270 "SYS_160",
5271 "SYS_getfh", //161
5272 "SYS_162",
5273 "SYS_163",
5274 "SYS_164",
5275 "SYS_sysarch", //165
5276 "SYS_166",
5277 "SYS_167",
5278 "SYS_168",
5279 "SYS_169",
5280 "SYS_170",
5281 "SYS_171",
5282 "SYS_172",
5283 "SYS_pread", //173
5284 "SYS_pwrite", //174
5285 "SYS_175",
5286 "SYS_176",
5287 "SYS_177",
5288 "SYS_178",
5289 "SYS_179",
5290 "SYS_180",
5291 "SYS_setgid", //181
5292 "SYS_setegid", //182
5293 "SYS_seteuid", //183
5294 "SYS_lfs_bmapv", //184
5295 "SYS_lfs_markv", //185
5296 "SYS_lfs_segclean", //186
5297 "SYS_lfs_segwait", //187
5298 "SYS_188",
5299 "SYS_189",
5300 "SYS_190",
5301 "SYS_pathconf", //191
5302 "SYS_fpathconf", //192
5303 "SYS_swapctl", //193
5304 "SYS_getrlimit", //194
5305 "SYS_setrlimit", //195
5306 "SYS_getdirentries", //196
5307 "SYS_mmap", //197
5308 "SYS___syscall", //198
5309 "SYS_lseek", //199
5310 "SYS_truncate", //200
5311 "SYS_ftruncate", //201
5312 "SYS___sysctl", //202
5313 "SYS_mlock", //203
5314 "SYS_munlock", //204
5315 "SYS_205",
5316 "SYS_futimes", //206
5317 "SYS_getpgid", //207
5318 "SYS_xfspioctl", //208
5319 "SYS_209",
5320 "SYS_210",
5321 "SYS_211",
5322 "SYS_212",
5323 "SYS_213",
5324 "SYS_214",
5325 "SYS_215",
5326 "SYS_216",
5327 "SYS_217",
5328 "SYS_218",
5329 "SYS_219",
5330 "SYS_220",
5331 "SYS_semget", //221
5332 "SYS_222",
5333 "SYS_223",
5334 "SYS_224",
5335 "SYS_msgget", //225
5336 "SYS_msgsnd", //226
5337 "SYS_msgrcv", //227
5338 "SYS_shmat", //228
5339 "SYS_229",
5340 "SYS_shmdt", //230
5341 "SYS_231",
5342 "SYS_clock_gettime", //232
5343 "SYS_clock_settime", //233
5344 "SYS_clock_getres", //234
5345 "SYS_235",
5346 "SYS_236",
5347 "SYS_237",
5348 "SYS_238",
5349 "SYS_239",
5350 "SYS_nanosleep", //240
5351 "SYS_241",
5352 "SYS_242",
5353 "SYS_243",
5354 "SYS_244",
5355 "SYS_245",
5356 "SYS_246",
5357 "SYS_247",
5358 "SYS_248",
5359 "SYS_249",
5360 "SYS_minherit", //250
5361 "SYS_rfork", //251
5362 "SYS_poll", //252
5363 "SYS_issetugid", //253
5364 "SYS_lchown", //254
5365 "SYS_getsid", //255
5366 "SYS_msync", //256
5367 "SYS_257",
5368 "SYS_258",
5369 "SYS_259",
5370 "SYS_getfsstat", //260
5371 "SYS_statfs", //261
5372 "SYS_fstatfs", //262
5373 "SYS_pipe", //263
5374 "SYS_fhopen", //264
5375 "SYS_265",
5376 "SYS_fhstatfs", //266
5377 "SYS_preadv", //267
5378 "SYS_pwritev", //268
5379 "SYS_kqueue", //269
5380 "SYS_kevent", //270
5381 "SYS_mlockall", //271
5382 "SYS_munlockall", //272
5383 "SYS_getpeereid", //273
5384 "SYS_274",
5385 "SYS_275",
5386 "SYS_276",
5387 "SYS_277",
5388 "SYS_278",
5389 "SYS_279",
5390 "SYS_280",
5391 "SYS_getresuid", //281
5392 "SYS_setresuid", //282
5393 "SYS_getresgid", //283
5394 "SYS_setresgid", //284
5395 "SYS_285",
5396 "SYS_mquery", //286
5397 "SYS_closefrom", //287
5398 "SYS_sigaltstack", //288
5399 "SYS_shmget", //289
5400 "SYS_semop", //290
5401 "SYS_stat", //291
5402 "SYS_fstat", //292
5403 "SYS_lstat", //293
5404 "SYS_fhstat", //294
5405 "SYS___semctl", //295
5406 "SYS_shmctl", //296
5407 "SYS_msgctl", //297
5408 "SYS_MAXSYSCALL", //298
5409 //299
5410 //300
5411 };
5412 uint32_t uEAX;
5413 if (!LogIsEnabled())
5414 return;
5415 uEAX = CPUMGetGuestEAX(pVCpu);
5416    switch (uEAX) /* kept as a switch so individual syscalls can easily be given custom logging */
5417 {
5418 default:
5419 if (uEAX < RT_ELEMENTS(apsz))
5420 {
5421 uint32_t au32Args[8] = {0};
5422                PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args)); /* best-effort read of the stack-passed arguments; the buffer is pre-zeroed */
5423 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5424 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5425 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5426 }
5427 else
5428 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5429 break;
5430 }
5431}
5432
5433
5434#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5435/**
5436 * The DLL main entry point (stub) - required when building without the CRT.
5437 */
5438bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5439{
5440 return true;
5441}
5442
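5443/**
5444 * Minimal byte-wise memcpy for no-CRT builds.  The compiler can emit
5445 * calls to memcpy on its own (e.g. for struct assignments), so a
5446 * definition must be present even though the CRT is not linked.
5447 */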
5448void *memcpy(void *dst, const void *src, size_t size)
5449{
5450    uint8_t *pbDst = (uint8_t *)dst, *pbSrc = (uint8_t *)src; /* explicit cast drops const from src; it is only read through */
5451    while (size-- > 0)
5452        *pbDst++ = *pbSrc++;
5453    return dst;
5454}
5455
5456#endif /* IPRT_NO_CRT && RT_OS_WINDOWS && RT_ARCH_X86 */
5457
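5458/**
5459 * QEMU callback for System Management Mode (SMM) state changes.
5460 * Deliberately a no-op: the recompiler does not model SMM, so there
5461 * is nothing to update here.
5462 */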
5463void cpu_smm_update(CPUX86State *env)
5464{
5465}