VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 41385

Last change on this file since 41385 was 41385, checked in by vboxsync, 13 years ago

recompiler: VBOX_DEBUG_PHYS logging fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 181.6 KB
Line 
1/* $Id: VBoxRecompiler.c 41385 2012-05-22 07:51:07Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
/* Forward declarations of recompiler (QEMU) internals used below.  We don't
   want to include everything, so these are declared by hand; keep the
   signatures in sync with the recompiler sources. */
extern void cpu_exec_init_all(unsigned long tb_size);                       /* exec.c: one-time TB cache init. */
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUX86State *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
/* Debug helper (strict builds only) - presumably returns the physical page
   mapping info for @a addr; defined in the recompiler. */
unsigned long get_phys_page_offset(target_ulong addr);
#endif
70
71
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Note: both pointers are cast, so any pointer type may be passed. */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates: when defined, it uses QEMU's own
 *  single-stepping machinery rather than a private implementation. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
/* Saved state callbacks and init helpers. */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
static int  remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

/* MMIO access callbacks registered with the recompiler (see g_apfnMMIORead/Write). */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

/* Access-handler callbacks registered with the recompiler (see g_apfnHandlerRead/Write). */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

/* PGM physical access handler notification workers. */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the main emulation phases (registered in REMR3Init,
   deregistered in REMR3Term - keep the two lists in sync). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Reasons raw-mode execution was refused (see remR3CanExecuteRaw). */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor table / task register change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Per-selector out-of-sync counters, indexed ES,CS,SS,DS,FS,GS. */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c - release-build counters surfaced via STAM_REL_REG in REMR3Init. */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
153
/*
 * Global stuff.
 */

/** MMIO read callbacks, indexed by access size log2 (0=U8, 1=U16, 2=U32).
 *  Registered with the recompiler in REMR3Init via cpu_register_io_memory. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access size log2 (0=U8, 1=U16, 2=U32). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks, same indexing as the MMIO tables. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks, same indexing as the MMIO tables. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments: one optional boolean (0 args = query current state). */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0U,        DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors - registered once in REMR3Init via DBGCRegisterCommands. */
static const DBGCCMD    g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
221
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc(_1K) in REMR3Init.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;
226
227
228/*******************************************************************************
229* Internal Functions *
230*******************************************************************************/
231void remAbort(int rc, const char *pszTip);
232extern int testmath(void);
233
234/* Put them here to avoid unused variable warning. */
235AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
236#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
237//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
238/* Why did this have to be identical?? */
239AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
240#else
241AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
242#endif
243
244
/**
 * Initializes the REM.
 *
 * Sets up the recompiled-execution manager: sanity checks, the register
 * critical section, the recompiler CPU state (cpu_x86_init/cpu_reset),
 * MMIO/handler memory types, the saved-state unit, debugger commands,
 * statistics, and the physical handler notification free list.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD)  /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all notifications while we set things up; the matching decrement
       is further down, after the memory types are registered. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue buffer for generated code; see the @todo on code_gen_prologue. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Mirror the guest's CPUID feature leaves (std leaf 1, ext leaf 0x80000001)
       into the recompiler CPU state. */
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands - registered only once per process, hence the static flag.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics. (Must be kept in sync with the STAM_DEREG list in REMR3Term.)
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",         STAMUNIT_OCCURENCES,     "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",   STAMUNIT_OCCURENCES,     "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",       STAMUNIT_OCCURENCES,     "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",     STAMUNIT_OCCURENCES,     "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",        STAMUNIT_OCCURENCES,     "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",   STAMUNIT_OCCURENCES,     "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",        STAMUNIT_OCCURENCES,     "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",     STAMUNIT_OCCURENCES,     "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",        STAMUNIT_OCCURENCES,     "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",   STAMUNIT_OCCURENCES,     "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES,  "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",           STAMUNIT_OCCURENCES,     "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",    STAMUNIT_OCCURENCES,     "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",       STAMUNIT_OCCURENCES,     "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",    STAMUNIT_OCCURENCES,     "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",         STAMUNIT_OCCURENCES,     "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES",  STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS",  STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS",  STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS",  STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS",  STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS",  STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    /* Release-build counters exported from exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count,              STAMTYPE_U32_RESET, "/REM/TbFlushCount",        STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,    STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",    STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,             STAMTYPE_U32_RESET, "/REM/TlbFlushCount",       STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists: chain every array entry onto the
     * free list; the pending list starts out empty (UINT32_MAX = nil index).
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
445
446
447/**
448 * Finalizes the REM initialization.
449 *
450 * This is called after all components, devices and drivers has
451 * been initialized. Its main purpose it to finish the RAM related
452 * initialization.
453 *
454 * @returns VBox status code.
455 *
456 * @param pVM The VM handle.
457 */
458REMR3DECL(int) REMR3InitFinalize(PVM pVM)
459{
460 int rc;
461
462 /*
463 * Ram size & dirty bit map.
464 */
465 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
466 pVM->rem.s.fGCPhysLastRamFixed = true;
467#ifdef RT_STRICT
468 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
469#else
470 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
471#endif
472 return rc;
473}
474
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * Allocates one dirty-tracking byte per guest RAM page.  In guarded mode the
 * map is page-allocated with an inaccessible tail region so that overruns
 * trap immediately, and the returned pointer is shifted so the map *ends*
 * exactly at the guard.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM blocks are registered with the recompiler. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    /* +1 overflowing to zero means GCPhysLastRam was the max address. */
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    /* One dirty byte per page; verify cb was page aligned (no pages lost to the shift). */
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail (everything past the aligned bitmap) inaccessible. */
        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the pointer so the usable map ends right at the guard region. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it - all pages initially dirty. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
531
532
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently only deregisters the statistics; the mirror of the STAM_REG
 * list in REMR3Init - keep the two in sync.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
603
604
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.  Bracket cpu_reset() with the ignore-all counter
     * so notifications triggered by the reset are dropped.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}
635
636
637/**
638 * Execute state save operation.
639 *
640 * @returns VBox status code.
641 * @param pVM VM Handle.
642 * @param pSSM SSM operation handle.
643 */
644static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
645{
646 PREM pRem = &pVM->rem.s;
647
648 /*
649 * Save the required CPU Env bits.
650 * (Not much because we're never in REM when doing the save.)
651 */
652 LogFlow(("remR3Save:\n"));
653 Assert(!pRem->fInREM);
654 SSMR3PutU32(pSSM, pRem->Env.hflags);
655 SSMR3PutU32(pSSM, ~0); /* separator */
656
657 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
658 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
659 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
660
661 return SSMR3PutU32(pSSM, ~0); /* terminator */
662}
663
664
/**
 * Execute state load operation.
 *
 * Mirrors remR3Save, with extra fields consumed (and mostly discarded) for
 * the legacy 1.6 layout.  Field order must match the save side exactly.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int      rc;
    PREM     pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     * The matching decrement is at the bottom of this function.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];  /* NB: shadows the function-scope pVCpu above. */
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
790
791
792
793#undef LOG_GROUP
794#define LOG_GROUP LOG_GROUP_REM_RUN
795
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int     rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool    fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * (Saved interrupt_request is restored at the bottom of this function.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping,
     * or we'd just trigger it again instead of stepping past it.  Re-inserted below.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Resume/suspend nudges the clock forward; see the comment above. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* Pick up the status code stashed by the callback; reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
880
881
882/**
883 * Set a breakpoint using the REM facilities.
884 *
885 * @returns VBox status code.
886 * @param pVM The VM handle.
887 * @param Address The breakpoint address.
888 * @thread The emulation thread.
889 */
890REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
891{
892 VM_ASSERT_EMT(pVM);
893 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
894 {
895 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
896 return VINF_SUCCESS;
897 }
898 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
899 return VERR_REM_NO_MORE_BP_SLOTS;
900}
901
902
903/**
904 * Clears a breakpoint set by REMR3BreakpointSet().
905 *
906 * @returns VBox status code.
907 * @param pVM The VM handle.
908 * @param Address The breakpoint address.
909 * @thread The emulation thread.
910 */
911REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
912{
913 VM_ASSERT_EMT(pVM);
914 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
915 {
916 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
917 return VINF_SUCCESS;
918 }
919 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
920 return VERR_REM_BP_NOT_FOUND;
921}
922
923
924/**
925 * Emulate an instruction.
926 *
927 * This function executes one instruction without letting anyone
928 * interrupt it. This is intended for being called while being in
929 * raw mode and thus will take care of all the state syncing between
930 * REM and the rest.
931 *
932 * @returns VBox status code.
933 * @param pVM VM handle.
934 * @param pVCpu VMCPU Handle.
935 */
936REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
937{
938 bool fFlushTBs;
939
940 int rc, rc2;
941 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
942
943 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
944 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
945 */
946 if (HWACCMIsEnabled(pVM))
947 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
948
949 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
950 fFlushTBs = pVM->rem.s.fFlushTBs;
951 pVM->rem.s.fFlushTBs = false;
952
953 /*
954 * Sync the state and enable single instruction / single stepping.
955 */
956 rc = REMR3State(pVM, pVCpu);
957 pVM->rem.s.fFlushTBs = fFlushTBs;
958 if (RT_SUCCESS(rc))
959 {
960 int interrupt_request = pVM->rem.s.Env.interrupt_request;
961 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
962#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
963 cpu_single_step(&pVM->rem.s.Env, 0);
964#endif
965 Assert(!pVM->rem.s.Env.singlestep_enabled);
966
967 /*
968 * Now we set the execute single instruction flag and enter the cpu_exec loop.
969 */
970 TMNotifyStartOfExecution(pVCpu);
971 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
972 rc = cpu_exec(&pVM->rem.s.Env);
973 TMNotifyEndOfExecution(pVCpu);
974 switch (rc)
975 {
976 /*
977 * Executed without anything out of the way happening.
978 */
979 case EXCP_SINGLE_INSTR:
980 rc = VINF_EM_RESCHEDULE;
981 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
982 break;
983
984 /*
985 * If we take a trap or start servicing a pending interrupt, we might end up here.
986 * (Timer thread or some other thread wishing EMT's attention.)
987 */
988 case EXCP_INTERRUPT:
989 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
990 rc = VINF_EM_RESCHEDULE;
991 break;
992
993 /*
994 * Single step, we assume!
995 * If there was a breakpoint there we're fucked now.
996 */
997 case EXCP_DEBUG:
998 if (pVM->rem.s.Env.watchpoint_hit)
999 {
1000 /** @todo deal with watchpoints */
1001 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1002 rc = VINF_EM_DBG_BREAKPOINT;
1003 }
1004 else
1005 {
1006 CPUBreakpoint *pBP;
1007 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1008 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1009 if (pBP->pc == GCPtrPC)
1010 break;
1011 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1012 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1013 }
1014 break;
1015
1016 /*
1017 * hlt instruction.
1018 */
1019 case EXCP_HLT:
1020 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1021 rc = VINF_EM_HALT;
1022 break;
1023
1024 /*
1025 * The VM has halted.
1026 */
1027 case EXCP_HALTED:
1028 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1029 rc = VINF_EM_HALT;
1030 break;
1031
1032 /*
1033 * Switch to RAW-mode.
1034 */
1035 case EXCP_EXECUTE_RAW:
1036 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1037 rc = VINF_EM_RESCHEDULE_RAW;
1038 break;
1039
1040 /*
1041 * Switch to hardware accelerated RAW-mode.
1042 */
1043 case EXCP_EXECUTE_HWACC:
1044 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1045 rc = VINF_EM_RESCHEDULE_HWACC;
1046 break;
1047
1048 /*
1049 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1050 */
1051 case EXCP_RC:
1052 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1053 rc = pVM->rem.s.rc;
1054 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1055 break;
1056
1057 /*
1058 * Figure out the rest when they arrive....
1059 */
1060 default:
1061 AssertMsgFailed(("rc=%d\n", rc));
1062 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1063 rc = VINF_EM_RESCHEDULE;
1064 break;
1065 }
1066
1067 /*
1068 * Switch back the state.
1069 */
1070 pVM->rem.s.Env.interrupt_request = interrupt_request;
1071 rc2 = REMR3StateBack(pVM, pVCpu);
1072 AssertRC(rc2);
1073 }
1074
1075 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1076 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1077 return rc;
1078}
1079
1080
1081/**
1082 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1083 *
1084 * @returns VBox status code.
1085 *
1086 * @param pVM The VM handle.
1087 * @param pVCpu The Virtual CPU handle.
1088 */
1089static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1090{
1091 int rc;
1092
1093 Assert(pVM->rem.s.fInREM);
1094#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1095 cpu_single_step(&pVM->rem.s.Env, 1);
1096#else
1097 Assert(!pVM->rem.s.Env.singlestep_enabled);
1098#endif
1099
1100 /*
1101 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1102 */
1103 for (;;)
1104 {
1105 char szBuf[256];
1106
1107 /*
1108 * Log the current registers state and instruction.
1109 */
1110 remR3StateUpdate(pVM, pVCpu);
1111 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1112 szBuf[0] = '\0';
1113 rc = DBGFR3DisasInstrEx(pVM,
1114 pVCpu->idCpu,
1115 0, /* Sel */
1116 0, /* GCPtr */
1117 DBGF_DISAS_FLAGS_CURRENT_GUEST
1118 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1119 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1120 szBuf,
1121 sizeof(szBuf),
1122 NULL);
1123 if (RT_FAILURE(rc))
1124 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1125 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1126
1127 /*
1128 * Execute the instruction.
1129 */
1130 TMNotifyStartOfExecution(pVCpu);
1131
1132 if ( pVM->rem.s.Env.exception_index < 0
1133 || pVM->rem.s.Env.exception_index > 256)
1134 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1135
1136#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1137 pVM->rem.s.Env.interrupt_request = 0;
1138#else
1139 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1140#endif
1141 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1142 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1143 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1144 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1145 pVM->rem.s.Env.interrupt_request,
1146 pVM->rem.s.Env.halted,
1147 pVM->rem.s.Env.exception_index
1148 );
1149
1150 rc = cpu_exec(&pVM->rem.s.Env);
1151
1152 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1153 pVM->rem.s.Env.interrupt_request,
1154 pVM->rem.s.Env.halted,
1155 pVM->rem.s.Env.exception_index
1156 );
1157
1158 TMNotifyEndOfExecution(pVCpu);
1159
1160 switch (rc)
1161 {
1162#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1163 /*
1164 * The normal exit.
1165 */
1166 case EXCP_SINGLE_INSTR:
1167 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1168 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1169 continue;
1170 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1171 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1172 rc = VINF_SUCCESS;
1173 break;
1174
1175#else
1176 /*
1177 * The normal exit, check for breakpoints at PC just to be sure.
1178 */
1179#endif
1180 case EXCP_DEBUG:
1181 if (pVM->rem.s.Env.watchpoint_hit)
1182 {
1183 /** @todo deal with watchpoints */
1184 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1185 rc = VINF_EM_DBG_BREAKPOINT;
1186 }
1187 else
1188 {
1189 CPUBreakpoint *pBP;
1190 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1191 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1192 if (pBP->pc == GCPtrPC)
1193 break;
1194 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1195 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1196 }
1197#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1198 if (rc == VINF_EM_DBG_STEPPED)
1199 {
1200 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1201 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1202 continue;
1203
1204 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1205 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1206 rc = VINF_SUCCESS;
1207 }
1208#endif
1209 break;
1210
1211 /*
1212 * If we take a trap or start servicing a pending interrupt, we might end up here.
1213 * (Timer thread or some other thread wishing EMT's attention.)
1214 */
1215 case EXCP_INTERRUPT:
1216 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1217 rc = VINF_SUCCESS;
1218 break;
1219
1220 /*
1221 * hlt instruction.
1222 */
1223 case EXCP_HLT:
1224 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1225 rc = VINF_EM_HALT;
1226 break;
1227
1228 /*
1229 * The VM has halted.
1230 */
1231 case EXCP_HALTED:
1232 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1233 rc = VINF_EM_HALT;
1234 break;
1235
1236 /*
1237 * Switch to RAW-mode.
1238 */
1239 case EXCP_EXECUTE_RAW:
1240 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1241 rc = VINF_EM_RESCHEDULE_RAW;
1242 break;
1243
1244 /*
1245 * Switch to hardware accelerated RAW-mode.
1246 */
1247 case EXCP_EXECUTE_HWACC:
1248 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1249 rc = VINF_EM_RESCHEDULE_HWACC;
1250 break;
1251
1252 /*
1253 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1254 */
1255 case EXCP_RC:
1256 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1257 rc = pVM->rem.s.rc;
1258 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1259 break;
1260
1261 /*
1262 * Figure out the rest when they arrive....
1263 */
1264 default:
1265 AssertMsgFailed(("rc=%d\n", rc));
1266 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1267 rc = VINF_EM_RESCHEDULE;
1268 break;
1269 }
1270 break;
1271 }
1272
1273#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1274// cpu_single_step(&pVM->rem.s.Env, 0);
1275#else
1276 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1277#endif
1278 return rc;
1279}
1280
1281
1282/**
1283 * Runs code in recompiled mode.
1284 *
1285 * Before calling this function the REM state needs to be in sync with
1286 * the VM. Call REMR3State() to perform the sync. It's only necessary
1287 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1288 * and after calling REMR3StateBack().
1289 *
1290 * @returns VBox status code.
1291 *
1292 * @param pVM VM Handle.
1293 * @param pVCpu VMCPU Handle.
1294 */
1295REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1296{
1297 int rc;
1298
1299 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1300 return remR3RunLoggingStep(pVM, pVCpu);
1301
1302 Assert(pVM->rem.s.fInREM);
1303 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1304
1305 TMNotifyStartOfExecution(pVCpu);
1306 rc = cpu_exec(&pVM->rem.s.Env);
1307 TMNotifyEndOfExecution(pVCpu);
1308 switch (rc)
1309 {
1310 /*
1311 * This happens when the execution was interrupted
1312 * by an external event, like pending timers.
1313 */
1314 case EXCP_INTERRUPT:
1315 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1316 rc = VINF_SUCCESS;
1317 break;
1318
1319 /*
1320 * hlt instruction.
1321 */
1322 case EXCP_HLT:
1323 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1324 rc = VINF_EM_HALT;
1325 break;
1326
1327 /*
1328 * The VM has halted.
1329 */
1330 case EXCP_HALTED:
1331 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1332 rc = VINF_EM_HALT;
1333 break;
1334
1335 /*
1336 * Breakpoint/single step.
1337 */
1338 case EXCP_DEBUG:
1339 if (pVM->rem.s.Env.watchpoint_hit)
1340 {
1341 /** @todo deal with watchpoints */
1342 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1343 rc = VINF_EM_DBG_BREAKPOINT;
1344 }
1345 else
1346 {
1347 CPUBreakpoint *pBP;
1348 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1349 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1350 if (pBP->pc == GCPtrPC)
1351 break;
1352 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1353 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1354 }
1355 break;
1356
1357 /*
1358 * Switch to RAW-mode.
1359 */
1360 case EXCP_EXECUTE_RAW:
1361 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1362 rc = VINF_EM_RESCHEDULE_RAW;
1363 break;
1364
1365 /*
1366 * Switch to hardware accelerated RAW-mode.
1367 */
1368 case EXCP_EXECUTE_HWACC:
1369 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1370 rc = VINF_EM_RESCHEDULE_HWACC;
1371 break;
1372
1373 /*
1374 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1375 */
1376 case EXCP_RC:
1377 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1378 rc = pVM->rem.s.rc;
1379 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1380 break;
1381
1382 /*
1383 * Figure out the rest when they arrive....
1384 */
1385 default:
1386 AssertMsgFailed(("rc=%d\n", rc));
1387 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1388 rc = VINF_SUCCESS;
1389 break;
1390 }
1391
1392 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1393 return rc;
1394}
1395
1396
1397/**
1398 * Check if the cpu state is suitable for Raw execution.
1399 *
1400 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1401 *
1402 * @param env The CPU env struct.
1403 * @param eip The EIP to check this for (might differ from env->eip).
1404 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1405 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1406 *
1407 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1408 */
1409bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1410{
1411 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1412 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1413 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1414 uint32_t u32CR0;
1415
1416#ifdef IEM_VERIFICATION_MODE
1417 return false;
1418#endif
1419
1420 /* Update counter. */
1421 env->pVM->rem.s.cCanExecuteRaw++;
1422
1423 /* Never when single stepping+logging guest code. */
1424 if (env->state & CPU_EMULATE_SINGLE_STEP)
1425 return false;
1426
1427 if (HWACCMIsEnabled(env->pVM))
1428 {
1429 CPUMCTX Ctx;
1430
1431 env->state |= CPU_RAW_HWACC;
1432
1433 /*
1434 * The simple check first...
1435 */
1436 if (!EMIsHwVirtExecutionEnabled(env->pVM))
1437 return false;
1438
1439 /*
1440 * Create partial context for HWACCMR3CanExecuteGuest
1441 */
1442 Ctx.cr0 = env->cr[0];
1443 Ctx.cr3 = env->cr[3];
1444 Ctx.cr4 = env->cr[4];
1445
1446 Ctx.tr = env->tr.selector;
1447 Ctx.trHid.u64Base = env->tr.base;
1448 Ctx.trHid.u32Limit = env->tr.limit;
1449 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1450
1451 Ctx.ldtr = env->ldt.selector;
1452 Ctx.ldtrHid.u64Base = env->ldt.base;
1453 Ctx.ldtrHid.u32Limit = env->ldt.limit;
1454 Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1455
1456 Ctx.idtr.cbIdt = env->idt.limit;
1457 Ctx.idtr.pIdt = env->idt.base;
1458
1459 Ctx.gdtr.cbGdt = env->gdt.limit;
1460 Ctx.gdtr.pGdt = env->gdt.base;
1461
1462 Ctx.rsp = env->regs[R_ESP];
1463 Ctx.rip = env->eip;
1464
1465 Ctx.eflags.u32 = env->eflags;
1466
1467 Ctx.cs = env->segs[R_CS].selector;
1468 Ctx.csHid.u64Base = env->segs[R_CS].base;
1469 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1470 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1471
1472 Ctx.ds = env->segs[R_DS].selector;
1473 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1474 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1475 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1476
1477 Ctx.es = env->segs[R_ES].selector;
1478 Ctx.esHid.u64Base = env->segs[R_ES].base;
1479 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1480 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1481
1482 Ctx.fs = env->segs[R_FS].selector;
1483 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1484 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1485 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1486
1487 Ctx.gs = env->segs[R_GS].selector;
1488 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1489 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1490 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1491
1492 Ctx.ss = env->segs[R_SS].selector;
1493 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1494 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1495 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1496
1497 Ctx.msrEFER = env->efer;
1498
1499 /* Hardware accelerated raw-mode:
1500 *
1501 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1502 */
1503 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1504 {
1505 *piException = EXCP_EXECUTE_HWACC;
1506 return true;
1507 }
1508 return false;
1509 }
1510
1511 /*
1512 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1513 * or 32 bits protected mode ring 0 code
1514 *
1515 * The tests are ordered by the likelihood of being true during normal execution.
1516 */
1517 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1518 {
1519 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1520 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1521 return false;
1522 }
1523
1524#ifndef VBOX_RAW_V86
1525 if (fFlags & VM_MASK) {
1526 STAM_COUNTER_INC(&gStatRefuseVM86);
1527 Log2(("raw mode refused: VM_MASK\n"));
1528 return false;
1529 }
1530#endif
1531
1532 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1533 {
1534#ifndef DEBUG_bird
1535 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1536#endif
1537 return false;
1538 }
1539
1540 if (env->singlestep_enabled)
1541 {
1542 //Log2(("raw mode refused: Single step\n"));
1543 return false;
1544 }
1545
1546 if (!QTAILQ_EMPTY(&env->breakpoints))
1547 {
1548 //Log2(("raw mode refused: Breakpoints\n"));
1549 return false;
1550 }
1551
1552 if (!QTAILQ_EMPTY(&env->watchpoints))
1553 {
1554 //Log2(("raw mode refused: Watchpoints\n"));
1555 return false;
1556 }
1557
1558 u32CR0 = env->cr[0];
1559 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1560 {
1561 STAM_COUNTER_INC(&gStatRefusePaging);
1562 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1563 return false;
1564 }
1565
1566 if (env->cr[4] & CR4_PAE_MASK)
1567 {
1568 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1569 {
1570 STAM_COUNTER_INC(&gStatRefusePAE);
1571 return false;
1572 }
1573 }
1574
1575 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1576 {
1577 if (!EMIsRawRing3Enabled(env->pVM))
1578 return false;
1579
1580 if (!(env->eflags & IF_MASK))
1581 {
1582 STAM_COUNTER_INC(&gStatRefuseIF0);
1583 Log2(("raw mode refused: IF (RawR3)\n"));
1584 return false;
1585 }
1586
1587 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1588 {
1589 STAM_COUNTER_INC(&gStatRefuseWP0);
1590 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1591 return false;
1592 }
1593 }
1594 else
1595 {
1596 if (!EMIsRawRing0Enabled(env->pVM))
1597 return false;
1598
1599 // Let's start with pure 32 bits ring 0 code first
1600 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1601 {
1602 STAM_COUNTER_INC(&gStatRefuseCode16);
1603 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1604 return false;
1605 }
1606
1607 // Only R0
1608 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1609 {
1610 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1611 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1612 return false;
1613 }
1614
1615 if (!(u32CR0 & CR0_WP_MASK))
1616 {
1617 STAM_COUNTER_INC(&gStatRefuseWP0);
1618 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1619 return false;
1620 }
1621
1622 if (PATMIsPatchGCAddr(env->pVM, eip))
1623 {
1624 Log2(("raw r0 mode forced: patch code\n"));
1625 *piException = EXCP_EXECUTE_RAW;
1626 return true;
1627 }
1628
1629#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1630 if (!(env->eflags & IF_MASK))
1631 {
1632 STAM_COUNTER_INC(&gStatRefuseIF0);
1633 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1634 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1635 return false;
1636 }
1637#endif
1638
1639 env->state |= CPU_RAW_RING0;
1640 }
1641
1642 /*
1643 * Don't reschedule the first time we're called, because there might be
1644 * special reasons why we're here that is not covered by the above checks.
1645 */
1646 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1647 {
1648 Log2(("raw mode refused: first scheduling\n"));
1649 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1650 return false;
1651 }
1652
1653 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1654 *piException = EXCP_EXECUTE_RAW;
1655 return true;
1656}
1657
1658
1659/**
1660 * Fetches a code byte.
1661 *
1662 * @returns Success indicator (bool) for ease of use.
1663 * @param env The CPU environment structure.
1664 * @param GCPtrInstr Where to fetch code.
1665 * @param pu8Byte Where to store the byte on success
1666 */
1667bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1668{
1669 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1670 if (RT_SUCCESS(rc))
1671 return true;
1672 return false;
1673}
1674
1675
1676/**
1677 * Flush (or invalidate if you like) page table/dir entry.
1678 *
1679 * (invlpg instruction; tlb_flush_page)
1680 *
1681 * @param env Pointer to cpu environment.
1682 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1683 */
1684void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1685{
1686 PVM pVM = env->pVM;
1687 PCPUMCTX pCtx;
1688 int rc;
1689
1690 Assert(EMRemIsLockOwner(env->pVM));
1691
1692 /*
1693 * When we're replaying invlpg instructions or restoring a saved
1694 * state we disable this path.
1695 */
1696 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1697 return;
1698 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1699 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1700
1701 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1702
1703 /*
1704 * Update the control registers before calling PGMFlushPage.
1705 */
1706 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1707 Assert(pCtx);
1708 pCtx->cr0 = env->cr[0];
1709 pCtx->cr3 = env->cr[3];
1710 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1711 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1712 pCtx->cr4 = env->cr[4];
1713
1714 /*
1715 * Let PGM do the rest.
1716 */
1717 Assert(env->pVCpu);
1718 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1719 if (RT_FAILURE(rc))
1720 {
1721 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1722 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1723 }
1724 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1725}
1726
1727
#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to a host pointer, encoding status in
 * the two least significant bits of the returned value:
 *  - 1: translation failed (unassigned page or catch-all handler).
 *  - 2: the page is write-monitored (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 *
 * NOTE(review): the fWritable parameter is ignored; PGM is always asked for a
 * writable mapping and write monitoring is conveyed via the tag bits instead.
 * Presumably intentional - confirm against the callers in the TLB code.
 */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* !REM_PHYS_ADDR_IN_TLB */
1750
1751
1752/**
1753 * Called from tlb_protect_code in order to write monitor a code page.
1754 *
1755 * @param env Pointer to the CPU environment.
1756 * @param GCPtr Code page to monitor
1757 */
1758void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1759{
1760#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1761 Assert(env->pVM->rem.s.fInREM);
1762 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1763 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1764 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1765 && !(env->eflags & VM_MASK) /* no V86 mode */
1766 && !HWACCMIsEnabled(env->pVM))
1767 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1768#endif
1769}
1770
1771
1772/**
1773 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1774 *
1775 * @param env Pointer to the CPU environment.
1776 * @param GCPtr Code page to monitor
1777 */
1778void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1779{
1780 Assert(env->pVM->rem.s.fInREM);
1781#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1782 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1783 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1784 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1785 && !(env->eflags & VM_MASK) /* no V86 mode */
1786 && !HWACCMIsEnabled(env->pVM))
1787 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1788#endif
1789}
1790
1791
1792/**
1793 * Called when the CPU is initialized, any of the CRx registers are changed or
1794 * when the A20 line is modified.
1795 *
1796 * @param env Pointer to the CPU environment.
1797 * @param fGlobal Set if the flush is global.
1798 */
1799void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1800{
1801 PVM pVM = env->pVM;
1802 PCPUMCTX pCtx;
1803 Assert(EMRemIsLockOwner(pVM));
1804
1805 /*
1806 * When we're replaying invlpg instructions or restoring a saved
1807 * state we disable this path.
1808 */
1809 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1810 return;
1811 Assert(pVM->rem.s.fInREM);
1812
1813 /*
1814 * The caller doesn't check cr4, so we have to do that for ourselves.
1815 */
1816 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1817 fGlobal = true;
1818 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1819
1820 /*
1821 * Update the control registers before calling PGMR3FlushTLB.
1822 */
1823 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1824 Assert(pCtx);
1825 pCtx->cr0 = env->cr[0];
1826 pCtx->cr3 = env->cr[3];
1827 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1828 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1829 pCtx->cr4 = env->cr[4];
1830
1831 /*
1832 * Let PGM do the rest.
1833 */
1834 Assert(env->pVCpu);
1835 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1836}
1837
1838
1839/**
1840 * Called when any of the cr0, cr4 or efer registers is updated.
1841 *
1842 * @param env Pointer to the CPU environment.
1843 */
1844void remR3ChangeCpuMode(CPUX86State *env)
1845{
1846 PVM pVM = env->pVM;
1847 uint64_t efer;
1848 PCPUMCTX pCtx;
1849 int rc;
1850
1851 /*
1852 * When we're replaying loads or restoring a saved
1853 * state this path is disabled.
1854 */
1855 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1856 return;
1857 Assert(pVM->rem.s.fInREM);
1858
1859 /*
1860 * Update the control registers before calling PGMChangeMode()
1861 * as it may need to map whatever cr3 is pointing to.
1862 */
1863 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1864 Assert(pCtx);
1865 pCtx->cr0 = env->cr[0];
1866 pCtx->cr3 = env->cr[3];
1867 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1868 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1869 pCtx->cr4 = env->cr[4];
1870#ifdef TARGET_X86_64
1871 efer = env->efer;
1872 pCtx->msrEFER = efer;
1873#else
1874 efer = 0;
1875#endif
1876 Assert(env->pVCpu);
1877 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1878 if (rc != VINF_SUCCESS)
1879 {
1880 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1881 {
1882 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1883 remR3RaiseRC(env->pVM, rc);
1884 }
1885 else
1886 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1887 }
1888}
1889
1890
1891/**
1892 * Called from compiled code to run dma.
1893 *
1894 * @param env Pointer to the CPU environment.
1895 */
1896void remR3DmaRun(CPUX86State *env)
1897{
1898 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1899 PDMR3DmaRun(env->pVM);
1900 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1901}
1902
1903
1904/**
1905 * Called from compiled code to schedule pending timers in VMM
1906 *
1907 * @param env Pointer to the CPU environment.
1908 */
1909void remR3TimersRun(CPUX86State *env)
1910{
1911 LogFlow(("remR3TimersRun:\n"));
1912 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1913 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1914 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1915 TMR3TimerQueuesDo(env->pVM);
1916 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1917 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1918}
1919
1920
/**
 * Record trap occurrence, guarding against the guest being stuck
 * re-raising the very same trap over and over again.
 *
 * @returns VBox status code: VINF_SUCCESS normally, or
 *          VERR_REM_TOO_MANY_TRAPS when the same protected-mode trap has
 *          repeated more than 512 times at the same EIP/CR2.
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        /* Lazily register a STAM counter the first time each trap number is seen. */
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (< 0x20) raised in protected, non-V86 mode are rate limited. */
    if(     uTrap < 0x20
        &&  (env->cr[0] & X86_CR0_PE)
        &&  !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Note the side effect: the repeat counter is bumped as part of the test. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap, or the same trap at another EIP/CR2, restarts the count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Other traps (soft ints, real/V86 mode) are recorded but never rate limited. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1980
1981
1982/*
1983 * Clear current active trap
1984 *
1985 * @param pVM VM Handle.
1986 */
1987void remR3TrapClear(PVM pVM)
1988{
1989 pVM->rem.s.cPendingExceptions = 0;
1990 pVM->rem.s.uPendingException = 0;
1991 pVM->rem.s.uPendingExcptEIP = 0;
1992 pVM->rem.s.uPendingExcptCR2 = 0;
1993}
1994
1995
1996/*
1997 * Record previous call instruction addresses
1998 *
1999 * @param env Pointer to the CPU environment.
2000 */
2001void remR3RecordCall(CPUX86State *env)
2002{
2003 CSAMR3RecordCallAddress(env->pVM, env->eip);
2004}
2005
2006
/**
 * Syncs the internal REM state with the VM.
 *
 * This must be called before REMR3Run() is invoked whenever the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 *
 * @remark  The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *          not do this since the majority of the callers don't want any unnecessary events
 *          pending that would immediately interrupt execution.
 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8]  = pCtx->r8;
    pVM->rem.s.Env.regs[9]  = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Only if we're not flushing the TLB.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (pVM->rem.s.cInvalidatedPages)
    {
        if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
        {
            RTUINT i; /* note: shadows the function scope 'i'. */

            /* CR3 load and invlpg notifications from the flushes below must be ignored. */
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreInvlPg = true;
            for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
            {
                Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
                tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
            }
            pVM->rem.s.fIgnoreInvlPg = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer = pCtx->msrEFER;
    pVM->rem.s.Env.star = pCtx->msrSTAR;
    pVM->rem.s.Env.pat  = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /* Update the inhibit IRQ mask. */
    pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
        if (InhibitPC == pCtx->rip)
            pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
        else
        {
            /* The inhibit only applies to the instruction it was recorded for. */
            Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }
    }

    /*
     * Sync the A20 gate.
     */
    bool fA20State = PGMPhysIsA20Enabled(pVCpu);
    if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
    {
        /* Suppress the notifications triggered by the A20 change. */
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
        cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
        )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base  = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base  = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs  = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base     = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit    = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags    = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base     = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit    = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags    = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags    &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int     = 0;
            pVM->rem.s.Env.exception_next_eip   = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int     = 1;
            pVM->rem.s.Env.exception_next_eip   = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case 0x0e:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case 0x11: case 0x08:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2511
2512
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors QEMU couldn't fully sync (lazy/stale 'newselector' set). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A VME change requires the TSS to be resynced (I/O & interrupt redirection bitmaps). */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-set the busy bit REMR3State cleared (real CPUs keep TR marked busy). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    /* Inhibit interrupt flag. */
    if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
    {
        Log(("Settings VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
        EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    }
    else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    }

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        /* Hand the pending exception back to TRPM for delivery. */
        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2785
2786
2787/**
2788 * This is called by the disassembler when it wants to update the cpu state
2789 * before for instance doing a register dump.
2790 */
2791static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2792{
2793 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2794 unsigned i;
2795
2796 Assert(pVM->rem.s.fInREM);
2797
2798 /*
2799 * Copy back the registers.
2800 * This is done in the order they are declared in the CPUMCTX structure.
2801 */
2802
2803 /** @todo FOP */
2804 /** @todo FPUIP */
2805 /** @todo CS */
2806 /** @todo FPUDP */
2807 /** @todo DS */
2808 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2809 pCtx->fpu.MXCSR = 0;
2810 pCtx->fpu.MXCSR_MASK = 0;
2811
2812 /** @todo check if FPU/XMM was actually used in the recompiler */
2813 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2814//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2815
2816#ifdef TARGET_X86_64
2817 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2818 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2819 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2820 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2821 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2822 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2823 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2824 pCtx->r8 = pVM->rem.s.Env.regs[8];
2825 pCtx->r9 = pVM->rem.s.Env.regs[9];
2826 pCtx->r10 = pVM->rem.s.Env.regs[10];
2827 pCtx->r11 = pVM->rem.s.Env.regs[11];
2828 pCtx->r12 = pVM->rem.s.Env.regs[12];
2829 pCtx->r13 = pVM->rem.s.Env.regs[13];
2830 pCtx->r14 = pVM->rem.s.Env.regs[14];
2831 pCtx->r15 = pVM->rem.s.Env.regs[15];
2832
2833 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2834#else
2835 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2836 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2837 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2838 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2839 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2840 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2841 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2842
2843 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2844#endif
2845
2846 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2847
2848 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2849 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2850 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2851 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2852 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2853
2854#ifdef TARGET_X86_64
2855 pCtx->rip = pVM->rem.s.Env.eip;
2856 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2857#else
2858 pCtx->eip = pVM->rem.s.Env.eip;
2859 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2860#endif
2861
2862 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2863 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2864 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2865 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2866 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2867 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2868
2869 for (i = 0; i < 8; i++)
2870 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2871
2872 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2873 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2874 {
2875 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2876 STAM_COUNTER_INC(&gStatREMGDTChange);
2877 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2878 }
2879
2880 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2881 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2882 {
2883 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2884 STAM_COUNTER_INC(&gStatREMIDTChange);
2885 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2886 }
2887
2888 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2889 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2890 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2891 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2892 {
2893 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2894 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2895 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2896 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2897 STAM_COUNTER_INC(&gStatREMLDTRChange);
2898 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2899 }
2900
2901 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2902 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2903 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2904 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2905 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2906 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2907 : 0) )
2908 {
2909 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2910 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2911 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2912 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2913 pCtx->tr = pVM->rem.s.Env.tr.selector;
2914 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2915 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2916 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2917 if (pCtx->trHid.Attr.u)
2918 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2919 STAM_COUNTER_INC(&gStatREMTRChange);
2920 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2921 }
2922
2923 /** @todo These values could still be out of sync! */
2924 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2925 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2926 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2927 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2928
2929 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2930 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2931 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2932
2933 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2934 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2935 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2936
2937 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2938 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2939 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2940
2941 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2942 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2943 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2944
2945 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2946 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2947 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2948
2949 /* Sysenter MSR */
2950 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2951 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2952 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2953
2954 /* System MSRs. */
2955 pCtx->msrEFER = pVM->rem.s.Env.efer;
2956 pCtx->msrSTAR = pVM->rem.s.Env.star;
2957 pCtx->msrPAT = pVM->rem.s.Env.pat;
2958#ifdef TARGET_X86_64
2959 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2960 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2961 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2962 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2963#endif
2964
2965}
2966
2967
2968/**
2969 * Update the VMM state information if we're currently in REM.
2970 *
2971 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2972 * we're currently executing in REM and the VMM state is invalid. This method will of
2973 * course check that we're executing in REM before syncing any data over to the VMM.
2974 *
2975 * @param pVM The VM handle.
2976 * @param pVCpu The VMCPU handle.
2977 */
2978REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2979{
2980 if (pVM->rem.s.fInREM)
2981 remR3StateUpdate(pVM, pVCpu);
2982}
2983
2984
2985#undef LOG_GROUP
2986#define LOG_GROUP LOG_GROUP_REM
2987
2988
2989/**
2990 * Notify the recompiler about Address Gate 20 state change.
2991 *
2992 * This notification is required since A20 gate changes are
2993 * initialized from a device driver and the VM might just as
2994 * well be in REM mode as in RAW mode.
2995 *
2996 * @param pVM VM handle.
2997 * @param pVCpu VMCPU handle.
2998 * @param fEnable True if the gate should be enabled.
2999 * False if the gate should be disabled.
3000 */
3001REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
3002{
3003 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
3004 VM_ASSERT_EMT(pVM);
3005
3006 /** @todo SMP and the A20 gate... */
3007 if (pVM->rem.s.Env.pVCpu == pVCpu)
3008 {
3009 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3010 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
3011 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3012 }
3013}
3014
3015
3016/**
3017 * Replays the handler notification changes
3018 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
3019 *
3020 * @param pVM VM handle.
3021 */
3022REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
3023{
3024 /*
3025 * Replay the flushes.
3026 */
3027 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3028 VM_ASSERT_EMT(pVM);
3029
3030 /** @todo this isn't ensuring correct replay order. */
3031 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3032 {
3033 uint32_t idxNext;
3034 uint32_t idxRevHead;
3035 uint32_t idxHead;
3036#ifdef VBOX_STRICT
3037 int32_t c = 0;
3038#endif
3039
3040 /* Lockless purging of pending notifications. */
3041 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3042 if (idxHead == UINT32_MAX)
3043 return;
3044 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3045
3046 /*
3047 * Reverse the list to process it in FIFO order.
3048 */
3049 idxRevHead = UINT32_MAX;
3050 do
3051 {
3052 /* Save the index of the next rec. */
3053 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3054 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3055 /* Push the record onto the reversed list. */
3056 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3057 idxRevHead = idxHead;
3058 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3059 /* Advance. */
3060 idxHead = idxNext;
3061 } while (idxHead != UINT32_MAX);
3062
3063 /*
3064 * Loop thru the list, reinserting the record into the free list as they are
3065 * processed to avoid having other EMTs running out of entries while we're flushing.
3066 */
3067 idxHead = idxRevHead;
3068 do
3069 {
3070 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3071 uint32_t idxCur;
3072 Assert(--c >= 0);
3073
3074 switch (pCur->enmKind)
3075 {
3076 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3077 remR3NotifyHandlerPhysicalRegister(pVM,
3078 pCur->u.PhysicalRegister.enmType,
3079 pCur->u.PhysicalRegister.GCPhys,
3080 pCur->u.PhysicalRegister.cb,
3081 pCur->u.PhysicalRegister.fHasHCHandler);
3082 break;
3083
3084 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3085 remR3NotifyHandlerPhysicalDeregister(pVM,
3086 pCur->u.PhysicalDeregister.enmType,
3087 pCur->u.PhysicalDeregister.GCPhys,
3088 pCur->u.PhysicalDeregister.cb,
3089 pCur->u.PhysicalDeregister.fHasHCHandler,
3090 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3091 break;
3092
3093 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3094 remR3NotifyHandlerPhysicalModify(pVM,
3095 pCur->u.PhysicalModify.enmType,
3096 pCur->u.PhysicalModify.GCPhysOld,
3097 pCur->u.PhysicalModify.GCPhysNew,
3098 pCur->u.PhysicalModify.cb,
3099 pCur->u.PhysicalModify.fHasHCHandler,
3100 pCur->u.PhysicalModify.fRestoreAsRAM);
3101 break;
3102
3103 default:
3104 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3105 break;
3106 }
3107
3108 /*
3109 * Advance idxHead.
3110 */
3111 idxCur = idxHead;
3112 idxHead = pCur->idxNext;
3113 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3114
3115 /*
3116 * Put the record back into the free list.
3117 */
3118 do
3119 {
3120 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3121 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3122 ASMCompilerBarrier();
3123 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3124 } while (idxHead != UINT32_MAX);
3125
3126#ifdef VBOX_STRICT
3127 if (pVM->cCpus == 1)
3128 {
3129 unsigned c;
3130 /* Check that all records are now on the free list. */
3131 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3132 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3133 c++;
3134 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3135 }
3136#endif
3137 }
3138}
3139
3140
3141/**
3142 * Notify REM about changed code page.
3143 *
3144 * @returns VBox status code.
3145 * @param pVM VM handle.
3146 * @param pVCpu VMCPU handle.
3147 * @param pvCodePage Code page address
3148 */
3149REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3150{
3151#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3152 int rc;
3153 RTGCPHYS PhysGC;
3154 uint64_t flags;
3155
3156 VM_ASSERT_EMT(pVM);
3157
3158 /*
3159 * Get the physical page address.
3160 */
3161 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3162 if (rc == VINF_SUCCESS)
3163 {
3164 /*
3165 * Sync the required registers and flush the whole page.
3166 * (Easier to do the whole page than notifying it about each physical
3167 * byte that was changed.
3168 */
3169 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3170 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3171 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3172 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3173
3174 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3175 }
3176#endif
3177 return VINF_SUCCESS;
3178}
3179
3180
3181/**
3182 * Notification about a successful MMR3PhysRegister() call.
3183 *
3184 * @param pVM VM handle.
3185 * @param GCPhys The physical address the RAM.
3186 * @param cb Size of the memory.
3187 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3188 */
3189REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3190{
3191 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3192 VM_ASSERT_EMT(pVM);
3193
3194 /*
3195 * Validate input - we trust the caller.
3196 */
3197 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3198 Assert(cb);
3199 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3200 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3201
3202 /*
3203 * Base ram? Update GCPhysLastRam.
3204 */
3205 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3206 {
3207 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3208 {
3209 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3210 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3211 }
3212 }
3213
3214 /*
3215 * Register the ram.
3216 */
3217 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3218
3219 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3220 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3221 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3222
3223 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3224}
3225
3226
3227/**
3228 * Notification about a successful MMR3PhysRomRegister() call.
3229 *
3230 * @param pVM VM handle.
3231 * @param GCPhys The physical address of the ROM.
3232 * @param cb The size of the ROM.
3233 * @param pvCopy Pointer to the ROM copy.
3234 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3235 * This function will be called when ever the protection of the
3236 * shadow ROM changes (at reset and end of POST).
3237 */
3238REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3239{
3240 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3241 VM_ASSERT_EMT(pVM);
3242
3243 /*
3244 * Validate input - we trust the caller.
3245 */
3246 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3247 Assert(cb);
3248 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3249
3250 /*
3251 * Register the rom.
3252 */
3253 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3254
3255 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3256 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3257 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3258
3259 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3260}
3261
3262
3263/**
3264 * Notification about a successful memory deregistration or reservation.
3265 *
3266 * @param pVM VM Handle.
3267 * @param GCPhys Start physical address.
3268 * @param cb The size of the range.
3269 */
3270REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3271{
3272 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3273 VM_ASSERT_EMT(pVM);
3274
3275 /*
3276 * Validate input - we trust the caller.
3277 */
3278 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3279 Assert(cb);
3280 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3281
3282 /*
3283 * Unassigning the memory.
3284 */
3285 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3286
3287 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3288 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3289 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3290
3291 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3292}
3293
3294
3295/**
3296 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3297 *
3298 * @param pVM VM Handle.
3299 * @param enmType Handler type.
3300 * @param GCPhys Handler range address.
3301 * @param cb Size of the handler range.
3302 * @param fHasHCHandler Set if the handler has a HC callback function.
3303 *
3304 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3305 * Handler memory type to memory which has no HC handler.
3306 */
3307static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3308{
3309 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3310 enmType, GCPhys, cb, fHasHCHandler));
3311
3312 VM_ASSERT_EMT(pVM);
3313 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3314 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3315
3316
3317 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3318
3319 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3320 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3321 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3322 else if (fHasHCHandler)
3323 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3324 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3325
3326 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3327}
3328
3329/**
3330 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3331 *
3332 * @param pVM VM Handle.
3333 * @param enmType Handler type.
3334 * @param GCPhys Handler range address.
3335 * @param cb Size of the handler range.
3336 * @param fHasHCHandler Set if the handler has a HC callback function.
3337 *
3338 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3339 * Handler memory type to memory which has no HC handler.
3340 */
3341REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3342{
3343 REMR3ReplayHandlerNotifications(pVM);
3344
3345 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3346}
3347
3348/**
3349 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3350 *
3351 * @param pVM VM Handle.
3352 * @param enmType Handler type.
3353 * @param GCPhys Handler range address.
3354 * @param cb Size of the handler range.
3355 * @param fHasHCHandler Set if the handler has a HC callback function.
3356 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3357 */
3358static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3359{
3360 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3361 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3362 VM_ASSERT_EMT(pVM);
3363
3364
3365 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3366
3367 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3368 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3369 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3370 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3371 else if (fHasHCHandler)
3372 {
3373 if (!fRestoreAsRAM)
3374 {
3375 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3376 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3377 }
3378 else
3379 {
3380 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3381 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3382 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3383 }
3384 }
3385 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3386
3387 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3388}
3389
3390/**
3391 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3392 *
3393 * @param pVM VM Handle.
3394 * @param enmType Handler type.
3395 * @param GCPhys Handler range address.
3396 * @param cb Size of the handler range.
3397 * @param fHasHCHandler Set if the handler has a HC callback function.
3398 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3399 */
3400REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3401{
3402 REMR3ReplayHandlerNotifications(pVM);
3403 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3404}
3405
3406
3407/**
3408 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3409 *
3410 * @param pVM VM Handle.
3411 * @param enmType Handler type.
3412 * @param GCPhysOld Old handler range address.
3413 * @param GCPhysNew New handler range address.
3414 * @param cb Size of the handler range.
3415 * @param fHasHCHandler Set if the handler has a HC callback function.
3416 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3417 */
3418static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3419{
3420 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3421 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3422 VM_ASSERT_EMT(pVM);
3423 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3424
3425 if (fHasHCHandler)
3426 {
3427 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3428
3429 /*
3430 * Reset the old page.
3431 */
3432 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3433 if (!fRestoreAsRAM)
3434 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3435 else
3436 {
3437 /* This is not perfect, but it'll do for PD monitoring... */
3438 Assert(cb == PAGE_SIZE);
3439 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3440 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3441 }
3442
3443 /*
3444 * Update the new page.
3445 */
3446 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3447 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3448 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3449 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3450
3451 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3452 }
3453}
3454
3455/**
3456 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3457 *
3458 * @param pVM VM Handle.
3459 * @param enmType Handler type.
3460 * @param GCPhysOld Old handler range address.
3461 * @param GCPhysNew New handler range address.
3462 * @param cb Size of the handler range.
3463 * @param fHasHCHandler Set if the handler has a HC callback function.
3464 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3465 */
3466REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3467{
3468 REMR3ReplayHandlerNotifications(pVM);
3469
3470 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3471}
3472
3473/**
3474 * Checks if we're handling access to this page or not.
3475 *
3476 * @returns true if we're trapping access.
3477 * @returns false if we aren't.
3478 * @param pVM The VM handle.
3479 * @param GCPhys The physical address.
3480 *
3481 * @remark This function will only work correctly in VBOX_STRICT builds!
3482 */
3483REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3484{
3485#ifdef VBOX_STRICT
3486 unsigned long off;
3487 REMR3ReplayHandlerNotifications(pVM);
3488
3489 off = get_phys_page_offset(GCPhys);
3490 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3491 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3492 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3493#else
3494 return false;
3495#endif
3496}
3497
3498
3499/**
3500 * Deals with a rare case in get_phys_addr_code where the code
3501 * is being monitored.
3502 *
3503 * It could also be an MMIO page, in which case we will raise a fatal error.
3504 *
3505 * @returns The physical address corresponding to addr.
3506 * @param env The cpu environment.
3507 * @param addr The virtual address.
3508 * @param pTLBEntry The TLB entry.
3509 */
3510target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3511 target_ulong addr,
3512 CPUTLBEntry *pTLBEntry,
3513 target_phys_addr_t ioTLBEntry)
3514{
3515 PVM pVM = env->pVM;
3516
3517 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3518 {
3519 /* If code memory is being monitored, appropriate IOTLB entry will have
3520 handler IO type, and addend will provide real physical address, no
3521 matter if we store VA in TLB or not, as handlers are always passed PA */
3522 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3523 return ret;
3524 }
3525 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3526 "*** handlers\n",
3527 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3528 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3529 LogRel(("*** mmio\n"));
3530 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3531 LogRel(("*** phys\n"));
3532 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3533 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3534 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3535 AssertFatalFailed();
3536}
3537
3538/**
3539 * Read guest RAM and ROM.
3540 *
3541 * @param SrcGCPhys The source address (guest physical).
3542 * @param pvDst The destination address.
3543 * @param cb Number of bytes
3544 */
3545void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3546{
3547 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3548 VBOX_CHECK_ADDR(SrcGCPhys);
3549 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3550#ifdef VBOX_DEBUG_PHYS
3551 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3552#endif
3553 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3554}
3555
3556
3557/**
3558 * Read guest RAM and ROM, unsigned 8-bit.
3559 *
3560 * @param SrcGCPhys The source address (guest physical).
3561 */
3562RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3563{
3564 uint8_t val;
3565 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3566 VBOX_CHECK_ADDR(SrcGCPhys);
3567 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3568 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3569#ifdef VBOX_DEBUG_PHYS
3570 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3571#endif
3572 return val;
3573}
3574
3575
3576/**
3577 * Read guest RAM and ROM, signed 8-bit.
3578 *
3579 * @param SrcGCPhys The source address (guest physical).
3580 */
3581RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3582{
3583 int8_t val;
3584 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3585 VBOX_CHECK_ADDR(SrcGCPhys);
3586 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3587 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3588#ifdef VBOX_DEBUG_PHYS
3589 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3590#endif
3591 return val;
3592}
3593
3594
3595/**
3596 * Read guest RAM and ROM, unsigned 16-bit.
3597 *
3598 * @param SrcGCPhys The source address (guest physical).
3599 */
3600RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3601{
3602 uint16_t val;
3603 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3604 VBOX_CHECK_ADDR(SrcGCPhys);
3605 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3606 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3607#ifdef VBOX_DEBUG_PHYS
3608 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3609#endif
3610 return val;
3611}
3612
3613
3614/**
3615 * Read guest RAM and ROM, signed 16-bit.
3616 *
3617 * @param SrcGCPhys The source address (guest physical).
3618 */
3619RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3620{
3621 int16_t val;
3622 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3623 VBOX_CHECK_ADDR(SrcGCPhys);
3624 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3625 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3626#ifdef VBOX_DEBUG_PHYS
3627 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3628#endif
3629 return val;
3630}
3631
3632
3633/**
3634 * Read guest RAM and ROM, unsigned 32-bit.
3635 *
3636 * @param SrcGCPhys The source address (guest physical).
3637 */
3638RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3639{
3640 uint32_t val;
3641 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3642 VBOX_CHECK_ADDR(SrcGCPhys);
3643 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3644 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3645#ifdef VBOX_DEBUG_PHYS
3646 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3647#endif
3648 return val;
3649}
3650
3651
3652/**
3653 * Read guest RAM and ROM, signed 32-bit.
3654 *
3655 * @param SrcGCPhys The source address (guest physical).
3656 */
3657RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3658{
3659 int32_t val;
3660 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3661 VBOX_CHECK_ADDR(SrcGCPhys);
3662 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3663 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3664#ifdef VBOX_DEBUG_PHYS
3665 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3666#endif
3667 return val;
3668}
3669
3670
3671/**
3672 * Read guest RAM and ROM, unsigned 64-bit.
3673 *
3674 * @param SrcGCPhys The source address (guest physical).
3675 */
3676uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3677{
3678 uint64_t val;
3679 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3680 VBOX_CHECK_ADDR(SrcGCPhys);
3681 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3682 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3683#ifdef VBOX_DEBUG_PHYS
3684 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3685#endif
3686 return val;
3687}
3688
3689
3690/**
3691 * Read guest RAM and ROM, signed 64-bit.
3692 *
3693 * @param SrcGCPhys The source address (guest physical).
3694 */
3695int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3696{
3697 int64_t val;
3698 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3699 VBOX_CHECK_ADDR(SrcGCPhys);
3700 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3701 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3702#ifdef VBOX_DEBUG_PHYS
3703 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3704#endif
3705 return val;
3706}
3707
3708
3709/**
3710 * Write guest RAM.
3711 *
3712 * @param DstGCPhys The destination address (guest physical).
3713 * @param pvSrc The source address.
3714 * @param cb Number of bytes to write
3715 */
3716void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3717{
3718 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3719 VBOX_CHECK_ADDR(DstGCPhys);
3720 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3721 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3722#ifdef VBOX_DEBUG_PHYS
3723 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3724#endif
3725}
3726
3727
3728/**
3729 * Write guest RAM, unsigned 8-bit.
3730 *
3731 * @param DstGCPhys The destination address (guest physical).
3732 * @param val Value
3733 */
3734void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3735{
3736 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3737 VBOX_CHECK_ADDR(DstGCPhys);
3738 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3739 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3740#ifdef VBOX_DEBUG_PHYS
3741 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3742#endif
3743}
3744
3745
3746/**
3747 * Write guest RAM, unsigned 8-bit.
3748 *
3749 * @param DstGCPhys The destination address (guest physical).
3750 * @param val Value
3751 */
3752void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3753{
3754 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3755 VBOX_CHECK_ADDR(DstGCPhys);
3756 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3757 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3758#ifdef VBOX_DEBUG_PHYS
3759 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3760#endif
3761}
3762
3763
3764/**
3765 * Write guest RAM, unsigned 32-bit.
3766 *
3767 * @param DstGCPhys The destination address (guest physical).
3768 * @param val Value
3769 */
3770void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3771{
3772 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3773 VBOX_CHECK_ADDR(DstGCPhys);
3774 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3775 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3776#ifdef VBOX_DEBUG_PHYS
3777 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3778#endif
3779}
3780
3781
3782/**
3783 * Write guest RAM, unsigned 64-bit.
3784 *
3785 * @param DstGCPhys The destination address (guest physical).
3786 * @param val Value
3787 */
3788void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3789{
3790 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3791 VBOX_CHECK_ADDR(DstGCPhys);
3792 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3793 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3794#ifdef VBOX_DEBUG_PHYS
3795 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3796#endif
3797}
3798
3799#undef LOG_GROUP
3800#define LOG_GROUP LOG_GROUP_REM_MMIO
3801
/** Read MMIO memory, 8-bit (zero-extended to 32 bits). Failures are asserted and otherwise ignored. */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
    return u32;
}

/** Read MMIO memory, 16-bit (zero-extended to 32 bits). */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
    return u32;
}

/** Read MMIO memory, 32-bit. */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
    return u32;
}

/** Write to MMIO memory, 8-bit. Failures are asserted and otherwise ignored. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}

/** Write to MMIO memory, 16-bit. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}

/** Write to MMIO memory, 32-bit. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3858
3859
3860#undef LOG_GROUP
3861#define LOG_GROUP LOG_GROUP_REM_HANDLER
3862
/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */

/** Handler-range read, 8-bit: forwards to PGMPhysRead (return value ignored). */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}

/** Handler-range read, 16-bit. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}

/** Handler-range read, 32-bit. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}

/* Note: the write helpers below pass the address of the 32-bit parameter but
   only write sizeof(uintN_t) bytes, i.e. the first bytes in memory — which are
   the least significant ones only on a little-endian host. */

static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}

static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}

static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3906
3907/* -+- disassembly -+- */
3908
3909#undef LOG_GROUP
3910#define LOG_GROUP LOG_GROUP_REM_DISAS
3911
3912
3913/**
3914 * Enables or disables singled stepped disassembly.
3915 *
3916 * @returns VBox status code.
3917 * @param pVM VM handle.
3918 * @param fEnable To enable set this flag, to disable clear it.
3919 */
3920static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3921{
3922 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3923 VM_ASSERT_EMT(pVM);
3924
3925 if (fEnable)
3926 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3927 else
3928 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3929#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3930 cpu_single_step(&pVM->rem.s.Env, fEnable);
3931#endif
3932 return VINF_SUCCESS;
3933}
3934
3935
3936/**
3937 * Enables or disables singled stepped disassembly.
3938 *
3939 * @returns VBox status code.
3940 * @param pVM VM handle.
3941 * @param fEnable To enable set this flag, to disable clear it.
3942 */
3943REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3944{
3945 int rc;
3946
3947 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3948 if (VM_IS_EMT(pVM))
3949 return remR3DisasEnableStepping(pVM, fEnable);
3950
3951 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3952 AssertRC(rc);
3953 return rc;
3954}
3955
3956
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument the current setting is printed; with one boolean-convertible
 * argument single stepped disassembly is enabled or disabled accordingly.
 *
 * @returns VBox status code.
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     The command helper callbacks.
 * @param   pVM         The VM handle.
 * @param   paArgs      The argument array.
 * @param   cArgs       Number of arguments (0 or 1).
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    int rc;

    if (cArgs == 0)
        /*
         * Print the current status.
         */
        rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
                              pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
    else
    {
        /*
         * Convert the argument and change the mode.
         */
        bool fEnable;
        rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
        if (RT_SUCCESS(rc))
        {
            rc = REMR3DisasEnableStepping(pVM, fEnable);
            if (RT_SUCCESS(rc))
                rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
            else
                rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
        }
        else
            rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
    }
    return rc;
}
#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3992
3993
3994/**
3995 * Disassembles one instruction and prints it to the log.
3996 *
3997 * @returns Success indicator.
3998 * @param env Pointer to the recompiler CPU structure.
3999 * @param f32BitCode Indicates that whether or not the code should
4000 * be disassembled as 16 or 32 bit. If -1 the CS
4001 * selector will be inspected.
4002 * @param pszPrefix
4003 */
4004bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
4005{
4006 PVM pVM = env->pVM;
4007 const bool fLog = LogIsEnabled();
4008 const bool fLog2 = LogIs2Enabled();
4009 int rc = VINF_SUCCESS;
4010
4011 /*
4012 * Don't bother if there ain't any log output to do.
4013 */
4014 if (!fLog && !fLog2)
4015 return true;
4016
4017 /*
4018 * Update the state so DBGF reads the correct register values.
4019 */
4020 remR3StateUpdate(pVM, env->pVCpu);
4021
4022 /*
4023 * Log registers if requested.
4024 */
4025 if (fLog2)
4026 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
4027
4028 /*
4029 * Disassemble to log.
4030 */
4031 if (fLog)
4032 {
4033 PVMCPU pVCpu = VMMGetCpu(pVM);
4034 char szBuf[256];
4035 szBuf[0] = '\0';
4036 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
4037 pVCpu->idCpu,
4038 0, /* Sel */
4039 0, /* GCPtr */
4040 DBGF_DISAS_FLAGS_CURRENT_GUEST
4041 | DBGF_DISAS_FLAGS_DEFAULT_MODE
4042 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
4043 szBuf,
4044 sizeof(szBuf),
4045 NULL);
4046 if (RT_FAILURE(rc))
4047 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4048 if (pszPrefix && *pszPrefix)
4049 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4050 else
4051 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4052 }
4053
4054 return RT_SUCCESS(rc);
4055}
4056
4057
/**
 * Disassemble recompiled code.
 *
 * @param   phFile      Ignored, logfile usually.
 * @param   pvCode      Pointer to the code block.
 * @param   cb          Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
    if (LogIs2Enabled())
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                RTLogPrintf("%s", szOutput);
            else
            {
                RTLogPrintf("disas error\n");
                /* Resynchronize by skipping a single byte on x86; on AMD64 the
                   disassembler cannot recover, so give up on this block. */
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }
}
4098
4099
4100/**
4101 * Disassemble guest code.
4102 *
4103 * @param phFileIgnored Ignored, logfile usually.
4104 * @param uCode The guest address of the code to disassemble. (flat?)
4105 * @param cb Number of bytes to disassemble.
4106 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4107 */
4108void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4109{
4110 if (LogIs2Enabled())
4111 {
4112 PVM pVM = cpu_single_env->pVM;
4113 PVMCPU pVCpu = cpu_single_env->pVCpu;
4114 RTSEL cs;
4115 RTGCUINTPTR eip;
4116
4117 Assert(pVCpu);
4118
4119 /*
4120 * Update the state so DBGF reads the correct register values (flags).
4121 */
4122 remR3StateUpdate(pVM, pVCpu);
4123
4124 /*
4125 * Do the disassembling.
4126 */
4127 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4128 cs = cpu_single_env->segs[R_CS].selector;
4129 eip = uCode - cpu_single_env->segs[R_CS].base;
4130 for (;;)
4131 {
4132 char szBuf[256];
4133 uint32_t cbInstr;
4134 int rc = DBGFR3DisasInstrEx(pVM,
4135 pVCpu->idCpu,
4136 cs,
4137 eip,
4138 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4139 szBuf, sizeof(szBuf),
4140 &cbInstr);
4141 if (RT_SUCCESS(rc))
4142 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4143 else
4144 {
4145 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4146 cbInstr = 1;
4147 }
4148
4149 /* next */
4150 if (cb <= cbInstr)
4151 break;
4152 cb -= cbInstr;
4153 uCode += cbInstr;
4154 eip += cbInstr;
4155 }
4156 }
4157}
4158
4159
4160/**
4161 * Looks up a guest symbol.
4162 *
4163 * @returns Pointer to symbol name. This is a static buffer.
4164 * @param orig_addr The address in question.
4165 */
4166const char *lookup_symbol(target_ulong orig_addr)
4167{
4168 PVM pVM = cpu_single_env->pVM;
4169 RTGCINTPTR off = 0;
4170 RTDBGSYMBOL Sym;
4171 DBGFADDRESS Addr;
4172
4173 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4174 if (RT_SUCCESS(rc))
4175 {
4176 static char szSym[sizeof(Sym.szName) + 48];
4177 if (!off)
4178 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4179 else if (off > 0)
4180 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4181 else
4182 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4183 return szSym;
4184 }
4185 return "<N/A>";
4186}
4187
4188
4189#undef LOG_GROUP
4190#define LOG_GROUP LOG_GROUP_REM
4191
4192
4193/* -+- FF notifications -+- */
4194
4195
/**
 * Notification about a pending interrupt.
 *
 * Only one interrupt may be pending at a time (asserted below).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @param   u8Interrupt     Interrupt
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}

/**
 * Gets the pending interrupt, if any.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    return pVM->rem.s.u32PendingInterrupt;
}

/**
 * Notification about the interrupt FF being set.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        /* Atomic because the request may be posted from another thread. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_HARD);
    }
#endif
}


/**
 * Notification about the interrupt FF being cleared.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}


/**
 * Notification about pending timer(s).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpuDst        The target cpu for this notification.
 *                          TM will not broadcast pending timer events, but use
 *                          a dedicated EMT for them. So, only interrupt REM
 *                          execution if the given CPU is executing in REM.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
{
#ifndef IEM_VERIFICATION_MODE
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        if (pVM->rem.s.Env.pVCpu == pVCpuDst)
        {
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
            ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_TIMER);
        }
        else
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
    }
    else
        LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
#endif
}


/**
 * Notification about pending DMA transfers.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_DMA);
    }
#endif
}


/**
 * Notification about pending queue items, forcing a REM exit.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
#endif
}


/**
 * Notification about pending FF set by an external thread.
 *
 * @param   pVM             VM handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
#endif
}
4347
4348
#ifdef VBOX_WITH_STATISTICS
/**
 * Starts the advanced profiling counter for the given STATS_* code.
 *
 * @param   statcode    STATS_* identifier; unknown codes are asserted and
 *                      otherwise ignored.
 */
void remR3ProfileStart(int statcode)
{
    STAMPROFILEADV *pStat;
    switch(statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:
            pStat = &gStatExecuteSingleInstr;
            break;
        case STATS_QEMU_COMPILATION:
            pStat = &gStatCompilationQEmu;
            break;
        case STATS_QEMU_RUN_EMULATED_CODE:
            pStat = &gStatRunCodeQEmu;
            break;
        case STATS_QEMU_TOTAL:
            pStat = &gStatTotalTimeQEmu;
            break;
        case STATS_QEMU_RUN_TIMERS:
            pStat = &gStatTimers;
            break;
        case STATS_TLB_LOOKUP:
            pStat= &gStatTBLookup;
            break;
        case STATS_IRQ_HANDLING:
            pStat= &gStatIRQ;
            break;
        case STATS_RAW_CHECK:
            pStat = &gStatRawCheck;
            break;

        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return;
    }
    STAM_PROFILE_ADV_START(pStat, a);
}


/**
 * Stops the advanced profiling counter for the given STATS_* code.
 *
 * Must be paired with a preceding remR3ProfileStart for the same code.
 *
 * @param   statcode    STATS_* identifier; unknown codes are asserted and
 *                      otherwise ignored.
 */
void remR3ProfileStop(int statcode)
{
    STAMPROFILEADV *pStat;
    switch(statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:
            pStat = &gStatExecuteSingleInstr;
            break;
        case STATS_QEMU_COMPILATION:
            pStat = &gStatCompilationQEmu;
            break;
        case STATS_QEMU_RUN_EMULATED_CODE:
            pStat = &gStatRunCodeQEmu;
            break;
        case STATS_QEMU_TOTAL:
            pStat = &gStatTotalTimeQEmu;
            break;
        case STATS_QEMU_RUN_TIMERS:
            pStat = &gStatTimers;
            break;
        case STATS_TLB_LOOKUP:
            pStat= &gStatTBLookup;
            break;
        case STATS_IRQ_HANDLING:
            pStat= &gStatIRQ;
            break;
        case STATS_RAW_CHECK:
            pStat = &gStatRawCheck;
            break;
        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return;
    }
    STAM_PROFILE_ADV_STOP(pStat, a);
}
#endif
4424
/**
 * Raise an RC, force rem exit.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Stash the status code and interrupt the recompiler so it exits. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4439
4440
/* -+- timers -+- */

/** Returns the guest TSC via TM (access counted by gStatCpuGetTSC). */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}


/* -+- interrupts -+- */

/** Asserts ISA IRQ 13 to signal an FPU error (FERR). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}

/**
 * Gets the next pending hardware interrupt vector for the recompiler,
 * or -1 if none is available.
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD raised while more interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4491
4492
/* -+- local apic -+- */

#if 0 /* CPUMSetGuestMsr does this now. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
#endif

/** Gets the APIC base MSR value via PDM; returns 0 if the query fails. */
uint64_t cpu_get_apic_base(CPUX86State *env)
{
    uint64_t u64;
    int rc = PDMApicGetBase(env->pVM, &u64);
    if (RT_SUCCESS(rc))
    {
        LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
        return u64;
    }
    LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
    return 0;
}

/** Sets the task priority from the CR8 value (bits 3-0). */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}

/** Gets the task priority in CR8 format (bits 3-0); returns 0 if the query fails. */
uint8_t cpu_get_apic_tpr(CPUX86State *env)
{
    uint8_t u8;
    int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
    if (RT_SUCCESS(rc))
    {
        LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
        return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    }
    LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
    return 0;
}
4534
/**
 * Read an MSR.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env         The cpu state.
 * @param   idMsr       The MSR to read.
 * @param   puValue     Where to return the value.
 */
int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
{
    Assert(env->pVCpu);
    return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
}

/**
 * Write to an MSR.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env         The cpu state.
 * @param   idMsr       The MSR to write.
 * @param   uValue      The value to write.
 */
int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
{
    Assert(env->pVCpu);
    return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
}
4564
4565/* -+- I/O Ports -+- */
4566
4567#undef LOG_GROUP
4568#define LOG_GROUP LOG_GROUP_REM_IOPORT
4569
/** Writes an 8-bit value to an I/O port; EM status codes force a REM exit via remR3RaiseRC. */
void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
{
    int rc;

    /* Skip logging of very frequently written ports to keep the log usable. */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}

/** Writes a 16-bit value to an I/O port. */
void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
{
    //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
    int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}

/** Writes a 32-bit value to an I/O port. */
void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
{
    int rc;
    Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}

/** Reads an 8-bit value from an I/O port; returns 0xff on abort paths. */
uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (uint8_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint8_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT8_C(0xff);
}

/** Reads a 16-bit value from an I/O port; returns 0xffff on abort paths. */
uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
        return (uint16_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint16_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT16_C(0xffff);
}

/** Reads a 32-bit value from an I/O port; returns 0xffffffff on abort paths. */
uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
//if (addr==0x01f0 && u32 == 0x6b6d)
//    loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT32_C(0xffffffff);
}
4679
4680#undef LOG_GROUP
4681#define LOG_GROUP LOG_GROUP_REM
4682
4683
4684/* -+- helpers and misc other interfaces -+- */
4685
/**
 * Perform the CPUID instruction.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   idx         The CPUID leaf (eax).
 * @param   idxSub      The CPUID sub-leaf (ecx) where applicable (currently ignored).
 * @param   pEAX        Where to store eax.
 * @param   pEBX        Where to store ebx.
 * @param   pECX        Where to store ecx.
 * @param   pEDX        Where to store edx.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
                   uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
{
    NOREF(idxSub); /* CPUMGetGuestCpuId does not take the sub-leaf. */
    CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
}
4703
4704
4705#if 0 /* not used */
4706/**
4707 * Interface for qemu hardware to report back fatal errors.
4708 */
4709void hw_error(const char *pszFormat, ...)
4710{
4711 /*
4712 * Bitch about it.
4713 */
4714 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4715 * this in my Odin32 tree at home! */
4716 va_list args;
4717 va_start(args, pszFormat);
4718 RTLogPrintf("fatal error in virtual hardware:");
4719 RTLogPrintfV(pszFormat, args);
4720 va_end(args);
4721 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4722
4723 /*
4724 * If we're in REM context we'll sync back the state before 'jumping' to
4725 * the EMs failure handling.
4726 */
4727 PVM pVM = cpu_single_env->pVM;
4728 if (pVM->rem.s.fInREM)
4729 REMR3StateBack(pVM);
4730 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4731 AssertMsgFailed(("EMR3FatalError returned!\n"));
4732}
4733#endif
4734
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Does not return (ends in EMR3FatalError).
 *
 * @param   env         The cpu state.
 * @param   pszFormat   printf style message format.
 * @param   ...         Format arguments.
 */
void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Pull up to six pointer-sized args off the va_list, one per '%' in the format. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4795
4796
/**
 * Aborts the VM.
 *
 * Does not return (ends in EMR3FatalError).
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4827
4828
4829/**
4830 * Dumps a linux system call.
4831 * @param pVCpu VMCPU handle.
4832 */
4833void remR3DumpLnxSyscall(PVMCPU pVCpu)
4834{
4835 static const char *apsz[] =
4836 {
4837 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4838 "sys_exit",
4839 "sys_fork",
4840 "sys_read",
4841 "sys_write",
4842 "sys_open", /* 5 */
4843 "sys_close",
4844 "sys_waitpid",
4845 "sys_creat",
4846 "sys_link",
4847 "sys_unlink", /* 10 */
4848 "sys_execve",
4849 "sys_chdir",
4850 "sys_time",
4851 "sys_mknod",
4852 "sys_chmod", /* 15 */
4853 "sys_lchown16",
4854 "sys_ni_syscall", /* old break syscall holder */
4855 "sys_stat",
4856 "sys_lseek",
4857 "sys_getpid", /* 20 */
4858 "sys_mount",
4859 "sys_oldumount",
4860 "sys_setuid16",
4861 "sys_getuid16",
4862 "sys_stime", /* 25 */
4863 "sys_ptrace",
4864 "sys_alarm",
4865 "sys_fstat",
4866 "sys_pause",
4867 "sys_utime", /* 30 */
4868 "sys_ni_syscall", /* old stty syscall holder */
4869 "sys_ni_syscall", /* old gtty syscall holder */
4870 "sys_access",
4871 "sys_nice",
4872 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4873 "sys_sync",
4874 "sys_kill",
4875 "sys_rename",
4876 "sys_mkdir",
4877 "sys_rmdir", /* 40 */
4878 "sys_dup",
4879 "sys_pipe",
4880 "sys_times",
4881 "sys_ni_syscall", /* old prof syscall holder */
4882 "sys_brk", /* 45 */
4883 "sys_setgid16",
4884 "sys_getgid16",
4885 "sys_signal",
4886 "sys_geteuid16",
4887 "sys_getegid16", /* 50 */
4888 "sys_acct",
4889 "sys_umount", /* recycled never used phys() */
4890 "sys_ni_syscall", /* old lock syscall holder */
4891 "sys_ioctl",
4892 "sys_fcntl", /* 55 */
4893 "sys_ni_syscall", /* old mpx syscall holder */
4894 "sys_setpgid",
4895 "sys_ni_syscall", /* old ulimit syscall holder */
4896 "sys_olduname",
4897 "sys_umask", /* 60 */
4898 "sys_chroot",
4899 "sys_ustat",
4900 "sys_dup2",
4901 "sys_getppid",
4902 "sys_getpgrp", /* 65 */
4903 "sys_setsid",
4904 "sys_sigaction",
4905 "sys_sgetmask",
4906 "sys_ssetmask",
4907 "sys_setreuid16", /* 70 */
4908 "sys_setregid16",
4909 "sys_sigsuspend",
4910 "sys_sigpending",
4911 "sys_sethostname",
4912 "sys_setrlimit", /* 75 */
4913 "sys_old_getrlimit",
4914 "sys_getrusage",
4915 "sys_gettimeofday",
4916 "sys_settimeofday",
4917 "sys_getgroups16", /* 80 */
4918 "sys_setgroups16",
4919 "old_select",
4920 "sys_symlink",
4921 "sys_lstat",
4922 "sys_readlink", /* 85 */
4923 "sys_uselib",
4924 "sys_swapon",
4925 "sys_reboot",
4926 "old_readdir",
4927 "old_mmap", /* 90 */
4928 "sys_munmap",
4929 "sys_truncate",
4930 "sys_ftruncate",
4931 "sys_fchmod",
4932 "sys_fchown16", /* 95 */
4933 "sys_getpriority",
4934 "sys_setpriority",
4935 "sys_ni_syscall", /* old profil syscall holder */
4936 "sys_statfs",
4937 "sys_fstatfs", /* 100 */
4938 "sys_ioperm",
4939 "sys_socketcall",
4940 "sys_syslog",
4941 "sys_setitimer",
4942 "sys_getitimer", /* 105 */
4943 "sys_newstat",
4944 "sys_newlstat",
4945 "sys_newfstat",
4946 "sys_uname",
4947 "sys_iopl", /* 110 */
4948 "sys_vhangup",
4949 "sys_ni_syscall", /* old "idle" system call */
4950 "sys_vm86old",
4951 "sys_wait4",
4952 "sys_swapoff", /* 115 */
4953 "sys_sysinfo",
4954 "sys_ipc",
4955 "sys_fsync",
4956 "sys_sigreturn",
4957 "sys_clone", /* 120 */
4958 "sys_setdomainname",
4959 "sys_newuname",
4960 "sys_modify_ldt",
4961 "sys_adjtimex",
4962 "sys_mprotect", /* 125 */
4963 "sys_sigprocmask",
4964 "sys_ni_syscall", /* old "create_module" */
4965 "sys_init_module",
4966 "sys_delete_module",
4967 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4968 "sys_quotactl",
4969 "sys_getpgid",
4970 "sys_fchdir",
4971 "sys_bdflush",
4972 "sys_sysfs", /* 135 */
4973 "sys_personality",
4974 "sys_ni_syscall", /* reserved for afs_syscall */
4975 "sys_setfsuid16",
4976 "sys_setfsgid16",
4977 "sys_llseek", /* 140 */
4978 "sys_getdents",
4979 "sys_select",
4980 "sys_flock",
4981 "sys_msync",
4982 "sys_readv", /* 145 */
4983 "sys_writev",
4984 "sys_getsid",
4985 "sys_fdatasync",
4986 "sys_sysctl",
4987 "sys_mlock", /* 150 */
4988 "sys_munlock",
4989 "sys_mlockall",
4990 "sys_munlockall",
4991 "sys_sched_setparam",
4992 "sys_sched_getparam", /* 155 */
4993 "sys_sched_setscheduler",
4994 "sys_sched_getscheduler",
4995 "sys_sched_yield",
4996 "sys_sched_get_priority_max",
4997 "sys_sched_get_priority_min", /* 160 */
4998 "sys_sched_rr_get_interval",
4999 "sys_nanosleep",
5000 "sys_mremap",
5001 "sys_setresuid16",
5002 "sys_getresuid16", /* 165 */
5003 "sys_vm86",
5004 "sys_ni_syscall", /* Old sys_query_module */
5005 "sys_poll",
5006 "sys_nfsservctl",
5007 "sys_setresgid16", /* 170 */
5008 "sys_getresgid16",
5009 "sys_prctl",
5010 "sys_rt_sigreturn",
5011 "sys_rt_sigaction",
5012 "sys_rt_sigprocmask", /* 175 */
5013 "sys_rt_sigpending",
5014 "sys_rt_sigtimedwait",
5015 "sys_rt_sigqueueinfo",
5016 "sys_rt_sigsuspend",
5017 "sys_pread64", /* 180 */
5018 "sys_pwrite64",
5019 "sys_chown16",
5020 "sys_getcwd",
5021 "sys_capget",
5022 "sys_capset", /* 185 */
5023 "sys_sigaltstack",
5024 "sys_sendfile",
5025 "sys_ni_syscall", /* reserved for streams1 */
5026 "sys_ni_syscall", /* reserved for streams2 */
5027 "sys_vfork", /* 190 */
5028 "sys_getrlimit",
5029 "sys_mmap2",
5030 "sys_truncate64",
5031 "sys_ftruncate64",
5032 "sys_stat64", /* 195 */
5033 "sys_lstat64",
5034 "sys_fstat64",
5035 "sys_lchown",
5036 "sys_getuid",
5037 "sys_getgid", /* 200 */
5038 "sys_geteuid",
5039 "sys_getegid",
5040 "sys_setreuid",
5041 "sys_setregid",
5042 "sys_getgroups", /* 205 */
5043 "sys_setgroups",
5044 "sys_fchown",
5045 "sys_setresuid",
5046 "sys_getresuid",
5047 "sys_setresgid", /* 210 */
5048 "sys_getresgid",
5049 "sys_chown",
5050 "sys_setuid",
5051 "sys_setgid",
5052 "sys_setfsuid", /* 215 */
5053 "sys_setfsgid",
5054 "sys_pivot_root",
5055 "sys_mincore",
5056 "sys_madvise",
5057 "sys_getdents64", /* 220 */
5058 "sys_fcntl64",
5059 "sys_ni_syscall", /* reserved for TUX */
5060 "sys_ni_syscall",
5061 "sys_gettid",
5062 "sys_readahead", /* 225 */
5063 "sys_setxattr",
5064 "sys_lsetxattr",
5065 "sys_fsetxattr",
5066 "sys_getxattr",
5067 "sys_lgetxattr", /* 230 */
5068 "sys_fgetxattr",
5069 "sys_listxattr",
5070 "sys_llistxattr",
5071 "sys_flistxattr",
5072 "sys_removexattr", /* 235 */
5073 "sys_lremovexattr",
5074 "sys_fremovexattr",
5075 "sys_tkill",
5076 "sys_sendfile64",
5077 "sys_futex", /* 240 */
5078 "sys_sched_setaffinity",
5079 "sys_sched_getaffinity",
5080 "sys_set_thread_area",
5081 "sys_get_thread_area",
5082 "sys_io_setup", /* 245 */
5083 "sys_io_destroy",
5084 "sys_io_getevents",
5085 "sys_io_submit",
5086 "sys_io_cancel",
5087 "sys_fadvise64", /* 250 */
5088 "sys_ni_syscall",
5089 "sys_exit_group",
5090 "sys_lookup_dcookie",
5091 "sys_epoll_create",
5092 "sys_epoll_ctl", /* 255 */
5093 "sys_epoll_wait",
5094 "sys_remap_file_pages",
5095 "sys_set_tid_address",
5096 "sys_timer_create",
5097 "sys_timer_settime", /* 260 */
5098 "sys_timer_gettime",
5099 "sys_timer_getoverrun",
5100 "sys_timer_delete",
5101 "sys_clock_settime",
5102 "sys_clock_gettime", /* 265 */
5103 "sys_clock_getres",
5104 "sys_clock_nanosleep",
5105 "sys_statfs64",
5106 "sys_fstatfs64",
5107 "sys_tgkill", /* 270 */
5108 "sys_utimes",
5109 "sys_fadvise64_64",
5110 "sys_ni_syscall" /* sys_vserver */
5111 };
5112
5113 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5114 switch (uEAX)
5115 {
5116 default:
5117 if (uEAX < RT_ELEMENTS(apsz))
5118 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5119 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5120 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5121 else
5122 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5123 break;
5124
5125 }
5126}
5127
5128
5129/**
5130 * Dumps an OpenBSD system call.
5131 * @param pVCpu VMCPU handle.
5132 */
5133void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5134{
5135 static const char *apsz[] =
5136 {
5137 "SYS_syscall", //0
5138 "SYS_exit", //1
5139 "SYS_fork", //2
5140 "SYS_read", //3
5141 "SYS_write", //4
5142 "SYS_open", //5
5143 "SYS_close", //6
5144 "SYS_wait4", //7
5145 "SYS_8",
5146 "SYS_link", //9
5147 "SYS_unlink", //10
5148 "SYS_11",
5149 "SYS_chdir", //12
5150 "SYS_fchdir", //13
5151 "SYS_mknod", //14
5152 "SYS_chmod", //15
5153 "SYS_chown", //16
5154 "SYS_break", //17
5155 "SYS_18",
5156 "SYS_19",
5157 "SYS_getpid", //20
5158 "SYS_mount", //21
5159 "SYS_unmount", //22
5160 "SYS_setuid", //23
5161 "SYS_getuid", //24
5162 "SYS_geteuid", //25
5163 "SYS_ptrace", //26
5164 "SYS_recvmsg", //27
5165 "SYS_sendmsg", //28
5166 "SYS_recvfrom", //29
5167 "SYS_accept", //30
5168 "SYS_getpeername", //31
5169 "SYS_getsockname", //32
5170 "SYS_access", //33
5171 "SYS_chflags", //34
5172 "SYS_fchflags", //35
5173 "SYS_sync", //36
5174 "SYS_kill", //37
5175 "SYS_38",
5176 "SYS_getppid", //39
5177 "SYS_40",
5178 "SYS_dup", //41
5179 "SYS_opipe", //42
5180 "SYS_getegid", //43
5181 "SYS_profil", //44
5182 "SYS_ktrace", //45
5183 "SYS_sigaction", //46
5184 "SYS_getgid", //47
5185 "SYS_sigprocmask", //48
5186 "SYS_getlogin", //49
5187 "SYS_setlogin", //50
5188 "SYS_acct", //51
5189 "SYS_sigpending", //52
5190 "SYS_osigaltstack", //53
5191 "SYS_ioctl", //54
5192 "SYS_reboot", //55
5193 "SYS_revoke", //56
5194 "SYS_symlink", //57
5195 "SYS_readlink", //58
5196 "SYS_execve", //59
5197 "SYS_umask", //60
5198 "SYS_chroot", //61
5199 "SYS_62",
5200 "SYS_63",
5201 "SYS_64",
5202 "SYS_65",
5203 "SYS_vfork", //66
5204 "SYS_67",
5205 "SYS_68",
5206 "SYS_sbrk", //69
5207 "SYS_sstk", //70
5208 "SYS_61",
5209 "SYS_vadvise", //72
5210 "SYS_munmap", //73
5211 "SYS_mprotect", //74
5212 "SYS_madvise", //75
5213 "SYS_76",
5214 "SYS_77",
5215 "SYS_mincore", //78
5216 "SYS_getgroups", //79
5217 "SYS_setgroups", //80
5218 "SYS_getpgrp", //81
5219 "SYS_setpgid", //82
5220 "SYS_setitimer", //83
5221 "SYS_84",
5222 "SYS_85",
5223 "SYS_getitimer", //86
5224 "SYS_87",
5225 "SYS_88",
5226 "SYS_89",
5227 "SYS_dup2", //90
5228 "SYS_91",
5229 "SYS_fcntl", //92
5230 "SYS_select", //93
5231 "SYS_94",
5232 "SYS_fsync", //95
5233 "SYS_setpriority", //96
5234 "SYS_socket", //97
5235 "SYS_connect", //98
5236 "SYS_99",
5237 "SYS_getpriority", //100
5238 "SYS_101",
5239 "SYS_102",
5240 "SYS_sigreturn", //103
5241 "SYS_bind", //104
5242 "SYS_setsockopt", //105
5243 "SYS_listen", //106
5244 "SYS_107",
5245 "SYS_108",
5246 "SYS_109",
5247 "SYS_110",
5248 "SYS_sigsuspend", //111
5249 "SYS_112",
5250 "SYS_113",
5251 "SYS_114",
5252 "SYS_115",
5253 "SYS_gettimeofday", //116
5254 "SYS_getrusage", //117
5255 "SYS_getsockopt", //118
5256 "SYS_119",
5257 "SYS_readv", //120
5258 "SYS_writev", //121
5259 "SYS_settimeofday", //122
5260 "SYS_fchown", //123
5261 "SYS_fchmod", //124
5262 "SYS_125",
5263 "SYS_setreuid", //126
5264 "SYS_setregid", //127
5265 "SYS_rename", //128
5266 "SYS_129",
5267 "SYS_130",
5268 "SYS_flock", //131
5269 "SYS_mkfifo", //132
5270 "SYS_sendto", //133
5271 "SYS_shutdown", //134
5272 "SYS_socketpair", //135
5273 "SYS_mkdir", //136
5274 "SYS_rmdir", //137
5275 "SYS_utimes", //138
5276 "SYS_139",
5277 "SYS_adjtime", //140
5278 "SYS_141",
5279 "SYS_142",
5280 "SYS_143",
5281 "SYS_144",
5282 "SYS_145",
5283 "SYS_146",
5284 "SYS_setsid", //147
5285 "SYS_quotactl", //148
5286 "SYS_149",
5287 "SYS_150",
5288 "SYS_151",
5289 "SYS_152",
5290 "SYS_153",
5291 "SYS_154",
5292 "SYS_nfssvc", //155
5293 "SYS_156",
5294 "SYS_157",
5295 "SYS_158",
5296 "SYS_159",
5297 "SYS_160",
5298 "SYS_getfh", //161
5299 "SYS_162",
5300 "SYS_163",
5301 "SYS_164",
5302 "SYS_sysarch", //165
5303 "SYS_166",
5304 "SYS_167",
5305 "SYS_168",
5306 "SYS_169",
5307 "SYS_170",
5308 "SYS_171",
5309 "SYS_172",
5310 "SYS_pread", //173
5311 "SYS_pwrite", //174
5312 "SYS_175",
5313 "SYS_176",
5314 "SYS_177",
5315 "SYS_178",
5316 "SYS_179",
5317 "SYS_180",
5318 "SYS_setgid", //181
5319 "SYS_setegid", //182
5320 "SYS_seteuid", //183
5321 "SYS_lfs_bmapv", //184
5322 "SYS_lfs_markv", //185
5323 "SYS_lfs_segclean", //186
5324 "SYS_lfs_segwait", //187
5325 "SYS_188",
5326 "SYS_189",
5327 "SYS_190",
5328 "SYS_pathconf", //191
5329 "SYS_fpathconf", //192
5330 "SYS_swapctl", //193
5331 "SYS_getrlimit", //194
5332 "SYS_setrlimit", //195
5333 "SYS_getdirentries", //196
5334 "SYS_mmap", //197
5335 "SYS___syscall", //198
5336 "SYS_lseek", //199
5337 "SYS_truncate", //200
5338 "SYS_ftruncate", //201
5339 "SYS___sysctl", //202
5340 "SYS_mlock", //203
5341 "SYS_munlock", //204
5342 "SYS_205",
5343 "SYS_futimes", //206
5344 "SYS_getpgid", //207
5345 "SYS_xfspioctl", //208
5346 "SYS_209",
5347 "SYS_210",
5348 "SYS_211",
5349 "SYS_212",
5350 "SYS_213",
5351 "SYS_214",
5352 "SYS_215",
5353 "SYS_216",
5354 "SYS_217",
5355 "SYS_218",
5356 "SYS_219",
5357 "SYS_220",
5358 "SYS_semget", //221
5359 "SYS_222",
5360 "SYS_223",
5361 "SYS_224",
5362 "SYS_msgget", //225
5363 "SYS_msgsnd", //226
5364 "SYS_msgrcv", //227
5365 "SYS_shmat", //228
5366 "SYS_229",
5367 "SYS_shmdt", //230
5368 "SYS_231",
5369 "SYS_clock_gettime", //232
5370 "SYS_clock_settime", //233
5371 "SYS_clock_getres", //234
5372 "SYS_235",
5373 "SYS_236",
5374 "SYS_237",
5375 "SYS_238",
5376 "SYS_239",
5377 "SYS_nanosleep", //240
5378 "SYS_241",
5379 "SYS_242",
5380 "SYS_243",
5381 "SYS_244",
5382 "SYS_245",
5383 "SYS_246",
5384 "SYS_247",
5385 "SYS_248",
5386 "SYS_249",
5387 "SYS_minherit", //250
5388 "SYS_rfork", //251
5389 "SYS_poll", //252
5390 "SYS_issetugid", //253
5391 "SYS_lchown", //254
5392 "SYS_getsid", //255
5393 "SYS_msync", //256
5394 "SYS_257",
5395 "SYS_258",
5396 "SYS_259",
5397 "SYS_getfsstat", //260
5398 "SYS_statfs", //261
5399 "SYS_fstatfs", //262
5400 "SYS_pipe", //263
5401 "SYS_fhopen", //264
5402 "SYS_265",
5403 "SYS_fhstatfs", //266
5404 "SYS_preadv", //267
5405 "SYS_pwritev", //268
5406 "SYS_kqueue", //269
5407 "SYS_kevent", //270
5408 "SYS_mlockall", //271
5409 "SYS_munlockall", //272
5410 "SYS_getpeereid", //273
5411 "SYS_274",
5412 "SYS_275",
5413 "SYS_276",
5414 "SYS_277",
5415 "SYS_278",
5416 "SYS_279",
5417 "SYS_280",
5418 "SYS_getresuid", //281
5419 "SYS_setresuid", //282
5420 "SYS_getresgid", //283
5421 "SYS_setresgid", //284
5422 "SYS_285",
5423 "SYS_mquery", //286
5424 "SYS_closefrom", //287
5425 "SYS_sigaltstack", //288
5426 "SYS_shmget", //289
5427 "SYS_semop", //290
5428 "SYS_stat", //291
5429 "SYS_fstat", //292
5430 "SYS_lstat", //293
5431 "SYS_fhstat", //294
5432 "SYS___semctl", //295
5433 "SYS_shmctl", //296
5434 "SYS_msgctl", //297
5435 "SYS_MAXSYSCALL", //298
5436 //299
5437 //300
5438 };
5439 uint32_t uEAX;
5440 if (!LogIsEnabled())
5441 return;
5442 uEAX = CPUMGetGuestEAX(pVCpu);
5443 switch (uEAX)
5444 {
5445 default:
5446 if (uEAX < RT_ELEMENTS(apsz))
5447 {
5448 uint32_t au32Args[8] = {0};
5449 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5450 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5451 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5452 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5453 }
5454 else
5455 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5456 break;
5457 }
5458}
5459
5460
5461#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5462/**
5463 * The Dll main entry point (stub).
5464 */
5465bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5466{
5467 return true;
5468}
5469
/**
 * Minimal memcpy replacement for the no-CRT x86 Windows build.
 *
 * Simple forward byte copy; the regions must not overlap (standard memcpy
 * contract).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (never written).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    /* Use a const-qualified source pointer; the original assigned the
       const void pointer to a non-const uint8_t pointer, which discards
       the qualifier (a constraint violation compilers diagnose). */
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5477
5478#endif
5479
/**
 * Intentionally empty stub for QEMU's cpu_smm_update callback.
 *
 * NOTE(review): presumably SMM state changes are ignored by the VBox
 * recompiler (handled elsewhere, if at all) — nothing in this file shows
 * an SMM implementation; confirm before relying on SMM behavior in REM.
 */
void cpu_smm_update(CPUX86State *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette