VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 80102

Last change on this file since 80102 was 80024, checked in by vboxsync, 6 years ago

VMM: Kicking out raw-mode (work in progress) - em config. bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 182.5 KB
Line 
1/* $Id: VBoxRecompiler.c 80024 2019-07-28 13:30:53Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_rem REM - Recompiled Execution Manager.
19 *
 * The recompiled execution manager (REM) serves as the final fallback for guest
21 * execution, after HM / raw-mode and IEM have given up.
22 *
23 * The REM is qemu with a whole bunch of VBox specific customization for
24 * interfacing with PATM, CSAM, PGM and other components.
25 *
26 * @sa @ref grp_rem
27 */
28
29
30/*********************************************************************************************************************************
31* Header Files *
32*********************************************************************************************************************************/
33#define LOG_GROUP LOG_GROUP_REM
34#include <stdio.h> /* FILE */
35#include "osdep.h"
36#include "config.h"
37#include "cpu.h"
38#include "exec-all.h"
39#include "ioport.h"
40
41#include <VBox/vmm/rem.h>
42#include <VBox/vmm/vmapi.h>
43#include <VBox/vmm/tm.h>
44#include <VBox/vmm/ssm.h>
45#include <VBox/vmm/em.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/trpm.h>
48#include <VBox/vmm/iom.h>
49#include <VBox/vmm/mm.h>
50#include <VBox/vmm/pgm.h>
51#include <VBox/vmm/pdm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/dbg.h>
54#include <VBox/vmm/apic.h>
55#include <VBox/vmm/hm.h>
56#include "REMInternal.h"
57#include <VBox/vmm/vm.h>
58#include <VBox/vmm/uvm.h>
59#include <VBox/param.h>
60#include <VBox/err.h>
61
62#include <VBox/log.h>
63#include <iprt/alloca.h>
64#include <iprt/semaphore.h>
65#include <iprt/asm.h>
66#include <iprt/assert.h>
67#include <iprt/thread.h>
68#include <iprt/string.h>
69
70/* Don't wanna include everything. */
71extern void cpu_exec_init_all(uintptr_t tb_size);
72extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
73extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
74extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
75extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
76extern void tlb_flush(CPUX86State *env, int flush_global);
77extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
78extern void sync_ldtr(CPUX86State *env1, int selector);
79
80#ifdef VBOX_STRICT
81ram_addr_t get_phys_page_offset(target_ulong addr);
82#endif
83
84
85/*********************************************************************************************************************************
86* Defined Constants And Macros *
87*********************************************************************************************************************************/
88
89/** Copy 80-bit fpu register at pSrc to pDst.
90 * This is probably faster than *calling* memcpy.
91 */
92#define REM_COPY_FPU_REG(pDst, pSrc) \
93 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
94
95/** How remR3RunLoggingStep operates. */
96#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
97
98
99/** Selector flag shift between qemu and VBox.
100 * VBox shifts the qemu bits to the right. */
101#define SEL_FLAGS_SHIFT (8)
102/** Mask applied to the shifted qemu selector flags to get the attributes VBox
103 * (VT-x) needs. */
104#define SEL_FLAGS_SMASK UINT32_C(0x1F0FF)
105
106
107/*********************************************************************************************************************************
108* Internal Functions *
109*********************************************************************************************************************************/
110static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
111static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
112static DECLCALLBACK(int) remR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
113static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
114static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
115
116static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys);
117static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys);
118static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys);
119static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
120static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
121static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
122
123static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
124static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
125static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
126static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
127static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
128static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
129
130static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
131static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
132static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
133
134
135/*********************************************************************************************************************************
136* Global Variables *
137*********************************************************************************************************************************/
138
139/** @todo Move stats to REM::s some rainy day we have nothing do to. */
140#ifdef VBOX_WITH_STATISTICS
141static STAMPROFILEADV gStatExecuteSingleInstr;
142static STAMPROFILEADV gStatCompilationQEmu;
143static STAMPROFILEADV gStatRunCodeQEmu;
144static STAMPROFILEADV gStatTotalTimeQEmu;
145static STAMPROFILEADV gStatTimers;
146static STAMPROFILEADV gStatTBLookup;
147static STAMPROFILEADV gStatIRQ;
148static STAMPROFILEADV gStatRawCheck;
149static STAMPROFILEADV gStatMemRead;
150static STAMPROFILEADV gStatMemWrite;
151static STAMPROFILE gStatGCPhys2HCVirt;
152static STAMCOUNTER gStatCpuGetTSC;
153static STAMCOUNTER gStatRefuseTFInhibit;
154static STAMCOUNTER gStatRefuseVM86;
155static STAMCOUNTER gStatRefusePaging;
156static STAMCOUNTER gStatRefusePAE;
157static STAMCOUNTER gStatRefuseIOPLNot0;
158static STAMCOUNTER gStatRefuseIF0;
159static STAMCOUNTER gStatRefuseCode16;
160static STAMCOUNTER gStatRefuseWP0;
161static STAMCOUNTER gStatRefuseRing1or2;
162static STAMCOUNTER gStatRefuseCanExecute;
163static STAMCOUNTER gaStatRefuseStale[6];
164static STAMCOUNTER gStatREMGDTChange;
165static STAMCOUNTER gStatREMIDTChange;
166static STAMCOUNTER gStatREMLDTRChange;
167static STAMCOUNTER gStatREMTRChange;
168static STAMCOUNTER gStatSelOutOfSync[6];
169static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
170static STAMCOUNTER gStatFlushTBs;
171#endif
172/* in exec.c */
173extern uint32_t tlb_flush_count;
174extern uint32_t tb_flush_count;
175extern uint32_t tb_phys_invalidate_count;
176
177/*
178 * Global stuff.
179 */
180
/** MMIO read callbacks, one entry per access size (8, 16 and 32 bit).
 * Handed to cpu_register_io_memory() in REMR3Init with the CPU env as opaque. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, one entry per access size (8, 16 and 32 bit). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks, one entry per access size (8, 16 and 32 bit).
 * Registered with the VM handle as opaque (see REMR3Init), unlike the MMIO set. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks, one entry per access size (8, 16 and 32 bit). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
212
213
#ifdef VBOX_WITH_DEBUGGER
/*
 * Debugger commands.
 */
/* Note: fixed a stray double semicolon here -- a file-scope empty declaration
   is not valid ISO C (pre-C23) and triggers -pedantic warnings. */
static FNDBGCCMD remR3CmdDisasEnableStepping;

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0U, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
244
245/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
246 * @todo huh??? That cannot be the case on the mac... So, this
247 * point is probably not valid any longer. */
248uint8_t *code_gen_prologue;
249
250
251/*********************************************************************************************************************************
252* Internal Functions *
253*********************************************************************************************************************************/
254void remAbort(int rc, const char *pszTip);
255extern int testmath(void);
256
257/* Put them here to avoid unused variable warning. */
258AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
259#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
260//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
261/* Why did this have to be identical?? */
262AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
263#else
264AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
265#endif
266
267
/**
 * Initializes the REM.
 *
 * Must run before any RAM is registered with PGM (asserted below), since the
 * REM relies on receiving notifications about ALL physical memory
 * registrations.  The sequence of steps below is order dependent.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_UOFFSETOF(VM, rem) & 31), ("off=%#zx\n", RT_UOFFSETOF(VM, rem)));
#if 0 /* just an annoyance at the moment. */
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_UOFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue buffer for generated code; see the @todo on code_gen_prologue
       regarding the lower-4G placement claim. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Mirror the guest's CPUID feature leaves (std leaf 1 and ext leaf 0x80000001)
       into the recompiler CPU env. */
    CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.uStateLoadPendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, remR3LoadDone);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;    /* registered once per process, not per VM */
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));  /* NOTE(review): shadows the outer rc; failure is deliberately ignored */
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",       STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",     STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",   STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",      STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",      STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",   STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",      STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES",  STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS",  STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS",  STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS",  STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS",  STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS",  STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",         STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",  STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",     STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",  STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",       STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    /* Release-build counters exported from exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",      STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",  STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",     STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     * Builds a singly-linked free list over aHandlerNotifications; the last
     * entry is terminated with UINT32_MAX after the loop.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;  /* rc is the (successful) SSMR3RegisterInternal status at this point */
}
475
476
477/**
478 * Finalizes the REM initialization.
479 *
480 * This is called after all components, devices and drivers has
481 * been initialized. Its main purpose it to finish the RAM related
482 * initialization.
483 *
484 * @returns VBox status code.
485 *
486 * @param pVM The VM handle.
487 */
488REMR3DECL(int) REMR3InitFinalize(PVM pVM)
489{
490 int rc;
491
492 /*
493 * Ram size & dirty bit map.
494 */
495 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
496 pVM->rem.s.fGCPhysLastRamFixed = true;
497#ifdef RT_STRICT
498 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
499#else
500 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
501#endif
502 return rc;
503}
504
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * The dirty map uses one byte per guest page.  In guarded mode the map is
 * placed so that its end butts up against an inaccessible page, making
 * out-of-bounds writes fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM block is registered with the recompiler. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    /* The +1 wrapping to zero means GCPhysLastRam was the max address. */
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    /* One dirty byte per page; verify no bits were lost in the shift. */
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)            /* make sure there is always a tail to protect */
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke access to the tail so any overrun faults right away. */
        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map forward so its END is flush against the guard area. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
561
562
563/**
564 * Terminates the REM.
565 *
566 * Termination means cleaning up and freeing all resources,
567 * the VM it self is at this point powered off or suspended.
568 *
569 * @returns VBox status code.
570 * @param pVM The VM to operate on.
571 */
572REMR3DECL(int) REMR3Term(PVM pVM)
573{
574 /*
575 * Statistics.
576 */
577 STAMR3Deregister(pVM->pUVM, "/PROF/REM/*");
578 STAMR3Deregister(pVM->pUVM, "/REM/*");
579
580 return VINF_SUCCESS;
581}
582
583
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);    /* suppress notifications while cpu_reset touches state */
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;           /* drop any queued page invalidations */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}
614
615
/**
 * Execute state save operation.
 *
 * Wire format (all uint32_t): hflags, ~0 separator, CPU_RAW_RING0 flag,
 * pending-interrupt slot (always REM_NO_PENDING_IRQ on save), ~0 terminator.
 * remR3Load must read back exactly this sequence.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);          /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, REM_NO_PENDING_IRQ);  /* slot kept for format compatibility; nothing is ever pending at save time */

    /* Intermediate put statuses are not checked; the terminator's status covers the stream. */
    return SSMR3PutU32(pSSM, ~0);   /* terminator */
}
642
643
/**
 * Execute state load operation.
 *
 * Reads back the stream written by remR3Save (plus extra items for the
 * legacy 1.6 format) and forces a full CPU state resync afterwards.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_UOFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    /* For current-format streams this reads the slot remR3Save wrote as
       REM_NO_PENDING_IRQ; anything pending gets pushed to TRPM in remR3LoadDone. */
    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.uStateLoadPendingInterrupt);
    AssertRCReturn(rc, rc);
    AssertLogRelMsgReturn(   pVM->rem.s.uStateLoadPendingInterrupt == REM_NO_PENDING_IRQ
                          || pVM->rem.s.uStateLoadPendingInterrupt < 256,
                          ("uStateLoadPendingInterrupt=%#x\n", pVM->rem.s.uStateLoadPendingInterrupt),
                          VERR_SSM_UNEXPECTED_DATA);

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];  /* NOTE(review): shadows the pVCpu above; harmless but confusing */
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
772
773
774/**
775 * @callback_method_impl{FNSSMINTLOADDONE,
776 * For pushing misdesigned pending-interrupt mess to TRPM where it belongs. }
777 */
778static DECLCALLBACK(int) remR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
779{
780 if (pVM->rem.s.uStateLoadPendingInterrupt != REM_NO_PENDING_IRQ)
781 {
782 int rc = TRPMAssertTrap(&pVM->aCpus[0], pVM->rem.s.uStateLoadPendingInterrupt, TRPM_HARDWARE_INT);
783 AssertLogRelMsgReturn(rc, ("uStateLoadPendingInterrupt=%#x rc=%Rrc\n", pVM->rem.s.uStateLoadPendingInterrupt, rc), rc);
784 pVM->rem.s.uStateLoadPendingInterrupt = REM_NO_PENDING_IRQ;
785 }
786 return VINF_SUCCESS;
787}
788
789
790#undef LOG_GROUP
791#define LOG_GROUP LOG_GROUP_REM_RUN
792
793/**
794 * Single steps an instruction in recompiled mode.
795 *
796 * Before calling this function the REM state needs to be in sync with
797 * the VM. Call REMR3State() to perform the sync. It's only necessary
798 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
799 * and after calling REMR3StateBack().
800 *
801 * @returns VBox status code.
802 *
803 * @param pVM VM Handle.
804 * @param pVCpu VMCPU Handle.
805 */
806REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
807{
808 int rc, interrupt_request;
809 RTGCPTR GCPtrPC;
810 bool fBp;
811
812 /*
813 * Lock the REM - we don't wanna have anyone interrupting us
814 * while stepping - and enabled single stepping. We also ignore
815 * pending interrupts and suchlike.
816 */
817 interrupt_request = pVM->rem.s.Env.interrupt_request;
818 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
819 pVM->rem.s.Env.interrupt_request = 0;
820 cpu_single_step(&pVM->rem.s.Env, 1);
821
822 /*
823 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
824 */
825 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
826 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);
827
828 /*
829 * Execute and handle the return code.
830 * We execute without enabling the cpu tick, so on success we'll
831 * just flip it on and off to make sure it moves
832 */
833 rc = cpu_exec(&pVM->rem.s.Env);
834 if (rc == EXCP_DEBUG)
835 {
836 TMR3NotifyResume(pVM, pVCpu);
837 TMR3NotifySuspend(pVM, pVCpu);
838 rc = VINF_EM_DBG_STEPPED;
839 }
840 else
841 {
842 switch (rc)
843 {
844 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
845 case EXCP_HLT:
846 case EXCP_HALTED: rc = VINF_EM_HALT; break;
847 case EXCP_RC:
848 rc = pVM->rem.s.rc;
849 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
850 break;
851 case EXCP_EXECUTE_RAW:
852 case EXCP_EXECUTE_HM:
853 /** @todo is it correct? No! */
854 rc = VINF_SUCCESS;
855 break;
856 default:
857 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
858 rc = VERR_INTERNAL_ERROR;
859 break;
860 }
861 }
862
863 /*
864 * Restore the stuff we changed to prevent interruption.
865 * Unlock the REM.
866 */
867 if (fBp)
868 {
869 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
870 Assert(rc2 == 0); NOREF(rc2);
871 }
872 cpu_single_step(&pVM->rem.s.Env, 0);
873 pVM->rem.s.Env.interrupt_request = interrupt_request;
874
875 return rc;
876}
877
878
879/**
880 * Set a breakpoint using the REM facilities.
881 *
882 * @returns VBox status code.
883 * @param pVM The VM handle.
884 * @param Address The breakpoint address.
885 * @thread The emulation thread.
886 */
887REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
888{
889 VM_ASSERT_EMT(pVM);
890 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
891 {
892 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
893 return VINF_SUCCESS;
894 }
895 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
896 return VERR_REM_NO_MORE_BP_SLOTS;
897}
898
899
900/**
901 * Clears a breakpoint set by REMR3BreakpointSet().
902 *
903 * @returns VBox status code.
904 * @param pVM The VM handle.
905 * @param Address The breakpoint address.
906 * @thread The emulation thread.
907 */
908REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
909{
910 VM_ASSERT_EMT(pVM);
911 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
912 {
913 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
914 return VINF_SUCCESS;
915 }
916 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
917 return VERR_REM_BP_NOT_FOUND;
918}
919
920
921/**
922 * Emulate an instruction.
923 *
924 * This function executes one instruction without letting anyone
925 * interrupt it. This is intended for being called while being in
926 * raw mode and thus will take care of all the state syncing between
927 * REM and the rest.
928 *
929 * @returns VBox status code.
930 * @param pVM VM handle.
931 * @param pVCpu VMCPU Handle.
932 */
933REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
934{
935 bool fFlushTBs;
936
937 int rc, rc2;
938 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
939
940 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
941 * CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
942 */
943 if (!VM_IS_RAW_MODE_ENABLED(pVM))
944 pVM->rem.s.Env.state |= CPU_RAW_HM;
945
946 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
947 fFlushTBs = pVM->rem.s.fFlushTBs;
948 pVM->rem.s.fFlushTBs = false;
949
950 /*
951 * Sync the state and enable single instruction / single stepping.
952 */
953 rc = REMR3State(pVM, pVCpu);
954 pVM->rem.s.fFlushTBs = fFlushTBs;
955 if (RT_SUCCESS(rc))
956 {
957 int interrupt_request = pVM->rem.s.Env.interrupt_request;
958 Assert(!( interrupt_request
959 & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD
960 | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER
961 | CPU_INTERRUPT_EXTERNAL_DMA)));
962#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
963 cpu_single_step(&pVM->rem.s.Env, 0);
964#endif
965 Assert(!pVM->rem.s.Env.singlestep_enabled);
966
967 /*
968 * Now we set the execute single instruction flag and enter the cpu_exec loop.
969 */
970 TMNotifyStartOfExecution(pVCpu);
971 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
972 rc = cpu_exec(&pVM->rem.s.Env);
973 TMNotifyEndOfExecution(pVCpu);
974 switch (rc)
975 {
976 /*
977 * Executed without anything out of the way happening.
978 */
979 case EXCP_SINGLE_INSTR:
980 rc = VINF_EM_RESCHEDULE;
981 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
982 break;
983
984 /*
985 * If we take a trap or start servicing a pending interrupt, we might end up here.
986 * (Timer thread or some other thread wishing EMT's attention.)
987 */
988 case EXCP_INTERRUPT:
989 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
990 rc = VINF_EM_RESCHEDULE;
991 break;
992
993 /*
994 * Single step, we assume!
995 * If there was a breakpoint there we're fucked now.
996 */
997 case EXCP_DEBUG:
998 if (pVM->rem.s.Env.watchpoint_hit)
999 {
1000 /** @todo deal with watchpoints */
1001 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1002 rc = VINF_EM_DBG_BREAKPOINT;
1003 }
1004 else
1005 {
1006 CPUBreakpoint *pBP;
1007 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1008 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1009 if (pBP->pc == GCPtrPC)
1010 break;
1011 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1012 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1013 }
1014 break;
1015
1016 /*
1017 * hlt instruction.
1018 */
1019 case EXCP_HLT:
1020 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1021 rc = VINF_EM_HALT;
1022 break;
1023
1024 /*
1025 * The VM has halted.
1026 */
1027 case EXCP_HALTED:
1028 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1029 rc = VINF_EM_HALT;
1030 break;
1031
1032 /*
1033 * Switch to RAW-mode.
1034 */
1035 case EXCP_EXECUTE_RAW:
1036 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1037 rc = VINF_EM_RESCHEDULE_RAW;
1038 break;
1039
1040 /*
1041 * Switch to hardware accelerated RAW-mode.
1042 */
1043 case EXCP_EXECUTE_HM:
1044 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HM\n"));
1045 rc = VINF_EM_RESCHEDULE_HM;
1046 break;
1047
1048 /*
1049 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1050 */
1051 case EXCP_RC:
1052 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1053 rc = pVM->rem.s.rc;
1054 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1055 break;
1056
1057 /*
1058 * Figure out the rest when they arrive....
1059 */
1060 default:
1061 AssertMsgFailed(("rc=%d\n", rc));
1062 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1063 rc = VINF_EM_RESCHEDULE;
1064 break;
1065 }
1066
1067 /*
1068 * Switch back the state.
1069 */
1070 pVM->rem.s.Env.interrupt_request = interrupt_request;
1071 rc2 = REMR3StateBack(pVM, pVCpu);
1072 AssertRC(rc2);
1073 }
1074
1075 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1076 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1077 return rc;
1078}
1079
1080
1081/**
1082 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1083 *
1084 * @returns VBox status code.
1085 *
1086 * @param pVM The VM handle.
1087 * @param pVCpu The Virtual CPU handle.
1088 */
1089static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1090{
1091 int rc;
1092
1093 Assert(pVM->rem.s.fInREM);
1094#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1095 cpu_single_step(&pVM->rem.s.Env, 1);
1096#else
1097 Assert(!pVM->rem.s.Env.singlestep_enabled);
1098#endif
1099
1100 /*
1101 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1102 */
1103 for (;;)
1104 {
1105 char szBuf[256];
1106
1107 /*
1108 * Log the current registers state and instruction.
1109 */
1110 remR3StateUpdate(pVM, pVCpu);
1111 DBGFR3Info(pVM->pUVM, "cpumguest", NULL, NULL);
1112 szBuf[0] = '\0';
1113 rc = DBGFR3DisasInstrEx(pVM->pUVM,
1114 pVCpu->idCpu,
1115 0, /* Sel */ 0, /* GCPtr */
1116 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1117 szBuf,
1118 sizeof(szBuf),
1119 NULL);
1120 if (RT_FAILURE(rc))
1121 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1122 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1123
1124 /*
1125 * Execute the instruction.
1126 */
1127 TMNotifyStartOfExecution(pVCpu);
1128
1129 if ( pVM->rem.s.Env.exception_index < 0
1130 || pVM->rem.s.Env.exception_index > 256)
1131 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1132
1133#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1134 pVM->rem.s.Env.interrupt_request = 0;
1135#else
1136 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1137#endif
1138 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1139 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1140 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n",
1141 pVM->rem.s.Env.interrupt_request,
1142 pVM->rem.s.Env.halted,
1143 pVM->rem.s.Env.exception_index
1144 );
1145
1146 rc = cpu_exec(&pVM->rem.s.Env);
1147
1148 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1149 pVM->rem.s.Env.interrupt_request,
1150 pVM->rem.s.Env.halted,
1151 pVM->rem.s.Env.exception_index
1152 );
1153
1154 TMNotifyEndOfExecution(pVCpu);
1155
1156 switch (rc)
1157 {
1158#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1159 /*
1160 * The normal exit.
1161 */
1162 case EXCP_SINGLE_INSTR:
1163 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1164 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK))
1165 continue;
1166 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#RX64)\n",
1167 pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions);
1168 rc = VINF_SUCCESS;
1169 break;
1170
1171#else
1172 /*
1173 * The normal exit, check for breakpoints at PC just to be sure.
1174 */
1175#endif
1176 case EXCP_DEBUG:
1177 if (pVM->rem.s.Env.watchpoint_hit)
1178 {
1179 /** @todo deal with watchpoints */
1180 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1181 rc = VINF_EM_DBG_BREAKPOINT;
1182 }
1183 else
1184 {
1185 CPUBreakpoint *pBP;
1186 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1187 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1188 if (pBP->pc == GCPtrPC)
1189 break;
1190 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1191 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1192 }
1193#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1194 if (rc == VINF_EM_DBG_STEPPED)
1195 {
1196 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1197 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK))
1198 continue;
1199
1200 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#RX64)\n",
1201 pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions);
1202 rc = VINF_SUCCESS;
1203 }
1204#endif
1205 break;
1206
1207 /*
1208 * If we take a trap or start servicing a pending interrupt, we might end up here.
1209 * (Timer thread or some other thread wishing EMT's attention.)
1210 */
1211 case EXCP_INTERRUPT:
1212 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1213 rc = VINF_SUCCESS;
1214 break;
1215
1216 /*
1217 * hlt instruction.
1218 */
1219 case EXCP_HLT:
1220 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1221 rc = VINF_EM_HALT;
1222 break;
1223
1224 /*
1225 * The VM has halted.
1226 */
1227 case EXCP_HALTED:
1228 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1229 rc = VINF_EM_HALT;
1230 break;
1231
1232 /*
1233 * Switch to RAW-mode.
1234 */
1235 case EXCP_EXECUTE_RAW:
1236 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1237 rc = VINF_EM_RESCHEDULE_RAW;
1238 break;
1239
1240 /*
1241 * Switch to hardware accelerated RAW-mode.
1242 */
1243 case EXCP_EXECUTE_HM:
1244 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HM rc=VINF_EM_RESCHEDULE_HM\n");
1245 rc = VINF_EM_RESCHEDULE_HM;
1246 break;
1247
1248 /*
1249 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1250 */
1251 case EXCP_RC:
1252 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1253 rc = pVM->rem.s.rc;
1254 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1255 break;
1256
1257 /*
1258 * Figure out the rest when they arrive....
1259 */
1260 default:
1261 AssertMsgFailed(("rc=%d\n", rc));
1262 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1263 rc = VINF_EM_RESCHEDULE;
1264 break;
1265 }
1266 break;
1267 }
1268
1269#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1270// cpu_single_step(&pVM->rem.s.Env, 0);
1271#else
1272 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1273#endif
1274 return rc;
1275}
1276
1277
1278/**
1279 * Runs code in recompiled mode.
1280 *
1281 * Before calling this function the REM state needs to be in sync with
1282 * the VM. Call REMR3State() to perform the sync. It's only necessary
1283 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1284 * and after calling REMR3StateBack().
1285 *
1286 * @returns VBox status code.
1287 *
1288 * @param pVM VM Handle.
1289 * @param pVCpu VMCPU Handle.
1290 */
1291REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1292{
1293 int rc;
1294
1295 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1296 return remR3RunLoggingStep(pVM, pVCpu);
1297
1298 Assert(pVM->rem.s.fInREM);
1299 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1300
1301 TMNotifyStartOfExecution(pVCpu);
1302 rc = cpu_exec(&pVM->rem.s.Env);
1303 TMNotifyEndOfExecution(pVCpu);
1304 switch (rc)
1305 {
1306 /*
1307 * This happens when the execution was interrupted
1308 * by an external event, like pending timers.
1309 */
1310 case EXCP_INTERRUPT:
1311 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1312 rc = VINF_SUCCESS;
1313 break;
1314
1315 /*
1316 * hlt instruction.
1317 */
1318 case EXCP_HLT:
1319 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1320 rc = VINF_EM_HALT;
1321 break;
1322
1323 /*
1324 * The VM has halted.
1325 */
1326 case EXCP_HALTED:
1327 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1328 rc = VINF_EM_HALT;
1329 break;
1330
1331 /*
1332 * Breakpoint/single step.
1333 */
1334 case EXCP_DEBUG:
1335 if (pVM->rem.s.Env.watchpoint_hit)
1336 {
1337 /** @todo deal with watchpoints */
1338 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1339 rc = VINF_EM_DBG_BREAKPOINT;
1340 }
1341 else
1342 {
1343 CPUBreakpoint *pBP;
1344 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1345 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1346 if (pBP->pc == GCPtrPC)
1347 break;
1348 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1349 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1350 }
1351 break;
1352
1353 /*
1354 * Switch to RAW-mode.
1355 */
1356 case EXCP_EXECUTE_RAW:
1357 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
1358 rc = VINF_EM_RESCHEDULE_RAW;
1359 break;
1360
1361 /*
1362 * Switch to hardware accelerated RAW-mode.
1363 */
1364 case EXCP_EXECUTE_HM:
1365 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HM\n"));
1366 rc = VINF_EM_RESCHEDULE_HM;
1367 break;
1368
1369 /*
1370 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1371 */
1372 case EXCP_RC:
1373 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1374 rc = pVM->rem.s.rc;
1375 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1376 break;
1377
1378 /*
1379 * Figure out the rest when they arrive....
1380 */
1381 default:
1382 AssertMsgFailed(("rc=%d\n", rc));
1383 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1384 rc = VINF_SUCCESS;
1385 break;
1386 }
1387
1388 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1389 return rc;
1390}
1391
1392
1393/**
1394 * Check if the cpu state is suitable for Raw execution.
1395 *
1396 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1397 *
1398 * @param env The CPU env struct.
1399 * @param eip The EIP to check this for (might differ from env->eip).
1400 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1401 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1402 *
1403 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1404 */
1405bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1406{
1407 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1408 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1409 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1410 uint32_t u32CR0;
1411
1412 /* Update counter. */
1413 env->pVM->rem.s.cCanExecuteRaw++;
1414
1415 /* Never when single stepping+logging guest code. */
1416 if (env->state & CPU_EMULATE_SINGLE_STEP)
1417 return false;
1418
1419 if (!VM_IS_RAW_MODE_ENABLED(env->pVM))
1420 {
1421#ifdef RT_OS_WINDOWS
1422 PCPUMCTX pCtx = alloca(sizeof(*pCtx));
1423#else
1424 CPUMCTX Ctx;
1425 PCPUMCTX pCtx = &Ctx;
1426#endif
1427 /** @todo NEM: scheduling. */
1428
1429 env->state |= CPU_RAW_HM;
1430
1431 /*
1432 * Create partial context for HMCanExecuteGuest.
1433 */
1434 pCtx->cr0 = env->cr[0];
1435 pCtx->cr3 = env->cr[3];
1436 pCtx->cr4 = env->cr[4];
1437
1438 pCtx->tr.Sel = env->tr.selector;
1439 pCtx->tr.ValidSel = env->tr.selector;
1440 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
1441 pCtx->tr.u64Base = env->tr.base;
1442 pCtx->tr.u32Limit = env->tr.limit;
1443 pCtx->tr.Attr.u = (env->tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1444
1445 pCtx->ldtr.Sel = env->ldt.selector;
1446 pCtx->ldtr.ValidSel = env->ldt.selector;
1447 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1448 pCtx->ldtr.u64Base = env->ldt.base;
1449 pCtx->ldtr.u32Limit = env->ldt.limit;
1450 pCtx->ldtr.Attr.u = (env->ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1451
1452 pCtx->idtr.cbIdt = env->idt.limit;
1453 pCtx->idtr.pIdt = env->idt.base;
1454
1455 pCtx->gdtr.cbGdt = env->gdt.limit;
1456 pCtx->gdtr.pGdt = env->gdt.base;
1457
1458 pCtx->rsp = env->regs[R_ESP];
1459 pCtx->rip = env->eip;
1460
1461 pCtx->eflags.u32 = env->eflags;
1462
1463 pCtx->cs.Sel = env->segs[R_CS].selector;
1464 pCtx->cs.ValidSel = env->segs[R_CS].selector;
1465 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1466 pCtx->cs.u64Base = env->segs[R_CS].base;
1467 pCtx->cs.u32Limit = env->segs[R_CS].limit;
1468 pCtx->cs.Attr.u = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1469
1470 pCtx->ds.Sel = env->segs[R_DS].selector;
1471 pCtx->ds.ValidSel = env->segs[R_DS].selector;
1472 pCtx->ds.fFlags = CPUMSELREG_FLAGS_VALID;
1473 pCtx->ds.u64Base = env->segs[R_DS].base;
1474 pCtx->ds.u32Limit = env->segs[R_DS].limit;
1475 pCtx->ds.Attr.u = (env->segs[R_DS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1476
1477 pCtx->es.Sel = env->segs[R_ES].selector;
1478 pCtx->es.ValidSel = env->segs[R_ES].selector;
1479 pCtx->es.fFlags = CPUMSELREG_FLAGS_VALID;
1480 pCtx->es.u64Base = env->segs[R_ES].base;
1481 pCtx->es.u32Limit = env->segs[R_ES].limit;
1482 pCtx->es.Attr.u = (env->segs[R_ES].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1483
1484 pCtx->fs.Sel = env->segs[R_FS].selector;
1485 pCtx->fs.ValidSel = env->segs[R_FS].selector;
1486 pCtx->fs.fFlags = CPUMSELREG_FLAGS_VALID;
1487 pCtx->fs.u64Base = env->segs[R_FS].base;
1488 pCtx->fs.u32Limit = env->segs[R_FS].limit;
1489 pCtx->fs.Attr.u = (env->segs[R_FS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1490
1491 pCtx->gs.Sel = env->segs[R_GS].selector;
1492 pCtx->gs.ValidSel = env->segs[R_GS].selector;
1493 pCtx->gs.fFlags = CPUMSELREG_FLAGS_VALID;
1494 pCtx->gs.u64Base = env->segs[R_GS].base;
1495 pCtx->gs.u32Limit = env->segs[R_GS].limit;
1496 pCtx->gs.Attr.u = (env->segs[R_GS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1497
1498 pCtx->ss.Sel = env->segs[R_SS].selector;
1499 pCtx->ss.ValidSel = env->segs[R_SS].selector;
1500 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1501 pCtx->ss.u64Base = env->segs[R_SS].base;
1502 pCtx->ss.u32Limit = env->segs[R_SS].limit;
1503 pCtx->ss.Attr.u = (env->segs[R_SS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1504
1505 pCtx->msrEFER = env->efer;
1506 pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_NONE;
1507
1508 /*
1509 * Hardware accelerated mode:
1510 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1511 */
1512 PVMCPU pVCpu = &env->pVM->aCpus[0];
1513 if (HMCanExecuteGuest(pVCpu, pCtx))
1514 {
1515 *piException = EXCP_EXECUTE_HM;
1516 return true;
1517 }
1518 return false;
1519 }
1520
1521 /*
1522 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1523 * or 32 bits protected mode ring 0 code
1524 *
1525 * The tests are ordered by the likelihood of being true during normal execution.
1526 */
1527 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1528 {
1529 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1530 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1531 return false;
1532 }
1533
1534#ifndef VBOX_RAW_V86
1535 if (fFlags & VM_MASK) {
1536 STAM_COUNTER_INC(&gStatRefuseVM86);
1537 Log2(("raw mode refused: VM_MASK\n"));
1538 return false;
1539 }
1540#endif
1541
1542 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1543 {
1544#ifndef DEBUG_bird
1545 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1546#endif
1547 return false;
1548 }
1549
1550 if (env->singlestep_enabled)
1551 {
1552 //Log2(("raw mode refused: Single step\n"));
1553 return false;
1554 }
1555
1556 if (!QTAILQ_EMPTY(&env->breakpoints))
1557 {
1558 //Log2(("raw mode refused: Breakpoints\n"));
1559 return false;
1560 }
1561
1562 if (!QTAILQ_EMPTY(&env->watchpoints))
1563 {
1564 //Log2(("raw mode refused: Watchpoints\n"));
1565 return false;
1566 }
1567
1568 u32CR0 = env->cr[0];
1569 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1570 {
1571 STAM_COUNTER_INC(&gStatRefusePaging);
1572 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1573 return false;
1574 }
1575
1576 if (env->cr[4] & CR4_PAE_MASK)
1577 {
1578 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1579 {
1580 STAM_COUNTER_INC(&gStatRefusePAE);
1581 return false;
1582 }
1583 }
1584
1585 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1586 {
1587 if (!(env->eflags & IF_MASK))
1588 {
1589 STAM_COUNTER_INC(&gStatRefuseIF0);
1590 Log2(("raw mode refused: IF (RawR3)\n"));
1591 return false;
1592 }
1593
1594 if (!(u32CR0 & CR0_WP_MASK))
1595 {
1596 STAM_COUNTER_INC(&gStatRefuseWP0);
1597 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1598 return false;
1599 }
1600 }
1601 else
1602 {
1603 // Let's start with pure 32 bits ring 0 code first
1604 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1605 {
1606 STAM_COUNTER_INC(&gStatRefuseCode16);
1607 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1608 return false;
1609 }
1610
1611 /* Only R0. */
1612 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1613 {
1614 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1615 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1616 return false;
1617 }
1618
1619 if (!(u32CR0 & CR0_WP_MASK))
1620 {
1621 STAM_COUNTER_INC(&gStatRefuseWP0);
1622 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1623 return false;
1624 }
1625
1626#ifdef VBOX_WITH_RAW_MODE
1627 if (PATMIsPatchGCAddr(env->pVM, eip))
1628 {
1629 Log2(("raw r0 mode forced: patch code\n"));
1630 *piException = EXCP_EXECUTE_RAW;
1631 return true;
1632 }
1633#endif
1634
1635#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1636 if (!(env->eflags & IF_MASK))
1637 {
1638 STAM_COUNTER_INC(&gStatRefuseIF0);
1639 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1640 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1641 return false;
1642 }
1643#endif
1644
1645#ifndef VBOX_WITH_RAW_RING1
1646 if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
1647 {
1648 Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
1649 return false;
1650 }
1651#endif
1652 env->state |= CPU_RAW_RING0;
1653 }
1654
1655 /*
1656 * Don't reschedule the first time we're called, because there might be
1657 * special reasons why we're here that is not covered by the above checks.
1658 */
1659 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1660 {
1661 Log2(("raw mode refused: first scheduling\n"));
1662 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1663 return false;
1664 }
1665
1666 /*
1667 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1668 */
1669 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1670 {
1671 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1672 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1673 return false;
1674 }
1675 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1676 {
1677 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1678 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1679 return false;
1680 }
1681 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1682 {
1683 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1684 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1685 return false;
1686 }
1687 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1688 {
1689 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1690 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1691 return false;
1692 }
1693 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1694 {
1695 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1696 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1697 return false;
1698 }
1699 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1700 {
1701 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1702 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1703 return false;
1704 }
1705
1706/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1707 *piException = EXCP_EXECUTE_RAW;
1708 return true;
1709}
1710
1711
1712#ifdef VBOX_WITH_RAW_MODE
1713/**
1714 * Fetches a code byte.
1715 *
1716 * @returns Success indicator (bool) for ease of use.
1717 * @param env The CPU environment structure.
1718 * @param GCPtrInstr Where to fetch code.
1719 * @param pu8Byte Where to store the byte on success
1720 */
1721bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1722{
1723 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1724 if (RT_SUCCESS(rc))
1725 return true;
1726 return false;
1727}
1728#endif /* VBOX_WITH_RAW_MODE */
1729
1730
1731/**
1732 * Flush (or invalidate if you like) page table/dir entry.
1733 *
1734 * (invlpg instruction; tlb_flush_page)
1735 *
1736 * @param env Pointer to cpu environment.
1737 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1738 */
1739void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1740{
1741 PVM pVM = env->pVM;
1742 PCPUMCTX pCtx;
1743 int rc;
1744
1745 Assert(EMRemIsLockOwner(env->pVM));
1746
1747 /*
1748 * When we're replaying invlpg instructions or restoring a saved
1749 * state we disable this path.
1750 */
1751 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1752 return;
1753 LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1754 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1755
1756 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1757
1758 /*
1759 * Update the control registers before calling PGMFlushPage.
1760 */
1761 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1762 Assert(pCtx);
1763 pCtx->cr0 = env->cr[0];
1764 pCtx->cr3 = env->cr[3];
1765 pCtx->cr4 = env->cr[4];
1766
1767 /*
1768 * Let PGM do the rest.
1769 */
1770 Assert(env->pVCpu);
1771 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1772 if (RT_FAILURE(rc))
1773 {
1774 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1775 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1776 }
1777 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1778}
1779
1780
1781#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest-physical address into a host pointer for the QEMU TLB.
 * The address is asserted 4-byte aligned, so the two low bits of the returned
 * pointer are free to carry status: (void *)1 signals "no direct mapping,
 * use handlers", and bit 1 set marks a catch-write page.
 *
 * NOTE(review): the fWritable parameter is currently ignored — the call below
 * hardcodes true. Confirm this is intentional before relying on it.
 */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;


    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);
    /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/

    STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;                       /* no direct mapping; go through handlers */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);     /* tag the pointer as catch-write */
    return pv;
}
1806#endif /* REM_PHYS_ADDR_IN_TLB */
1807
1808
1809/**
1810 * Called from tlb_protect_code in order to write monitor a code page.
1811 *
1812 * @param env Pointer to the CPU environment.
1813 * @param GCPtr Code page to monitor
1814 */
1815void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1816{
1817#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1818 Assert(env->pVM->rem.s.fInREM);
1819 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1820 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1821 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1822 && !(env->eflags & VM_MASK) /* no V86 mode */
1823 && VM_IS_RAW_MODE_ENABLED(env->pVM))
1824 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1825#endif
1826}
1827
1828
1829/**
1830 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1831 *
1832 * @param env Pointer to the CPU environment.
1833 * @param GCPtr Code page to monitor
1834 */
1835void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1836{
1837 Assert(env->pVM->rem.s.fInREM);
1838#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1839 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1840 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1841 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1842 && !(env->eflags & VM_MASK) /* no V86 mode */
1843 && VM_IS_RAW_MODE_ENABLED(env->pVM))
1844 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1845#endif
1846}
1847
1848
1849/**
1850 * Called when the CPU is initialized, any of the CRx registers are changed or
1851 * when the A20 line is modified.
1852 *
1853 * @param env Pointer to the CPU environment.
1854 * @param fGlobal Set if the flush is global.
1855 */
1856void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1857{
1858 PVM pVM = env->pVM;
1859 PCPUMCTX pCtx;
1860 Assert(EMRemIsLockOwner(pVM));
1861
1862 /*
1863 * When we're replaying invlpg instructions or restoring a saved
1864 * state we disable this path.
1865 */
1866 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1867 return;
1868 Assert(pVM->rem.s.fInREM);
1869
1870 /*
1871 * The caller doesn't check cr4, so we have to do that for ourselves.
1872 */
1873 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1874 fGlobal = true;
1875 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1876
1877 /*
1878 * Update the control registers before calling PGMR3FlushTLB.
1879 */
1880 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1881 Assert(pCtx);
1882 pCtx->cr0 = env->cr[0];
1883 pCtx->cr3 = env->cr[3];
1884 pCtx->cr4 = env->cr[4];
1885
1886 /*
1887 * Let PGM do the rest.
1888 */
1889 Assert(env->pVCpu);
1890 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1891}
1892
1893
1894/**
1895 * Called when any of the cr0, cr4 or efer registers is updated.
1896 *
1897 * @param env Pointer to the CPU environment.
1898 */
1899void remR3ChangeCpuMode(CPUX86State *env)
1900{
1901 PVM pVM = env->pVM;
1902 uint64_t efer;
1903 PCPUMCTX pCtx;
1904 int rc;
1905
1906 /*
1907 * When we're replaying loads or restoring a saved
1908 * state this path is disabled.
1909 */
1910 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1911 return;
1912 Assert(pVM->rem.s.fInREM);
1913
1914 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1915 Assert(pCtx);
1916
1917 /*
1918 * Notify PGM about WP0 being enabled (like CPUSetGuestCR0 does).
1919 */
1920 if (((env->cr[0] ^ pCtx->cr0) & X86_CR0_WP) && (env->cr[0] & X86_CR0_WP))
1921 PGMCr0WpEnabled(env->pVCpu);
1922
1923 /*
1924 * Update the control registers before calling PGMChangeMode()
1925 * as it may need to map whatever cr3 is pointing to.
1926 */
1927 pCtx->cr0 = env->cr[0];
1928 pCtx->cr3 = env->cr[3];
1929 pCtx->cr4 = env->cr[4];
1930#ifdef TARGET_X86_64
1931 efer = env->efer;
1932 pCtx->msrEFER = efer;
1933#else
1934 efer = 0;
1935#endif
1936 Assert(env->pVCpu);
1937 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1938 if (rc != VINF_SUCCESS)
1939 {
1940 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1941 {
1942 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1943 remR3RaiseRC(env->pVM, rc);
1944 }
1945 else
1946 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1947 }
1948}
1949
1950
/**
 * Called from compiled code to run dma.
 *
 * Temporarily stops the emulated-code profiling counter while PDM performs
 * the pending DMA work, then resumes it.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3DmaRun(CPUX86State *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1962
1963
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Swaps the emulated-code profiling counter for the timer counter around
 * the TM timer-queue processing.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3TimersRun(CPUX86State *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1979
1980
/**
 * Record trap occurrence
 *
 * Maintains per-trap STAM counters (when statistics are enabled) and tracks
 * repeated occurrences of the same CPU exception so that a guest stuck
 * re-raising the identical trap can be detected and aborted.
 *
 * @returns VBox status code (VERR_REM_TOO_MANY_TRAPS when the same trap
 *          repeats excessively, VINF_SUCCESS otherwise).
 * @param env Pointer to the CPU environment.
 * @param uTrap Trap nr
 * @param uErrorCode Error code
 * @param pvNextEIP Next EIP
 */
int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-vector counters; traps >= 255 are not counted. */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (< 0x20) in protected, non-V86 mode participate in
       the repeat-detection logic. */
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap repeated more than 512 times: give up and raise an error. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap (or same vector at a different EIP/CR2) restarts the count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
        {
            Log(("remR3NotifyTrap: uTrap=%#x set as pending\n", uTrap));
            pVM->rem.s.cPendingExceptions = 1;
        }
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Soft interrupts / real-mode traps: remember the trap but reset the count. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
2043
2044
2045/*
2046 * Clear current active trap
2047 *
2048 * @param pVM VM Handle.
2049 */
2050void remR3TrapClear(PVM pVM)
2051{
2052 pVM->rem.s.cPendingExceptions = 0;
2053 pVM->rem.s.uPendingException = 0;
2054 pVM->rem.s.uPendingExcptEIP = 0;
2055 pVM->rem.s.uPendingExcptCR2 = 0;
2056}
2057
2058
/**
 * Record previous call instruction addresses
 *
 * Forwards the current EIP to CSAM's call-address tracking; a no-op unless
 * raw-mode support is compiled in.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3RecordCall(CPUX86State *env)
{
#ifdef VBOX_WITH_RAW_MODE
    CSAMR3RecordCallAddress(env->pVM, env->eip);
#endif
}
2070
2071
/**
 * Syncs the internal REM state with the VM.
 *
 * This must be called before REMR3Run() is invoked whenever when the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
 *
 * @returns VBox status code.
 *
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle.
 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);

    Assert(pCtx);
    /* Nested guests (SVM/VMX) cannot be executed by the recompiler. */
    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        || CPUMIsGuestInVmxNonRootMode(pCtx))
    {
        AssertMsgFailed(("Bad scheduling - can't exec. nested-guest in REM!\n"));
        return VERR_EM_CANNOT_EXEC_GUEST;
    }

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8]  = pCtx->r8;
    pVM->rem.s.Env.regs[9]  = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Only if we're not flushing the TLB.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (pVM->rem.s.cInvalidatedPages)
    {
        if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
        {
            RTUINT i;

            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreInvlPg  = true;
            for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
            {
                Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
                tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
            }
            pVM->rem.s.fIgnoreInvlPg  = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer         = pCtx->msrEFER;
    pVM->rem.s.Env.star         = pCtx->msrSTAR;
    pVM->rem.s.Env.pat          = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar        = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar        = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask        = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /* Update the inhibit IRQ mask. */
    pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
        if (InhibitPC == pCtx->rip)
            pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
        else
        {
            /* The inhibit PC is stale (RIP moved on) - drop the force flag. */
            Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }
    }

    /* Update the inhibit NMI mask. */
    pVM->rem.s.Env.hflags2 &= ~HF2_NMI_MASK;
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
        pVM->rem.s.Env.hflags2 |= HF2_NMI_MASK;

    /*
     * Sync the A20 gate.
     */
    bool fA20State = PGMPhysIsA20Enabled(pVCpu);
    if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
    {
        /* Suppress side effects (flushes) triggered by the A20 update. */
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
        cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
        )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base  = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base  = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs  = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
            {
                pVM->rem.s.Env.ldt.selector    = pCtx->ldtr.Sel;
                pVM->rem.s.Env.ldt.newselector = 0;
                pVM->rem.s.Env.ldt.fVBoxFlags  = pCtx->ldtr.fFlags;
                pVM->rem.s.Env.ldt.base        = pCtx->ldtr.u64Base;
                pVM->rem.s.Env.ldt.limit       = pCtx->ldtr.u32Limit;
                pVM->rem.s.Env.ldt.flags       = (pCtx->ldtr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
            }
            else
            {
                AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
            }
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector    = pCtx->tr.Sel;
    pVM->rem.s.Env.tr.newselector = 0;
    pVM->rem.s.Env.tr.fVBoxFlags  = pCtx->tr.fFlags;
    pVM->rem.s.Env.tr.base        = pCtx->tr.u64Base;
    pVM->rem.s.Env.tr.limit       = pCtx->tr.u32Limit;
    pVM->rem.s.Env.tr.flags       = (pCtx->tr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;

    /*
     * Update selector registers.
     *
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT om sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     *
     * CPL/Stack; Note first check this one as the CPL might have changed.
     * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */
    cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
    /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
    do \
    { \
        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
        { \
            cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
                                   (a_pVBoxSReg)->Sel, \
                                   (a_pVBoxSReg)->u64Base, \
                                   (a_pVBoxSReg)->u32Limit, \
                                   ((a_pVBoxSReg)->Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT); \
            (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
        } \
        /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
        else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
        { \
            Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
                  (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
            sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
            if ((a_pRemSReg)->newselector) \
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
        } \
        else \
            (a_pRemSReg)->newselector = 0; \
    } while (0)

    SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
    SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
    SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
    SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
    SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
    SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
    /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
     * be the same but not the base/limit. */

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int     = enmType == TRPM_HARDWARE_INT
                                                ? EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ : 0; /* HACK ALERT! */
            pVM->rem.s.Env.exception_next_eip   = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int     = 1;
            pVM->rem.s.Env.exception_next_eip   = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == X86_XCPT_BP)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == X86_XCPT_OF)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case X86_XCPT_PF:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case X86_XCPT_AC: case X86_XCPT_DF:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
        APICUpdatePendingInterrupts(pVCpu);
    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2522
2523
2524/**
2525 * Syncs back changes in the REM state to the VM state.
2526 *
2527 * This must be called after invoking REMR3Run().
2528 * Calling it several times in a row is not permitted.
2529 *
2530 * @returns VBox status code.
2531 *
2532 * @param pVM VM Handle.
2533 * @param pVCpu VMCPU Handle.
2534 */
2535REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2536{
2537 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2538 Assert(pCtx);
2539 unsigned i;
2540
2541 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2542 Log2(("REMR3StateBack:\n"));
2543 Assert(pVM->rem.s.fInREM);
2544
2545 /*
2546 * Copy back the registers.
2547 * This is done in the order they are declared in the CPUMCTX structure.
2548 */
2549
2550 /** @todo FOP */
2551 /** @todo FPUIP */
2552 /** @todo CS */
2553 /** @todo FPUDP */
2554 /** @todo DS */
2555
2556 /** @todo check if FPU/XMM was actually used in the recompiler */
2557 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87);
2558//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2559
2560#ifdef TARGET_X86_64
2561 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2562 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2563 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2564 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2565 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2566 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2567 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2568 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2569 pCtx->r8 = pVM->rem.s.Env.regs[8];
2570 pCtx->r9 = pVM->rem.s.Env.regs[9];
2571 pCtx->r10 = pVM->rem.s.Env.regs[10];
2572 pCtx->r11 = pVM->rem.s.Env.regs[11];
2573 pCtx->r12 = pVM->rem.s.Env.regs[12];
2574 pCtx->r13 = pVM->rem.s.Env.regs[13];
2575 pCtx->r14 = pVM->rem.s.Env.regs[14];
2576 pCtx->r15 = pVM->rem.s.Env.regs[15];
2577
2578 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2579
2580#else
2581 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2582 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2583 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2584 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2585 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2586 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2587 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2588
2589 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2590#endif
2591
2592#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2593 do \
2594 { \
2595 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2596 if (!pVM->rem.s.Env.segs[R_SS].newselector) \
2597 { \
2598 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2599 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2600 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2601 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2602 /* Note! QEmu saves the 2nd dword of the descriptor; we (VT-x/AMD-V) keep only the attributes! */ \
2603 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK; \
2604 } \
2605 else \
2606 { \
2607 pCtx->a_sreg.fFlags = 0; \
2608 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2609 } \
2610 } while (0)
2611
2612 SYNC_BACK_SREG(es, ES);
2613 SYNC_BACK_SREG(cs, CS);
2614 SYNC_BACK_SREG(ss, SS);
2615 SYNC_BACK_SREG(ds, DS);
2616 SYNC_BACK_SREG(fs, FS);
2617 SYNC_BACK_SREG(gs, GS);
2618
2619#ifdef TARGET_X86_64
2620 pCtx->rip = pVM->rem.s.Env.eip;
2621 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2622#else
2623 pCtx->eip = pVM->rem.s.Env.eip;
2624 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2625#endif
2626
2627 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2628 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2629 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2630 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2631
2632 for (i = 0; i < 8; i++)
2633 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2634
2635 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2636 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2637 {
2638 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2639 STAM_COUNTER_INC(&gStatREMGDTChange);
2640 }
2641
2642 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2643 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2644 {
2645 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2646 STAM_COUNTER_INC(&gStatREMIDTChange);
2647 }
2648
2649 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2650 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2651 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2652 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2653 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2654 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2655 )
2656 {
2657 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2658 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2659 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2660 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2661 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2662 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2663 STAM_COUNTER_INC(&gStatREMLDTRChange);
2664 }
2665
2666 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2667 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2668 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2669 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2670 || pCtx->tr.Attr.u != ((pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2671 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2672 )
2673 {
2674 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2675 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2676 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2677 pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT));
2678 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2679 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2680 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2681 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2682 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2683 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2684 Assert(pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE);
2685 STAM_COUNTER_INC(&gStatREMTRChange);
2686 }
2687
2688 /* Sysenter MSR */
2689 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2690 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2691 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2692
2693 /* System MSRs. */
2694 pCtx->msrEFER = pVM->rem.s.Env.efer;
2695 pCtx->msrSTAR = pVM->rem.s.Env.star;
2696 pCtx->msrPAT = pVM->rem.s.Env.pat;
2697#ifdef TARGET_X86_64
2698 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2699 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2700 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2701 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2702#endif
2703
2704 /* Inhibit interrupt flag. */
2705 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2706 {
2707 Log(("Settings VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2708 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2709 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2710 }
2711 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2712 {
2713 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2714 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2715 }
2716
2717 /* Inhibit NMI flag. */
2718 if (pVM->rem.s.Env.hflags2 & HF2_NMI_MASK)
2719 {
2720 Log(("Settings VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2721 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2722 }
2723 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2724 {
2725 Log(("Clearing VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2726 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2727 }
2728
2729 remR3TrapClear(pVM);
2730
2731 /*
2732 * Check for traps.
2733 */
2734 if ( pVM->rem.s.Env.exception_index >= 0
2735 && pVM->rem.s.Env.exception_index < 256)
2736 {
2737 /* This cannot be a hardware-interrupt because exception_index < EXCP_INTERRUPT. */
2738 int rc;
2739
2740 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2741 TRPMEVENT enmType = pVM->rem.s.Env.exception_is_int == 0 ? TRPM_TRAP
2742 : pVM->rem.s.Env.exception_is_int == EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ ? TRPM_HARDWARE_INT
2743 : TRPM_SOFTWARE_INT;
2744 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, enmType);
2745 AssertRC(rc);
2746 if (enmType == TRPM_TRAP)
2747 {
2748 switch (pVM->rem.s.Env.exception_index)
2749 {
2750 case X86_XCPT_PF:
2751 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2752 /* fallthru */
2753 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2754 case X86_XCPT_AC: case X86_XCPT_DF: /* 0 */
2755 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2756 break;
2757 }
2758 }
2759 }
2760
2761 /*
2762 * We're not longer in REM mode.
2763 */
2764 CPUMR3RemLeave(pVCpu,
2765 !VM_IS_RAW_MODE_ENABLED(pVM)
2766 || ( pVM->rem.s.Env.segs[R_SS].newselector
2767 | pVM->rem.s.Env.segs[R_GS].newselector
2768 | pVM->rem.s.Env.segs[R_FS].newselector
2769 | pVM->rem.s.Env.segs[R_ES].newselector
2770 | pVM->rem.s.Env.segs[R_DS].newselector
2771 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2772 );
2773 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2774 pVM->rem.s.fInREM = false;
2775 pVM->rem.s.pCtx = NULL;
2776 pVM->rem.s.Env.pVCpu = NULL;
2777 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2778 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2779 return VINF_SUCCESS;
2780}
2781
2782
2783/**
2784 * This is called by the disassembler when it wants to update the cpu state
2785 * before for instance doing a register dump.
2786 */
2787static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2788{
2789 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2790 unsigned i;
2791
2792 Assert(pVM->rem.s.fInREM);
2793
2794 /*
2795 * Copy back the registers.
2796 * This is done in the order they are declared in the CPUMCTX structure.
2797 */
2798
2799 PX86FXSTATE pFpuCtx = &pCtx->pXStateR3->x87;
2800 /** @todo FOP */
2801 /** @todo FPUIP */
2802 /** @todo CS */
2803 /** @todo FPUDP */
2804 /** @todo DS */
2805 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2806 pFpuCtx->MXCSR = 0;
2807 pFpuCtx->MXCSR_MASK = 0;
2808
2809 /** @todo check if FPU/XMM was actually used in the recompiler */
2810 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)pFpuCtx);
2811//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2812
2813#ifdef TARGET_X86_64
2814 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2815 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2816 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2817 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2818 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2819 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2820 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2821 pCtx->r8 = pVM->rem.s.Env.regs[8];
2822 pCtx->r9 = pVM->rem.s.Env.regs[9];
2823 pCtx->r10 = pVM->rem.s.Env.regs[10];
2824 pCtx->r11 = pVM->rem.s.Env.regs[11];
2825 pCtx->r12 = pVM->rem.s.Env.regs[12];
2826 pCtx->r13 = pVM->rem.s.Env.regs[13];
2827 pCtx->r14 = pVM->rem.s.Env.regs[14];
2828 pCtx->r15 = pVM->rem.s.Env.regs[15];
2829
2830 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2831#else
2832 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2833 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2834 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2835 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2836 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2837 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2838 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2839
2840 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2841#endif
2842
2843 SYNC_BACK_SREG(es, ES);
2844 SYNC_BACK_SREG(cs, CS);
2845 SYNC_BACK_SREG(ss, SS);
2846 SYNC_BACK_SREG(ds, DS);
2847 SYNC_BACK_SREG(fs, FS);
2848 SYNC_BACK_SREG(gs, GS);
2849
2850#ifdef TARGET_X86_64
2851 pCtx->rip = pVM->rem.s.Env.eip;
2852 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2853#else
2854 pCtx->eip = pVM->rem.s.Env.eip;
2855 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2856#endif
2857
2858 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2859 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2860 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2861 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2862
2863 for (i = 0; i < 8; i++)
2864 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2865
2866 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2867 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2868 {
2869 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2870 STAM_COUNTER_INC(&gStatREMGDTChange);
2871 }
2872
2873 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2874 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2875 {
2876 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2877 STAM_COUNTER_INC(&gStatREMIDTChange);
2878 }
2879
2880 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2881 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2882 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2883 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2884 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2885 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2886 )
2887 {
2888 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2889 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2890 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2891 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2892 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2893 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2894 STAM_COUNTER_INC(&gStatREMLDTRChange);
2895 }
2896
2897 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2898 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2899 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2900 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2901 || pCtx->tr.Attr.u != ((pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2902 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2903 )
2904 {
2905 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2906 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2907 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2908 pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT));
2909 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2910 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2911 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2912 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2913 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2914 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2915 Assert(pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE);
2916 STAM_COUNTER_INC(&gStatREMTRChange);
2917 }
2918
2919 /* Sysenter MSR */
2920 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2921 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2922 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2923
2924 /* System MSRs. */
2925 pCtx->msrEFER = pVM->rem.s.Env.efer;
2926 pCtx->msrSTAR = pVM->rem.s.Env.star;
2927 pCtx->msrPAT = pVM->rem.s.Env.pat;
2928#ifdef TARGET_X86_64
2929 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2930 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2931 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2932 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2933#endif
2934
2935}
2936
2937
2938/**
2939 * Update the VMM state information if we're currently in REM.
2940 *
2941 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2942 * we're currently executing in REM and the VMM state is invalid. This method will of
2943 * course check that we're executing in REM before syncing any data over to the VMM.
2944 *
2945 * @param pVM The VM handle.
2946 * @param pVCpu The VMCPU handle.
2947 */
2948REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2949{
2950 if (pVM->rem.s.fInREM)
2951 remR3StateUpdate(pVM, pVCpu);
2952}
2953
2954
2955#undef LOG_GROUP
2956#define LOG_GROUP LOG_GROUP_REM
2957
2958
2959/**
2960 * Notify the recompiler about Address Gate 20 state change.
2961 *
2962 * This notification is required since A20 gate changes are
2963 * initialized from a device driver and the VM might just as
2964 * well be in REM mode as in RAW mode.
2965 *
2966 * @param pVM VM handle.
2967 * @param pVCpu VMCPU handle.
2968 * @param fEnable True if the gate should be enabled.
2969 * False if the gate should be disabled.
2970 */
2971REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2972{
2973 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2974 VM_ASSERT_EMT(pVM);
2975
2976 /** @todo SMP and the A20 gate... */
2977 if (pVM->rem.s.Env.pVCpu == pVCpu)
2978 {
2979 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2980 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2981 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2982 }
2983}
2984
2985
2986/**
2987 * Replays the handler notification changes
2988 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2989 *
2990 * @param pVM VM handle.
2991 */
2992REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2993{
2994 /*
2995 * Replay the flushes.
2996 */
2997 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2998 VM_ASSERT_EMT(pVM);
2999
3000 /** @todo this isn't ensuring correct replay order. */
3001 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3002 {
3003 uint32_t idxNext;
3004 uint32_t idxRevHead;
3005 uint32_t idxHead;
3006#ifdef VBOX_STRICT
3007 int32_t c = 0;
3008#endif
3009
3010 /* Lockless purging of pending notifications. */
3011 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3012 if (idxHead == UINT32_MAX)
3013 return;
3014 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3015
3016 /*
3017 * Reverse the list to process it in FIFO order.
3018 */
3019 idxRevHead = UINT32_MAX;
3020 do
3021 {
3022 /* Save the index of the next rec. */
3023 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3024 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3025 /* Push the record onto the reversed list. */
3026 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3027 idxRevHead = idxHead;
3028 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3029 /* Advance. */
3030 idxHead = idxNext;
3031 } while (idxHead != UINT32_MAX);
3032
3033 /*
3034 * Loop thru the list, reinserting the record into the free list as they are
3035 * processed to avoid having other EMTs running out of entries while we're flushing.
3036 */
3037 idxHead = idxRevHead;
3038 do
3039 {
3040 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3041 uint32_t idxCur;
3042 Assert(--c >= 0);
3043
3044 switch (pCur->enmKind)
3045 {
3046 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3047 remR3NotifyHandlerPhysicalRegister(pVM,
3048 pCur->u.PhysicalRegister.enmKind,
3049 pCur->u.PhysicalRegister.GCPhys,
3050 pCur->u.PhysicalRegister.cb,
3051 pCur->u.PhysicalRegister.fHasHCHandler);
3052 break;
3053
3054 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3055 remR3NotifyHandlerPhysicalDeregister(pVM,
3056 pCur->u.PhysicalDeregister.enmKind,
3057 pCur->u.PhysicalDeregister.GCPhys,
3058 pCur->u.PhysicalDeregister.cb,
3059 pCur->u.PhysicalDeregister.fHasHCHandler,
3060 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3061 break;
3062
3063 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3064 remR3NotifyHandlerPhysicalModify(pVM,
3065 pCur->u.PhysicalModify.enmKind,
3066 pCur->u.PhysicalModify.GCPhysOld,
3067 pCur->u.PhysicalModify.GCPhysNew,
3068 pCur->u.PhysicalModify.cb,
3069 pCur->u.PhysicalModify.fHasHCHandler,
3070 pCur->u.PhysicalModify.fRestoreAsRAM);
3071 break;
3072
3073 default:
3074 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3075 break;
3076 }
3077
3078 /*
3079 * Advance idxHead.
3080 */
3081 idxCur = idxHead;
3082 idxHead = pCur->idxNext;
3083 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3084
3085 /*
3086 * Put the record back into the free list.
3087 */
3088 do
3089 {
3090 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3091 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3092 ASMCompilerBarrier();
3093 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3094 } while (idxHead != UINT32_MAX);
3095
3096#ifdef VBOX_STRICT
3097 if (pVM->cCpus == 1)
3098 {
3099 unsigned c;
3100 /* Check that all records are now on the free list. */
3101 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3102 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3103 c++;
3104 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3105 }
3106#endif
3107 }
3108}
3109
3110
3111/**
3112 * Notify REM about changed code page.
3113 *
3114 * @returns VBox status code.
3115 * @param pVM VM handle.
3116 * @param pVCpu VMCPU handle.
3117 * @param pvCodePage Code page address
3118 */
3119REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3120{
3121#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3122 int rc;
3123 RTGCPHYS PhysGC;
3124 uint64_t flags;
3125
3126 VM_ASSERT_EMT(pVM);
3127
3128 /*
3129 * Get the physical page address.
3130 */
3131 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3132 if (rc == VINF_SUCCESS)
3133 {
3134 /*
3135 * Sync the required registers and flush the whole page.
3136 * (Easier to do the whole page than notifying it about each physical
3137 * byte that was changed.
3138 */
3139 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3140 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3141 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3142 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3143
3144 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3145 }
3146#endif
3147 return VINF_SUCCESS;
3148}
3149
3150
3151/**
3152 * Notification about a successful MMR3PhysRegister() call.
3153 *
3154 * @param pVM VM handle.
3155 * @param GCPhys The physical address the RAM.
3156 * @param cb Size of the memory.
3157 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3158 */
3159REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3160{
3161 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3162 VM_ASSERT_EMT(pVM);
3163
3164 /*
3165 * Validate input - we trust the caller.
3166 */
3167 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3168 Assert(cb);
3169 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3170 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3171
3172 /*
3173 * Base ram? Update GCPhysLastRam.
3174 */
3175 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3176 {
3177 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3178 {
3179 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3180 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3181 }
3182 }
3183
3184 /*
3185 * Register the ram.
3186 */
3187 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3188
3189 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3190 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3191 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3192
3193 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3194}
3195
3196
3197/**
3198 * Notification about a successful MMR3PhysRomRegister() call.
3199 *
3200 * @param pVM VM handle.
3201 * @param GCPhys The physical address of the ROM.
3202 * @param cb The size of the ROM.
3203 * @param pvCopy Pointer to the ROM copy.
3204 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3205 * This function will be called when ever the protection of the
3206 * shadow ROM changes (at reset and end of POST).
3207 */
3208REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3209{
3210 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3211 VM_ASSERT_EMT(pVM);
3212
3213 /*
3214 * Validate input - we trust the caller.
3215 */
3216 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3217 Assert(cb);
3218 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3219
3220 /*
3221 * Register the rom.
3222 */
3223 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3224
3225 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3226 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3227 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3228
3229 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3230}
3231
3232
3233/**
3234 * Notification about a successful memory deregistration or reservation.
3235 *
3236 * @param pVM VM Handle.
3237 * @param GCPhys Start physical address.
3238 * @param cb The size of the range.
3239 */
3240REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3241{
3242 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3243 VM_ASSERT_EMT(pVM);
3244
3245 /*
3246 * Validate input - we trust the caller.
3247 */
3248 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3249 Assert(cb);
3250 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3251
3252 /*
3253 * Unassigning the memory.
3254 */
3255 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3256
3257 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3258 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3259 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3260
3261 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3262}
3263
3264
3265/**
3266 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3267 *
3268 * @param pVM VM Handle.
3269 * @param enmKind Kind of access handler.
3270 * @param GCPhys Handler range address.
3271 * @param cb Size of the handler range.
3272 * @param fHasHCHandler Set if the handler has a HC callback function.
3273 *
3274 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3275 * Handler memory type to memory which has no HC handler.
3276 */
3277static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3278 bool fHasHCHandler)
3279{
3280 Log(("REMR3NotifyHandlerPhysicalRegister: enmKind=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3281 enmKind, GCPhys, cb, fHasHCHandler));
3282
3283 VM_ASSERT_EMT(pVM);
3284 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3285 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3286
3287
3288 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3289
3290 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3291 if (enmKind == PGMPHYSHANDLERKIND_MMIO)
3292 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3293 else if (fHasHCHandler)
3294 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3295 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3296
3297 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3298}
3299
3300/**
3301 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3302 *
3303 * @param pVM VM Handle.
3304 * @param enmKind Kind of access handler.
3305 * @param GCPhys Handler range address.
3306 * @param cb Size of the handler range.
3307 * @param fHasHCHandler Set if the handler has a HC callback function.
3308 *
3309 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3310 * Handler memory type to memory which has no HC handler.
3311 */
3312REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3313 bool fHasHCHandler)
3314{
3315 REMR3ReplayHandlerNotifications(pVM);
3316
3317 remR3NotifyHandlerPhysicalRegister(pVM, enmKind, GCPhys, cb, fHasHCHandler);
3318}
3319
3320/**
3321 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3322 *
3323 * @param pVM VM Handle.
3324 * @param enmKind Kind of access handler.
3325 * @param GCPhys Handler range address.
3326 * @param cb Size of the handler range.
3327 * @param fHasHCHandler Set if the handler has a HC callback function.
3328 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3329 */
3330static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3331 bool fHasHCHandler, bool fRestoreAsRAM)
3332{
3333 Log(("REMR3NotifyHandlerPhysicalDeregister: enmKind=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3334 enmKind, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3335 VM_ASSERT_EMT(pVM);
3336
3337
3338 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3339
3340 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3341 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3342 if (enmKind == PGMPHYSHANDLERKIND_MMIO)
3343 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3344 else if (fHasHCHandler)
3345 {
3346 if (!fRestoreAsRAM)
3347 {
3348 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3349 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3350 }
3351 else
3352 {
3353 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3354 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3355 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3356 }
3357 }
3358 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3359
3360 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3361}
3362
3363/**
3364 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3365 *
3366 * @param pVM VM Handle.
3367 * @param enmKind Kind of access handler.
3368 * @param GCPhys Handler range address.
3369 * @param cb Size of the handler range.
3370 * @param fHasHCHandler Set if the handler has a HC callback function.
3371 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3372 */
3373REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3374{
3375 REMR3ReplayHandlerNotifications(pVM);
3376 remR3NotifyHandlerPhysicalDeregister(pVM, enmKind, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3377}
3378
3379
3380/**
3381 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3382 *
3383 * @param pVM VM Handle.
3384 * @param enmKind Kind of access handler.
3385 * @param GCPhysOld Old handler range address.
3386 * @param GCPhysNew New handler range address.
3387 * @param cb Size of the handler range.
3388 * @param fHasHCHandler Set if the handler has a HC callback function.
3389 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3390 */
3391static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3392{
3393 Log(("REMR3NotifyHandlerPhysicalModify: enmKind=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3394 enmKind, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3395 VM_ASSERT_EMT(pVM);
3396 AssertReleaseMsg(enmKind != PGMPHYSHANDLERKIND_MMIO, ("enmKind=%d\n", enmKind));
3397
3398 if (fHasHCHandler)
3399 {
3400 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3401
3402 /*
3403 * Reset the old page.
3404 */
3405 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3406 if (!fRestoreAsRAM)
3407 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3408 else
3409 {
3410 /* This is not perfect, but it'll do for PD monitoring... */
3411 Assert(cb == PAGE_SIZE);
3412 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3413 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3414 }
3415
3416 /*
3417 * Update the new page.
3418 */
3419 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3420 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3421 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3422 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3423
3424 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3425 }
3426}
3427
3428/**
3429 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3430 *
3431 * @param pVM VM Handle.
3432 * @param enmKind Kind of access handler.
3433 * @param GCPhysOld Old handler range address.
3434 * @param GCPhysNew New handler range address.
3435 * @param cb Size of the handler range.
3436 * @param fHasHCHandler Set if the handler has a HC callback function.
3437 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3438 */
3439REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3440{
3441 REMR3ReplayHandlerNotifications(pVM);
3442
3443 remR3NotifyHandlerPhysicalModify(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3444}
3445
3446/**
3447 * Checks if we're handling access to this page or not.
3448 *
3449 * @returns true if we're trapping access.
3450 * @returns false if we aren't.
3451 * @param pVM The VM handle.
3452 * @param GCPhys The physical address.
3453 *
3454 * @remark This function will only work correctly in VBOX_STRICT builds!
3455 */
3456REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3457{
3458#ifdef VBOX_STRICT
3459 ram_addr_t off;
3460 REMR3ReplayHandlerNotifications(pVM);
3461
3462 off = get_phys_page_offset(GCPhys);
3463 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3464 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3465 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3466#else
3467 return false;
3468#endif
3469}
3470
3471
3472/**
3473 * Deals with a rare case in get_phys_addr_code where the code
3474 * is being monitored.
3475 *
3476 * It could also be an MMIO page, in which case we will raise a fatal error.
3477 *
3478 * @returns The physical address corresponding to addr.
3479 * @param env The cpu environment.
3480 * @param addr The virtual address.
3481 * @param pTLBEntry The TLB entry.
3482 * @param IoTlbEntry The I/O TLB entry address.
3483 */
3484target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3485 target_ulong addr,
3486 CPUTLBEntry *pTLBEntry,
3487 target_phys_addr_t IoTlbEntry)
3488{
3489 PVM pVM = env->pVM;
3490
3491 if ((IoTlbEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3492 {
3493 /* If code memory is being monitored, appropriate IOTLB entry will have
3494 handler IO type, and addend will provide real physical address, no
3495 matter if we store VA in TLB or not, as handlers are always passed PA */
3496 target_ulong ret = (IoTlbEntry & TARGET_PAGE_MASK) + addr;
3497 return ret;
3498 }
3499 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3500 "*** handlers\n",
3501 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)IoTlbEntry));
3502 DBGFR3Info(pVM->pUVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3503 LogRel(("*** mmio\n"));
3504 DBGFR3Info(pVM->pUVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3505 LogRel(("*** phys\n"));
3506 DBGFR3Info(pVM->pUVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3507 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3508 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3509 AssertFatalFailed();
3510}
3511
3512/**
3513 * Read guest RAM and ROM.
3514 *
3515 * @param SrcGCPhys The source address (guest physical).
3516 * @param pvDst The destination address.
3517 * @param cb Number of bytes
3518 */
3519void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3520{
3521 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3522 VBOX_CHECK_ADDR(SrcGCPhys);
3523 VBOXSTRICTRC rcStrict = PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb, PGMACCESSORIGIN_REM);
3524 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3525#ifdef VBOX_DEBUG_PHYS
3526 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3527#endif
3528 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3529}
3530
3531
3532/**
3533 * Read guest RAM and ROM, unsigned 8-bit.
3534 *
3535 * @param SrcGCPhys The source address (guest physical).
3536 */
3537RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3538{
3539 uint8_t val;
3540 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3541 VBOX_CHECK_ADDR(SrcGCPhys);
3542 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3543 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3544#ifdef VBOX_DEBUG_PHYS
3545 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3546#endif
3547 return val;
3548}
3549
3550
3551/**
3552 * Read guest RAM and ROM, signed 8-bit.
3553 *
3554 * @param SrcGCPhys The source address (guest physical).
3555 */
3556RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3557{
3558 int8_t val;
3559 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3560 VBOX_CHECK_ADDR(SrcGCPhys);
3561 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3562 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3563#ifdef VBOX_DEBUG_PHYS
3564 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3565#endif
3566 return val;
3567}
3568
3569
3570/**
3571 * Read guest RAM and ROM, unsigned 16-bit.
3572 *
3573 * @param SrcGCPhys The source address (guest physical).
3574 */
3575RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3576{
3577 uint16_t val;
3578 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3579 VBOX_CHECK_ADDR(SrcGCPhys);
3580 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3581 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3582#ifdef VBOX_DEBUG_PHYS
3583 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3584#endif
3585 return val;
3586}
3587
3588
3589/**
3590 * Read guest RAM and ROM, signed 16-bit.
3591 *
3592 * @param SrcGCPhys The source address (guest physical).
3593 */
3594RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3595{
3596 int16_t val;
3597 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3598 VBOX_CHECK_ADDR(SrcGCPhys);
3599 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3600 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3601#ifdef VBOX_DEBUG_PHYS
3602 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3603#endif
3604 return val;
3605}
3606
3607
3608/**
3609 * Read guest RAM and ROM, unsigned 32-bit.
3610 *
3611 * @param SrcGCPhys The source address (guest physical).
3612 */
3613RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3614{
3615 uint32_t val;
3616 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3617 VBOX_CHECK_ADDR(SrcGCPhys);
3618 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3619 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3620#ifdef VBOX_DEBUG_PHYS
3621 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3622#endif
3623 return val;
3624}
3625
3626
3627/**
3628 * Read guest RAM and ROM, signed 32-bit.
3629 *
3630 * @param SrcGCPhys The source address (guest physical).
3631 */
3632RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3633{
3634 int32_t val;
3635 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3636 VBOX_CHECK_ADDR(SrcGCPhys);
3637 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3638 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3639#ifdef VBOX_DEBUG_PHYS
3640 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3641#endif
3642 return val;
3643}
3644
3645
3646/**
3647 * Read guest RAM and ROM, unsigned 64-bit.
3648 *
3649 * @param SrcGCPhys The source address (guest physical).
3650 */
3651uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3652{
3653 uint64_t val;
3654 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3655 VBOX_CHECK_ADDR(SrcGCPhys);
3656 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3657 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3658#ifdef VBOX_DEBUG_PHYS
3659 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3660#endif
3661 return val;
3662}
3663
3664
3665/**
3666 * Read guest RAM and ROM, signed 64-bit.
3667 *
3668 * @param SrcGCPhys The source address (guest physical).
3669 */
3670int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3671{
3672 int64_t val;
3673 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3674 VBOX_CHECK_ADDR(SrcGCPhys);
3675 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3676 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3677#ifdef VBOX_DEBUG_PHYS
3678 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3679#endif
3680 return val;
3681}
3682
3683
3684/**
3685 * Write guest RAM.
3686 *
3687 * @param DstGCPhys The destination address (guest physical).
3688 * @param pvSrc The source address.
3689 * @param cb Number of bytes to write
3690 */
3691void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3692{
3693 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3694 VBOX_CHECK_ADDR(DstGCPhys);
3695 VBOXSTRICTRC rcStrict = PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb, PGMACCESSORIGIN_REM);
3696 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3697 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3698#ifdef VBOX_DEBUG_PHYS
3699 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3700#endif
3701}
3702
3703
3704/**
3705 * Write guest RAM, unsigned 8-bit.
3706 *
3707 * @param DstGCPhys The destination address (guest physical).
3708 * @param val Value
3709 */
3710void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3711{
3712 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3713 VBOX_CHECK_ADDR(DstGCPhys);
3714 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3715 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3716#ifdef VBOX_DEBUG_PHYS
3717 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3718#endif
3719}
3720
3721
3722/**
3723 * Write guest RAM, unsigned 8-bit.
3724 *
3725 * @param DstGCPhys The destination address (guest physical).
3726 * @param val Value
3727 */
3728void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3729{
3730 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3731 VBOX_CHECK_ADDR(DstGCPhys);
3732 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3733 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3734#ifdef VBOX_DEBUG_PHYS
3735 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3736#endif
3737}
3738
3739
3740/**
3741 * Write guest RAM, unsigned 32-bit.
3742 *
3743 * @param DstGCPhys The destination address (guest physical).
3744 * @param val Value
3745 */
3746void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3747{
3748 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3749 VBOX_CHECK_ADDR(DstGCPhys);
3750 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3751 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3752#ifdef VBOX_DEBUG_PHYS
3753 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3754#endif
3755}
3756
3757
3758/**
3759 * Write guest RAM, unsigned 64-bit.
3760 *
3761 * @param DstGCPhys The destination address (guest physical).
3762 * @param val Value
3763 */
3764void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3765{
3766 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3767 VBOX_CHECK_ADDR(DstGCPhys);
3768 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3769 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3770#ifdef VBOX_DEBUG_PHYS
3771 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3772#endif
3773}
3774
3775#undef LOG_GROUP
3776#define LOG_GROUP LOG_GROUP_REM_MMIO
3777
3778/** Read MMIO memory. */
3779static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys)
3780{
3781 CPUX86State *env = (CPUX86State *)pvEnv;
3782 uint32_t u32 = 0;
3783 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 1);
3784 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3785 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3786 return u32;
3787}
3788
3789/** Read MMIO memory. */
3790static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys)
3791{
3792 CPUX86State *env = (CPUX86State *)pvEnv;
3793 uint32_t u32 = 0;
3794 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 2);
3795 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3796 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3797 return u32;
3798}
3799
3800/** Read MMIO memory. */
3801static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys)
3802{
3803 CPUX86State *env = (CPUX86State *)pvEnv;
3804 uint32_t u32 = 0;
3805 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 4);
3806 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3807 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3808 return u32;
3809}
3810
3811/** Write to MMIO memory. */
3812static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3813{
3814 CPUX86State *env = (CPUX86State *)pvEnv;
3815 int rc;
3816 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3817 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 1);
3818 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3819}
3820
3821/** Write to MMIO memory. */
3822static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3823{
3824 CPUX86State *env = (CPUX86State *)pvEnv;
3825 int rc;
3826 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3827 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 2);
3828 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3829}
3830
3831/** Write to MMIO memory. */
3832static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3833{
3834 CPUX86State *env = (CPUX86State *)pvEnv;
3835 int rc;
3836 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3837 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 4);
3838 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3839}
3840
3841
3842#undef LOG_GROUP
3843#define LOG_GROUP LOG_GROUP_REM_HANDLER
3844
3845/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3846
3847static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3848{
3849 uint8_t u8;
3850 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3851 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8), PGMACCESSORIGIN_REM);
3852 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3853 return u8;
3854}
3855
3856static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3857{
3858 uint16_t u16;
3859 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3860 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16), PGMACCESSORIGIN_REM);
3861 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3862 return u16;
3863}
3864
3865static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3866{
3867 uint32_t u32;
3868 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3869 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32), PGMACCESSORIGIN_REM);
3870 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3871 return u32;
3872}
3873
/** Access-handler write callback, unsigned 8-bit.
 * NOTE(review): writes sizeof(uint8_t) bytes starting at &u32, i.e. the first
 * byte of the 32-bit local -- this picks the low byte only on little-endian
 * hosts.  Presumably only LE hosts are supported here; confirm if that ever
 * changes. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t), PGMACCESSORIGIN_REM);
    AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
}
3880
/** Access-handler write callback, unsigned 16-bit.
 * NOTE(review): writes sizeof(uint16_t) bytes starting at &u32 -- low half
 * only on little-endian hosts (see remR3HandlerWriteU8 for the same caveat). */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t), PGMACCESSORIGIN_REM);
    AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
}
3887
3888static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3889{
3890 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3891 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t), PGMACCESSORIGIN_REM);
3892 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3893}
3894
3895/* -+- disassembly -+- */
3896
3897#undef LOG_GROUP
3898#define LOG_GROUP LOG_GROUP_REM_DISAS
3899
3900
3901/**
3902 * Enables or disables singled stepped disassembly.
3903 *
3904 * @returns VBox status code.
3905 * @param pVM VM handle.
3906 * @param fEnable To enable set this flag, to disable clear it.
3907 */
3908static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3909{
3910 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3911 VM_ASSERT_EMT(pVM);
3912
3913 if (fEnable)
3914 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3915 else
3916 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3917#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3918 cpu_single_step(&pVM->rem.s.Env, fEnable);
3919#endif
3920 return VINF_SUCCESS;
3921}
3922
3923
3924/**
3925 * Enables or disables singled stepped disassembly.
3926 *
3927 * @returns VBox status code.
3928 * @param pVM VM handle.
3929 * @param fEnable To enable set this flag, to disable clear it.
3930 */
3931REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3932{
3933 int rc;
3934
3935 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3936 if (VM_IS_EMT(pVM))
3937 return remR3DisasEnableStepping(pVM, fEnable);
3938
3939 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3940 AssertRC(rc);
3941 return rc;
3942}
3943
3944
3945#ifdef VBOX_WITH_DEBUGGER
3946/**
3947 * External Debugger Command: .remstep [on|off|1|0]
3948 */
3949static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM,
3950 PCDBGCVAR paArgs, unsigned cArgs)
3951{
3952 int rc;
3953 PVM pVM = pUVM->pVM;
3954
3955 if (cArgs == 0)
3956 /*
3957 * Print the current status.
3958 */
3959 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3960 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3961 else
3962 {
3963 /*
3964 * Convert the argument and change the mode.
3965 */
3966 bool fEnable;
3967 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3968 if (RT_SUCCESS(rc))
3969 {
3970 rc = REMR3DisasEnableStepping(pVM, fEnable);
3971 if (RT_SUCCESS(rc))
3972 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3973 else
3974 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3975 }
3976 else
3977 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3978 }
3979 return rc;
3980}
3981#endif /* VBOX_WITH_DEBUGGER */
3982
3983
3984/**
3985 * Disassembles one instruction and prints it to the log.
3986 *
3987 * @returns Success indicator.
3988 * @param env Pointer to the recompiler CPU structure.
3989 * @param f32BitCode Indicates that whether or not the code should
3990 * be disassembled as 16 or 32 bit. If -1 the CS
3991 * selector will be inspected.
3992 * @param pszPrefix
3993 */
3994bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3995{
3996 PVM pVM = env->pVM;
3997 const bool fLog = LogIsEnabled();
3998 const bool fLog2 = LogIs2Enabled();
3999 int rc = VINF_SUCCESS;
4000
4001 /*
4002 * Don't bother if there ain't any log output to do.
4003 */
4004 if (!fLog && !fLog2)
4005 return true;
4006
4007 /*
4008 * Update the state so DBGF reads the correct register values.
4009 */
4010 remR3StateUpdate(pVM, env->pVCpu);
4011
4012 /*
4013 * Log registers if requested.
4014 */
4015 if (fLog2)
4016 DBGFR3_INFO_LOG(pVM, env->pVCpu, "cpumguest", pszPrefix);
4017
4018 /*
4019 * Disassemble to log.
4020 */
4021 if (fLog)
4022 {
4023 PVMCPU pVCpu = VMMGetCpu(pVM);
4024 char szBuf[256];
4025 szBuf[0] = '\0';
4026 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM,
4027 pVCpu->idCpu,
4028 0, /* Sel */ 0, /* GCPtr */
4029 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4030 szBuf,
4031 sizeof(szBuf),
4032 NULL);
4033 if (RT_FAILURE(rc))
4034 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4035 if (pszPrefix && *pszPrefix)
4036 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4037 else
4038 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4039 }
4040
4041 return RT_SUCCESS(rc);
4042}
4043
4044
4045/**
4046 * Disassemble recompiled code.
4047 *
4048 * @param phFileIgnored Ignored, logfile usually.
4049 * @param pvCode Pointer to the code block.
4050 * @param cb Size of the code block.
4051 */
4052void disas(FILE *phFileIgnored, void *pvCode, unsigned long cb)
4053{
4054 if (LogIs2Enabled())
4055 {
4056 unsigned off = 0;
4057 char szOutput[256];
4058 DISCPUSTATE Cpu;
4059#ifdef RT_ARCH_X86
4060 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4061#else
4062 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4063#endif
4064
4065 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4066 while (off < cb)
4067 {
4068 uint32_t cbInstr;
4069 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4070 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4071 if (RT_SUCCESS(rc))
4072 RTLogPrintf("%s", szOutput);
4073 else
4074 {
4075 RTLogPrintf("disas error %Rrc\n", rc);
4076 cbInstr = 1;
4077 }
4078 off += cbInstr;
4079 }
4080 }
4081}
4082
4083
4084/**
4085 * Disassemble guest code.
4086 *
4087 * @param phFileIgnored Ignored, logfile usually.
4088 * @param uCode The guest address of the code to disassemble. (flat?)
4089 * @param cb Number of bytes to disassemble.
4090 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4091 */
4092void target_disas(FILE *phFileIgnored, target_ulong uCode, target_ulong cb, int fFlags)
4093{
4094 if (LogIs2Enabled())
4095 {
4096 PVM pVM = cpu_single_env->pVM;
4097 PVMCPU pVCpu = cpu_single_env->pVCpu;
4098 RTSEL cs;
4099 RTGCUINTPTR eip;
4100
4101 Assert(pVCpu);
4102
4103 /*
4104 * Update the state so DBGF reads the correct register values (flags).
4105 */
4106 remR3StateUpdate(pVM, pVCpu);
4107
4108 /*
4109 * Do the disassembling.
4110 */
4111 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4112 cs = cpu_single_env->segs[R_CS].selector;
4113 eip = uCode - cpu_single_env->segs[R_CS].base;
4114 for (;;)
4115 {
4116 char szBuf[256];
4117 uint32_t cbInstr;
4118 int rc = DBGFR3DisasInstrEx(pVM->pUVM,
4119 pVCpu->idCpu,
4120 cs,
4121 eip,
4122 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4123 szBuf, sizeof(szBuf),
4124 &cbInstr);
4125 if (RT_SUCCESS(rc))
4126 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4127 else
4128 {
4129 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4130 cbInstr = 1;
4131 }
4132
4133 /* next */
4134 if (cb <= cbInstr)
4135 break;
4136 cb -= cbInstr;
4137 uCode += cbInstr;
4138 eip += cbInstr;
4139 }
4140 }
4141}
4142
4143
4144/**
4145 * Looks up a guest symbol.
4146 *
4147 * @returns Pointer to symbol name. This is a static buffer.
4148 * @param orig_addr The address in question.
4149 */
4150const char *lookup_symbol(target_ulong orig_addr)
4151{
4152 PVM pVM = cpu_single_env->pVM;
4153 RTGCINTPTR off = 0;
4154 RTDBGSYMBOL Sym;
4155 DBGFADDRESS Addr;
4156
4157 int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, orig_addr),
4158 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
4159 &off, &Sym, NULL /*phMod*/);
4160 if (RT_SUCCESS(rc))
4161 {
4162 static char szSym[sizeof(Sym.szName) + 48];
4163 if (!off)
4164 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4165 else if (off > 0)
4166 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4167 else
4168 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4169 return szSym;
4170 }
4171 return "<N/A>";
4172}
4173
4174
4175#undef LOG_GROUP
4176#define LOG_GROUP LOG_GROUP_REM
4177
4178
4179/* -+- FF notifications -+- */
4180
4181/**
4182 * Notification about the interrupt FF being set.
4183 *
4184 * @param pVM VM Handle.
4185 * @param pVCpu VMCPU Handle.
4186 * @thread The emulation thread.
4187 */
4188REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4189{
4190 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4191 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4192 if (pVM->rem.s.fInREM)
4193 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_HARD);
4194}
4195
4196
4197/**
4198 * Notification about the interrupt FF being set.
4199 *
4200 * @param pVM VM Handle.
4201 * @param pVCpu VMCPU Handle.
4202 * @thread Any.
4203 */
4204REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4205{
4206 LogFlow(("REMR3NotifyInterruptClear:\n"));
4207 if (pVM->rem.s.fInREM)
4208 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4209}
4210
4211
4212/**
4213 * Notification about pending timer(s).
4214 *
4215 * @param pVM VM Handle.
4216 * @param pVCpuDst The target cpu for this notification.
4217 * TM will not broadcast pending timer events, but use
4218 * a dedicated EMT for them. So, only interrupt REM
4219 * execution if the given CPU is executing in REM.
4220 * @thread Any.
4221 */
4222REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4223{
4224#ifndef DEBUG_bird
4225 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4226#endif
4227 if (pVM->rem.s.fInREM)
4228 {
4229 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4230 {
4231 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4232 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4233 CPU_INTERRUPT_EXTERNAL_TIMER);
4234 }
4235 else
4236 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4237 }
4238 else
4239 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4240}
4241
4242
4243/**
4244 * Notification about pending DMA transfers.
4245 *
4246 * @param pVM VM Handle.
4247 * @thread Any.
4248 */
4249REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4250{
4251 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4252 if (pVM->rem.s.fInREM)
4253 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_DMA);
4254}
4255
4256
4257/**
4258 * Notification about pending timer(s).
4259 *
4260 * @param pVM VM Handle.
4261 * @thread Any.
4262 */
4263REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4264{
4265 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4266 if (pVM->rem.s.fInREM)
4267 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
4268}
4269
4270
4271/**
4272 * Notification about pending FF set by an external thread.
4273 *
4274 * @param pVM VM handle.
4275 * @thread Any.
4276 */
4277REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4278{
4279 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4280 if (pVM->rem.s.fInREM)
4281 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
4282}
4283
4284
4285#ifdef VBOX_WITH_STATISTICS
4286void remR3ProfileStart(int statcode)
4287{
4288 STAMPROFILEADV *pStat;
4289 switch(statcode)
4290 {
4291 case STATS_EMULATE_SINGLE_INSTR:
4292 pStat = &gStatExecuteSingleInstr;
4293 break;
4294 case STATS_QEMU_COMPILATION:
4295 pStat = &gStatCompilationQEmu;
4296 break;
4297 case STATS_QEMU_RUN_EMULATED_CODE:
4298 pStat = &gStatRunCodeQEmu;
4299 break;
4300 case STATS_QEMU_TOTAL:
4301 pStat = &gStatTotalTimeQEmu;
4302 break;
4303 case STATS_QEMU_RUN_TIMERS:
4304 pStat = &gStatTimers;
4305 break;
4306 case STATS_TLB_LOOKUP:
4307 pStat= &gStatTBLookup;
4308 break;
4309 case STATS_IRQ_HANDLING:
4310 pStat= &gStatIRQ;
4311 break;
4312 case STATS_RAW_CHECK:
4313 pStat = &gStatRawCheck;
4314 break;
4315
4316 default:
4317 AssertMsgFailed(("unknown stat %d\n", statcode));
4318 return;
4319 }
4320 STAM_PROFILE_ADV_START(pStat, a);
4321}
4322
4323
4324void remR3ProfileStop(int statcode)
4325{
4326 STAMPROFILEADV *pStat;
4327 switch(statcode)
4328 {
4329 case STATS_EMULATE_SINGLE_INSTR:
4330 pStat = &gStatExecuteSingleInstr;
4331 break;
4332 case STATS_QEMU_COMPILATION:
4333 pStat = &gStatCompilationQEmu;
4334 break;
4335 case STATS_QEMU_RUN_EMULATED_CODE:
4336 pStat = &gStatRunCodeQEmu;
4337 break;
4338 case STATS_QEMU_TOTAL:
4339 pStat = &gStatTotalTimeQEmu;
4340 break;
4341 case STATS_QEMU_RUN_TIMERS:
4342 pStat = &gStatTimers;
4343 break;
4344 case STATS_TLB_LOOKUP:
4345 pStat= &gStatTBLookup;
4346 break;
4347 case STATS_IRQ_HANDLING:
4348 pStat= &gStatIRQ;
4349 break;
4350 case STATS_RAW_CHECK:
4351 pStat = &gStatRawCheck;
4352 break;
4353 default:
4354 AssertMsgFailed(("unknown stat %d\n", statcode));
4355 return;
4356 }
4357 STAM_PROFILE_ADV_STOP(pStat, a);
4358}
4359#endif
4360
4361/**
4362 * Raise an RC, force rem exit.
4363 *
4364 * @param pVM VM handle.
4365 * @param rc The rc.
4366 */
4367void remR3RaiseRC(PVM pVM, int rc)
4368{
4369 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4370 Assert(pVM->rem.s.fInREM);
4371 VM_ASSERT_EMT(pVM);
4372 pVM->rem.s.rc = rc;
4373 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4374}
4375
4376
4377/* -+- timers -+- */
4378
/**
 * Gets the guest TSC for the recompiler.
 *
 * @returns The current CPU tick count from TM.
 * @param   env     The recompiler CPU state (identifies the vCPU).
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4384
4385
4386/* -+- interrupts -+- */
4387
4388void cpu_set_ferr(CPUX86State *env)
4389{
4390 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4391 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4392}
4393
/**
 * Fetches the pending interrupt vector from the (A)PIC for the recompiler.
 *
 * @returns The interrupt vector, or -1 if none is pending.
 * @param   env     The recompiler CPU state (identifies VM and vCPU).
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* Let the APIC refresh its pending-interrupt state first if flagged. */
    if (VMCPU_FF_TEST_AND_CLEAR(env->pVCpu, VMCPU_FF_UPDATE_APIC))
        APICUpdatePendingInterrupts(env->pVCpu);

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD set while further interrupts remain pending. */
        if (VMCPU_FF_IS_ANY_SET(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4418
4419
4420/* -+- local apic -+- */
4421
#if 0 /* CPUMSetGuestMsr does this now. */
/** Sets the guest APIC base MSR (retired; CPUMSetGuestMsr handles this now). */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
#endif
4429
4430uint64_t cpu_get_apic_base(CPUX86State *env)
4431{
4432 uint64_t u64;
4433 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(env->pVCpu, MSR_IA32_APICBASE, &u64);
4434 if (RT_SUCCESS(rcStrict))
4435 {
4436 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4437 return u64;
4438 }
4439 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
4440 return 0;
4441}
4442
4443void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4444{
4445 int rc = APICSetTpr(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4446 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4447}
4448
4449uint8_t cpu_get_apic_tpr(CPUX86State *env)
4450{
4451 uint8_t u8;
4452 int rc = APICGetTpr(env->pVCpu, &u8, NULL, NULL);
4453 if (RT_SUCCESS(rc))
4454 {
4455 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4456 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4457 }
4458 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4459 return 0;
4460}
4461
4462/**
4463 * Read an MSR.
4464 *
4465 * @retval 0 success.
4466 * @retval -1 failure, raise \#GP(0).
4467 * @param env The cpu state.
4468 * @param idMsr The MSR to read.
4469 * @param puValue Where to return the value.
4470 */
4471int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4472{
4473 Assert(env->pVCpu);
4474 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4475}
4476
4477/**
4478 * Write to an MSR.
4479 *
4480 * @retval 0 success.
4481 * @retval -1 failure, raise \#GP(0).
4482 * @param env The cpu state.
4483 * @param idMsr The MSR to write to.
4484 * @param uValue The value to write.
4485 */
4486int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4487{
4488 Assert(env->pVCpu);
4489 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4490}
4491
4492/* -+- I/O Ports -+- */
4493
4494#undef LOG_GROUP
4495#define LOG_GROUP LOG_GROUP_REM_IOPORT
4496
4497void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4498{
4499 int rc;
4500
4501 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4502 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4503
4504 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 1);
4505 if (RT_LIKELY(rc == VINF_SUCCESS))
4506 return;
4507 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4508 {
4509 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4510 remR3RaiseRC(env->pVM, rc);
4511 return;
4512 }
4513 remAbort(rc, __FUNCTION__);
4514}
4515
4516void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4517{
4518 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4519 int rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 2);
4520 if (RT_LIKELY(rc == VINF_SUCCESS))
4521 return;
4522 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4523 {
4524 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4525 remR3RaiseRC(env->pVM, rc);
4526 return;
4527 }
4528 remAbort(rc, __FUNCTION__);
4529}
4530
4531void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4532{
4533 int rc;
4534 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4535 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 4);
4536 if (RT_LIKELY(rc == VINF_SUCCESS))
4537 return;
4538 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4539 {
4540 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4541 remR3RaiseRC(env->pVM, rc);
4542 return;
4543 }
4544 remAbort(rc, __FUNCTION__);
4545}
4546
4547uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4548{
4549 uint32_t u32 = 0;
4550 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 1);
4551 if (RT_LIKELY(rc == VINF_SUCCESS))
4552 {
4553 if (/*addr != 0x61 && */addr != 0x71)
4554 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4555 return (uint8_t)u32;
4556 }
4557 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4558 {
4559 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4560 remR3RaiseRC(env->pVM, rc);
4561 return (uint8_t)u32;
4562 }
4563 remAbort(rc, __FUNCTION__);
4564 return UINT8_C(0xff);
4565}
4566
4567uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4568{
4569 uint32_t u32 = 0;
4570 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 2);
4571 if (RT_LIKELY(rc == VINF_SUCCESS))
4572 {
4573 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4574 return (uint16_t)u32;
4575 }
4576 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4577 {
4578 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4579 remR3RaiseRC(env->pVM, rc);
4580 return (uint16_t)u32;
4581 }
4582 remAbort(rc, __FUNCTION__);
4583 return UINT16_C(0xffff);
4584}
4585
4586uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4587{
4588 uint32_t u32 = 0;
4589 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 4);
4590 if (RT_LIKELY(rc == VINF_SUCCESS))
4591 {
4592 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4593 return u32;
4594 }
4595 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4596 {
4597 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4598 remR3RaiseRC(env->pVM, rc);
4599 return u32;
4600 }
4601 remAbort(rc, __FUNCTION__);
4602 return UINT32_C(0xffffffff);
4603}
4604
4605#undef LOG_GROUP
4606#define LOG_GROUP LOG_GROUP_REM
4607
4608
4609/* -+- helpers and misc other interfaces -+- */
4610
4611/**
4612 * Perform the CPUID instruction.
4613 *
4614 * @param env Pointer to the recompiler CPU structure.
4615 * @param idx The CPUID leaf (eax).
4616 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4617 * @param pEAX Where to store eax.
4618 * @param pEBX Where to store ebx.
4619 * @param pECX Where to store ecx.
4620 * @param pEDX Where to store edx.
4621 */
4622void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4623 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4624{
4625 NOREF(idxSub);
4626 CPUMGetGuestCpuId(env->pVCpu, idx, idxSub, pEAX, pEBX, pECX, pEDX);
4627}
4628
4629
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): dead code -- if ever re-enabled, note that the
 * REMR3StateBack() call below still uses the old single-argument signature;
 * the live callers (cpu_abort, remAbort) now pass the VMCPU as well.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4659
4660/**
4661 * Interface for the qemu cpu to report unhandled situation
4662 * raising a fatal VM error.
4663 */
4664void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4665{
4666 va_list va;
4667 PVM pVM;
4668 PVMCPU pVCpu;
4669 char szMsg[256];
4670
4671 /*
4672 * Bitch about it.
4673 */
4674 RTLogFlags(NULL, "nodisabled nobuffered");
4675 RTLogFlush(NULL);
4676
4677 va_start(va, pszFormat);
4678#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4679 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4680 unsigned cArgs = 0;
4681 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4682 const char *psz = strchr(pszFormat, '%');
4683 while (psz && cArgs < 6)
4684 {
4685 auArgs[cArgs++] = va_arg(va, uintptr_t);
4686 psz = strchr(psz + 1, '%');
4687 }
4688 switch (cArgs)
4689 {
4690 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4691 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4692 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4693 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4694 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4695 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4696 default:
4697 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4698 }
4699#else
4700 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4701#endif
4702 va_end(va);
4703
4704 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4705 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4706
4707 /*
4708 * If we're in REM context we'll sync back the state before 'jumping' to
4709 * the EMs failure handling.
4710 */
4711 pVM = cpu_single_env->pVM;
4712 pVCpu = cpu_single_env->pVCpu;
4713 Assert(pVCpu);
4714
4715 if (pVM->rem.s.fInREM)
4716 REMR3StateBack(pVM, pVCpu);
4717 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4718 AssertMsgFailed(("EMR3FatalError returned!\n"));
4719}
4720
4721
4722/**
4723 * Aborts the VM.
4724 *
4725 * @param rc VBox error code.
4726 * @param pszTip Hint about why/when this happened.
4727 */
4728void remAbort(int rc, const char *pszTip)
4729{
4730 PVM pVM;
4731 PVMCPU pVCpu;
4732
4733 /*
4734 * Bitch about it.
4735 */
4736 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4737 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4738
4739 /*
4740 * Jump back to where we entered the recompiler.
4741 */
4742 pVM = cpu_single_env->pVM;
4743 pVCpu = cpu_single_env->pVCpu;
4744 Assert(pVCpu);
4745
4746 if (pVM->rem.s.fInREM)
4747 REMR3StateBack(pVM, pVCpu);
4748
4749 EMR3FatalError(pVCpu, rc);
4750 AssertMsgFailed(("EMR3FatalError returned!\n"));
4751}
4752
4753
/**
 * Dumps a linux system call.
 *
 * Logs the syscall number in EAX, its name (when known) and the i386
 * syscall argument registers (EBX, ECX, EDX, ESI, EDI, EBP).
 *
 * @param pVCpu VMCPU handle.
 */
void remR3DumpLnxSyscall(PVMCPU pVCpu)
{
    /* Syscall-number -> name table for the 32-bit (i386) Linux syscall ABI.
       The index is the value the guest puts in EAX; numbers beyond the table
       are logged as unknown below. */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",             /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",           /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",            /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",           /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",            /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",            /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",            /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",              /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",           /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",            /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",            /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",          /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",         /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",             /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",         /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",          /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",             /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",          /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",            /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",         /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",            /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",           /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",            /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",            /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",          /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",           /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",            /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",           /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",           /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",         /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",            /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",         /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",           /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    /* EAX selects the system call in the i386 Linux ABI. */
    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:  /* NOTE(review): the switch has only this arm; kept as-is. */
            if (uEAX < RT_ELEMENTS(apsz))
                /* Known syscall: log its name and the i386 argument registers. */
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                /* Number beyond the table - newer or bogus syscall. */
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;

    }
}
5052
5053
5054/**
5055 * Dumps an OpenBSD system call.
5056 * @param pVCpu VMCPU handle.
5057 */
5058void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5059{
5060 static const char *apsz[] =
5061 {
5062 "SYS_syscall", //0
5063 "SYS_exit", //1
5064 "SYS_fork", //2
5065 "SYS_read", //3
5066 "SYS_write", //4
5067 "SYS_open", //5
5068 "SYS_close", //6
5069 "SYS_wait4", //7
5070 "SYS_8",
5071 "SYS_link", //9
5072 "SYS_unlink", //10
5073 "SYS_11",
5074 "SYS_chdir", //12
5075 "SYS_fchdir", //13
5076 "SYS_mknod", //14
5077 "SYS_chmod", //15
5078 "SYS_chown", //16
5079 "SYS_break", //17
5080 "SYS_18",
5081 "SYS_19",
5082 "SYS_getpid", //20
5083 "SYS_mount", //21
5084 "SYS_unmount", //22
5085 "SYS_setuid", //23
5086 "SYS_getuid", //24
5087 "SYS_geteuid", //25
5088 "SYS_ptrace", //26
5089 "SYS_recvmsg", //27
5090 "SYS_sendmsg", //28
5091 "SYS_recvfrom", //29
5092 "SYS_accept", //30
5093 "SYS_getpeername", //31
5094 "SYS_getsockname", //32
5095 "SYS_access", //33
5096 "SYS_chflags", //34
5097 "SYS_fchflags", //35
5098 "SYS_sync", //36
5099 "SYS_kill", //37
5100 "SYS_38",
5101 "SYS_getppid", //39
5102 "SYS_40",
5103 "SYS_dup", //41
5104 "SYS_opipe", //42
5105 "SYS_getegid", //43
5106 "SYS_profil", //44
5107 "SYS_ktrace", //45
5108 "SYS_sigaction", //46
5109 "SYS_getgid", //47
5110 "SYS_sigprocmask", //48
5111 "SYS_getlogin", //49
5112 "SYS_setlogin", //50
5113 "SYS_acct", //51
5114 "SYS_sigpending", //52
5115 "SYS_osigaltstack", //53
5116 "SYS_ioctl", //54
5117 "SYS_reboot", //55
5118 "SYS_revoke", //56
5119 "SYS_symlink", //57
5120 "SYS_readlink", //58
5121 "SYS_execve", //59
5122 "SYS_umask", //60
5123 "SYS_chroot", //61
5124 "SYS_62",
5125 "SYS_63",
5126 "SYS_64",
5127 "SYS_65",
5128 "SYS_vfork", //66
5129 "SYS_67",
5130 "SYS_68",
5131 "SYS_sbrk", //69
5132 "SYS_sstk", //70
5133 "SYS_61",
5134 "SYS_vadvise", //72
5135 "SYS_munmap", //73
5136 "SYS_mprotect", //74
5137 "SYS_madvise", //75
5138 "SYS_76",
5139 "SYS_77",
5140 "SYS_mincore", //78
5141 "SYS_getgroups", //79
5142 "SYS_setgroups", //80
5143 "SYS_getpgrp", //81
5144 "SYS_setpgid", //82
5145 "SYS_setitimer", //83
5146 "SYS_84",
5147 "SYS_85",
5148 "SYS_getitimer", //86
5149 "SYS_87",
5150 "SYS_88",
5151 "SYS_89",
5152 "SYS_dup2", //90
5153 "SYS_91",
5154 "SYS_fcntl", //92
5155 "SYS_select", //93
5156 "SYS_94",
5157 "SYS_fsync", //95
5158 "SYS_setpriority", //96
5159 "SYS_socket", //97
5160 "SYS_connect", //98
5161 "SYS_99",
5162 "SYS_getpriority", //100
5163 "SYS_101",
5164 "SYS_102",
5165 "SYS_sigreturn", //103
5166 "SYS_bind", //104
5167 "SYS_setsockopt", //105
5168 "SYS_listen", //106
5169 "SYS_107",
5170 "SYS_108",
5171 "SYS_109",
5172 "SYS_110",
5173 "SYS_sigsuspend", //111
5174 "SYS_112",
5175 "SYS_113",
5176 "SYS_114",
5177 "SYS_115",
5178 "SYS_gettimeofday", //116
5179 "SYS_getrusage", //117
5180 "SYS_getsockopt", //118
5181 "SYS_119",
5182 "SYS_readv", //120
5183 "SYS_writev", //121
5184 "SYS_settimeofday", //122
5185 "SYS_fchown", //123
5186 "SYS_fchmod", //124
5187 "SYS_125",
5188 "SYS_setreuid", //126
5189 "SYS_setregid", //127
5190 "SYS_rename", //128
5191 "SYS_129",
5192 "SYS_130",
5193 "SYS_flock", //131
5194 "SYS_mkfifo", //132
5195 "SYS_sendto", //133
5196 "SYS_shutdown", //134
5197 "SYS_socketpair", //135
5198 "SYS_mkdir", //136
5199 "SYS_rmdir", //137
5200 "SYS_utimes", //138
5201 "SYS_139",
5202 "SYS_adjtime", //140
5203 "SYS_141",
5204 "SYS_142",
5205 "SYS_143",
5206 "SYS_144",
5207 "SYS_145",
5208 "SYS_146",
5209 "SYS_setsid", //147
5210 "SYS_quotactl", //148
5211 "SYS_149",
5212 "SYS_150",
5213 "SYS_151",
5214 "SYS_152",
5215 "SYS_153",
5216 "SYS_154",
5217 "SYS_nfssvc", //155
5218 "SYS_156",
5219 "SYS_157",
5220 "SYS_158",
5221 "SYS_159",
5222 "SYS_160",
5223 "SYS_getfh", //161
5224 "SYS_162",
5225 "SYS_163",
5226 "SYS_164",
5227 "SYS_sysarch", //165
5228 "SYS_166",
5229 "SYS_167",
5230 "SYS_168",
5231 "SYS_169",
5232 "SYS_170",
5233 "SYS_171",
5234 "SYS_172",
5235 "SYS_pread", //173
5236 "SYS_pwrite", //174
5237 "SYS_175",
5238 "SYS_176",
5239 "SYS_177",
5240 "SYS_178",
5241 "SYS_179",
5242 "SYS_180",
5243 "SYS_setgid", //181
5244 "SYS_setegid", //182
5245 "SYS_seteuid", //183
5246 "SYS_lfs_bmapv", //184
5247 "SYS_lfs_markv", //185
5248 "SYS_lfs_segclean", //186
5249 "SYS_lfs_segwait", //187
5250 "SYS_188",
5251 "SYS_189",
5252 "SYS_190",
5253 "SYS_pathconf", //191
5254 "SYS_fpathconf", //192
5255 "SYS_swapctl", //193
5256 "SYS_getrlimit", //194
5257 "SYS_setrlimit", //195
5258 "SYS_getdirentries", //196
5259 "SYS_mmap", //197
5260 "SYS___syscall", //198
5261 "SYS_lseek", //199
5262 "SYS_truncate", //200
5263 "SYS_ftruncate", //201
5264 "SYS___sysctl", //202
5265 "SYS_mlock", //203
5266 "SYS_munlock", //204
5267 "SYS_205",
5268 "SYS_futimes", //206
5269 "SYS_getpgid", //207
5270 "SYS_xfspioctl", //208
5271 "SYS_209",
5272 "SYS_210",
5273 "SYS_211",
5274 "SYS_212",
5275 "SYS_213",
5276 "SYS_214",
5277 "SYS_215",
5278 "SYS_216",
5279 "SYS_217",
5280 "SYS_218",
5281 "SYS_219",
5282 "SYS_220",
5283 "SYS_semget", //221
5284 "SYS_222",
5285 "SYS_223",
5286 "SYS_224",
5287 "SYS_msgget", //225
5288 "SYS_msgsnd", //226
5289 "SYS_msgrcv", //227
5290 "SYS_shmat", //228
5291 "SYS_229",
5292 "SYS_shmdt", //230
5293 "SYS_231",
5294 "SYS_clock_gettime", //232
5295 "SYS_clock_settime", //233
5296 "SYS_clock_getres", //234
5297 "SYS_235",
5298 "SYS_236",
5299 "SYS_237",
5300 "SYS_238",
5301 "SYS_239",
5302 "SYS_nanosleep", //240
5303 "SYS_241",
5304 "SYS_242",
5305 "SYS_243",
5306 "SYS_244",
5307 "SYS_245",
5308 "SYS_246",
5309 "SYS_247",
5310 "SYS_248",
5311 "SYS_249",
5312 "SYS_minherit", //250
5313 "SYS_rfork", //251
5314 "SYS_poll", //252
5315 "SYS_issetugid", //253
5316 "SYS_lchown", //254
5317 "SYS_getsid", //255
5318 "SYS_msync", //256
5319 "SYS_257",
5320 "SYS_258",
5321 "SYS_259",
5322 "SYS_getfsstat", //260
5323 "SYS_statfs", //261
5324 "SYS_fstatfs", //262
5325 "SYS_pipe", //263
5326 "SYS_fhopen", //264
5327 "SYS_265",
5328 "SYS_fhstatfs", //266
5329 "SYS_preadv", //267
5330 "SYS_pwritev", //268
5331 "SYS_kqueue", //269
5332 "SYS_kevent", //270
5333 "SYS_mlockall", //271
5334 "SYS_munlockall", //272
5335 "SYS_getpeereid", //273
5336 "SYS_274",
5337 "SYS_275",
5338 "SYS_276",
5339 "SYS_277",
5340 "SYS_278",
5341 "SYS_279",
5342 "SYS_280",
5343 "SYS_getresuid", //281
5344 "SYS_setresuid", //282
5345 "SYS_getresgid", //283
5346 "SYS_setresgid", //284
5347 "SYS_285",
5348 "SYS_mquery", //286
5349 "SYS_closefrom", //287
5350 "SYS_sigaltstack", //288
5351 "SYS_shmget", //289
5352 "SYS_semop", //290
5353 "SYS_stat", //291
5354 "SYS_fstat", //292
5355 "SYS_lstat", //293
5356 "SYS_fhstat", //294
5357 "SYS___semctl", //295
5358 "SYS_shmctl", //296
5359 "SYS_msgctl", //297
5360 "SYS_MAXSYSCALL", //298
5361 //299
5362 //300
5363 };
5364 uint32_t uEAX;
5365 if (!LogIsEnabled())
5366 return;
5367 uEAX = CPUMGetGuestEAX(pVCpu);
5368 switch (uEAX)
5369 {
5370 default:
5371 if (uEAX < RT_ELEMENTS(apsz))
5372 {
5373 uint32_t au32Args[8] = {0};
5374 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5375 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5376 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5377 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5378 }
5379 else
5380 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5381 break;
5382 }
5383}
5384
5385
5386#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Stands in for the C runtime's DLL entry point in the no-CRT Windows/x86
 * build (see the IPRT_NO_CRT guard); there is nothing to initialize, so it
 * unconditionally reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
5394
/**
 * Minimal memcpy replacement for the no-CRT Windows/x86 build.
 *
 * Plain byte-wise forward copy; per the standard memcpy contract the
 * regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     Where to copy from (read only).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* was dropping the const qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5402
5403#endif
5404
/**
 * qemu callback invoked when the CPU's SMM state changes (stub).
 *
 * Intentionally empty: the VBox recompiler has nothing to update here.
 *
 * @param env The recompiler CPU state. (unused)
 */
void cpu_smm_update(CPUX86State *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette