VirtualBox
source: vbox/trunk/src/recompiler/VBoxRecompiler.c@80990
Last change on this file since 80990 was 80281, checked in by vboxsync, 5 years ago:
VMM,++: Refactoring code to use VMMC & VMMCPUCC. bugref:9217

/* $Id: VBoxRecompiler.c 80281 2019-08-15 07:29:37Z vboxsync $ */
/** @file
 * VBox Recompiler - QEMU.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_rem REM - Recompiled Execution Manager.
 *
 * The recompiled execution manager (REM) serves as the final fallback for
 * guest execution, after HM / raw-mode and IEM have given up.
 *
 * The REM is qemu with a whole bunch of VBox specific customizations for
 * interfacing with PATM, CSAM, PGM and other components.
 *
 * @sa @ref grp_rem
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_REM
#include <stdio.h> /* FILE */
#include "osdep.h"
#include "config.h"
#include "cpu.h"
#include "exec-all.h"
#include "ioport.h"

#include <VBox/vmm/rem.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/dbg.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/hm.h>
#include "REMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/uvm.h>
#include <VBox/param.h>
#include <VBox/err.h>

#include <VBox/log.h>
#include <iprt/alloca.h>
#include <iprt/semaphore.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/thread.h>
#include <iprt/string.h>

/* Don't wanna include everything. */
extern void cpu_exec_init_all(uintptr_t tb_size);
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUX86State *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
ram_addr_t get_phys_page_offset(target_ulong addr);
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/

/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

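/* Example (illustrative sketch, not part of the original file): copying the
 * eight FPU/MMX registers between two hypothetical save areas with the macro
 * above; the struct assignment compiles down to inline moves rather than a
 * memcpy() call:
 *
 *     for (unsigned iReg = 0; iReg < 8; iReg++)
 *         REM_COPY_FPU_REG(&pDstFpu->aRegs[iReg], &pSrcFpu->aRegs[iReg]);
 *
 * (pDstFpu/pSrcFpu and aRegs are made-up names for this sketch.)
 */
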
/** How remR3RunLoggingStep operates. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING


/** Selector flag shift between qemu and VBox.
 * VBox shifts the qemu bits to the right. */
#define SEL_FLAGS_SHIFT     (8)
/** Mask applied to the shifted qemu selector flags to get the attributes VBox
 * (VT-x) needs. */
#define SEL_FLAGS_SMASK     UINT32_C(0x1F0FF)

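/* Example (illustrative sketch, not part of the original file): qemu keeps the
 * descriptor attribute bits 8 positions to the left of where VBox (VT-x) wants
 * them, so the conversion is a shift plus mask, exactly as remR3CanExecuteRaw
 * does further down for each selector register:
 *
 *     uint32_t const uAttr = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
 */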


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int) remR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
static void     remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
static int      remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);

static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE   gStatGCPhys2HCVirt;
static STAMCOUNTER   gStatCpuGetTSC;
static STAMCOUNTER   gStatRefuseTFInhibit;
static STAMCOUNTER   gStatRefuseVM86;
static STAMCOUNTER   gStatRefusePaging;
static STAMCOUNTER   gStatRefusePAE;
static STAMCOUNTER   gStatRefuseIOPLNot0;
static STAMCOUNTER   gStatRefuseIF0;
static STAMCOUNTER   gStatRefuseCode16;
static STAMCOUNTER   gStatRefuseWP0;
static STAMCOUNTER   gStatRefuseRing1or2;
static STAMCOUNTER   gStatRefuseCanExecute;
static STAMCOUNTER   gaStatRefuseStale[6];
static STAMCOUNTER   gStatREMGDTChange;
static STAMCOUNTER   gStatREMIDTChange;
static STAMCOUNTER   gStatREMLDTRChange;
static STAMCOUNTER   gStatREMTRChange;
static STAMCOUNTER   gStatSelOutOfSync[6];
static STAMCOUNTER   gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER   gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;

/*
 * Global stuff.
 */

/** MMIO read callbacks. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};

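/* Example (illustrative sketch, not part of the original file): the tables
 * above are indexed by access size (0 = byte, 1 = word, 2 = dword) and handed
 * to qemu when REMR3Init registers the special memory types below:
 *
 *     int iType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pvOpaque);
 *     // qemu then dispatches e.g. a 16-bit MMIO read to g_apfnMMIORead[1].
 *
 * (pvOpaque stands in for the opaque user argument; REMR3Init passes the CPU
 * environment for MMIO and the VM handle for handlers.)
 */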

#ifdef VBOX_WITH_DEBUGGER
/*
 * Debugger commands.
 */
static FNDBGCCMD remR3CmdDisasEnableStepping;

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0U, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable single stepping with logged disassembly. "
                          "If no arguments are given, the current state is shown."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/**
 * Initializes the REM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_UOFFSETOF(VM, rem) & 31), ("off=%#zx\n", RT_UOFFSETOF(VM, rem)));
#if 0 /* just an annoyance at the moment. */
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_UOFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.uStateLoadPendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, remR3LoadDone);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES", STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
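
/* Init-order sketch (illustrative, derived from the assertions above): REM has
 * to come up before any RAM is registered with PGM, and the RAM related setup
 * is completed later by REMR3InitFinalize:
 *
 *     rc = REMR3Init(pVM);         // while MMR3PhysGetRamSize(pVM) is still 0
 *     // ... components register their RAM and MMIO ranges ...
 *     rc = REMR3InitFinalize(pVM); // fixes GCPhysLastRam, allocates dirty map
 */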


/**
 * Finalizes the REM initialization.
 *
 * This is called after all components, devices and drivers have
 * been initialized. Its main purpose is to finish the RAM related
 * initialization.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 */
REMR3DECL(int) REMR3InitFinalize(PVM pVM)
{
    int rc;

    /*
     * Ram size & dirty bit map.
     */
    Assert(!pVM->rem.s.fGCPhysLastRamFixed);
    pVM->rem.s.fGCPhysLastRamFixed = true;
#ifdef RT_STRICT
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
#else
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
#endif
    return rc;
}

/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes for the dirty page map\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up to the nearest 4GB of RAM and leave at least _64K of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes for the dirty page map\n", cbBitmapFull), VERR_NO_MEMORY);

        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
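
/* Worked example of the guarded sizing above (illustrative numbers): with
 * GCPhysLastRam = 4GB - 1 we get cb = 4GB and phys_dirty_size = 1MB (one dirty
 * byte per 4KB page). cbBitmapAligned is then 1MB (already page aligned) and
 * cbBitmapFull also rounds to 1MB, so it is bumped by _4G >> PAGE_SHIFT to
 * 2MB. The allocation then looks like this:
 *
 *     [0MB ... 1MB)   accessible dirty map
 *     [1MB ... 2MB)   RTMEM_PROT_NONE guard pages
 */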


/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    /*
     * Statistics.
     */
    STAMR3Deregister(pVM->pUVM, "/PROF/REM/*");
    STAMR3Deregister(pVM->pUVM, "/REM/*");

    return VINF_SUCCESS;
}


/**
 * The VM is being reset.
 *
 * For the REM component this means calling cpu_reset() and
 * reinitializing some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, REM_NO_PENDING_IRQ);

    return SSMR3PutU32(pSSM, ~0);       /* terminator */
}
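
/* For reference, the saved-state layout produced above (remR3Load consumes
 * the same sequence):
 *
 *     uint32_t    Env.hflags;
 *     uint32_t    ~0;                  // separator
 *     uint32_t    fRawRing0;           // CPU_RAW_RING0 entered?
 *     uint32_t    REM_NO_PENDING_IRQ;  // pending interrupt (none when saving)
 *     uint32_t    ~0;                  // terminator
 */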


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_UOFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }
    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.uStateLoadPendingInterrupt);
    AssertRCReturn(rc, rc);
    AssertLogRelMsgReturn(   pVM->rem.s.uStateLoadPendingInterrupt == REM_NO_PENDING_IRQ
                          || pVM->rem.s.uStateLoadPendingInterrupt < 256,
                          ("uStateLoadPendingInterrupt=%#x\n", pVM->rem.s.uStateLoadPendingInterrupt),
                          VERR_SSM_UNEXPECTED_DATA);

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}


/**
 * @callback_method_impl{FNSSMINTLOADDONE,
 *      For pushing misdesigned pending-interrupt mess to TRPM where it belongs. }
 */
static DECLCALLBACK(int) remR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
{
    if (pVM->rem.s.uStateLoadPendingInterrupt != REM_NO_PENDING_IRQ)
    {
        int rc = TRPMAssertTrap(pVM->apCpusR3[0], pVM->rem.s.uStateLoadPendingInterrupt, TRPM_HARDWARE_INT);
        AssertLogRelMsgReturn(rc, ("uStateLoadPendingInterrupt=%#x rc=%Rrc\n", pVM->rem.s.uStateLoadPendingInterrupt, rc), rc);
        pVM->rem.s.uStateLoadPendingInterrupt = REM_NO_PENDING_IRQ;
    }
    return VINF_SUCCESS;
}


#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM_RUN

/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enable single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, it has to be disabled before we start stepping.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves.
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HM:
                /** @todo is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}


/**
 * Set a breakpoint using the REM facilities.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
    {
        LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
    return VERR_REM_NO_MORE_BP_SLOTS;
}


/**
 * Clears a breakpoint set by REMR3BreakpointSet().
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
    {
        LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
    return VERR_REM_BP_NOT_FOUND;
}

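/* Example (illustrative sketch, not part of the original file): typical
 * pairing of the two calls above on the emulation thread; GCPtrBp is a
 * hypothetical breakpoint address:
 *
 *     rc = REMR3BreakpointSet(pVM, GCPtrBp);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... step/run until EXCP_DEBUG reports the breakpoint ...
 *         rc = REMR3BreakpointClear(pVM, GCPtrBp);
 *     }
 */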

/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. It is intended to be called while in raw mode and
 * thus takes care of all the state syncing between REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
     */
    pVM->rem.s.Env.state |= CPU_RAW_HM;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(   interrupt_request
                 & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD
                     | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER
                     | CPU_INTERRUPT_EXTERNAL_DMA)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM, pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM, pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint *pBP;
                    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HM\n"));
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
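
/* Usage sketch (illustrative, not part of the original file): a caller such as
 * EM can treat this as a one-shot fallback; no REMR3State/REMR3StateBack
 * bracketing is needed since the function syncs both ways itself:
 *
 *     int rc = REMR3EmulateInstruction(pVM, pVCpu);
 *     if (rc == VINF_EM_RESCHEDULE)
 *         ; // let the EM loop pick a new execution mode (RAW/HM/REM)
 */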


/**
 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The Virtual CPU handle.
 */
static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    Assert(pVM->rem.s.fInREM);
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    cpu_single_step(&pVM->rem.s.Env, 1);
#else
    Assert(!pVM->rem.s.Env.singlestep_enabled);
#endif

    /*
     * Now we set the execute single instruction flag and enter the cpu_exec loop.
     */
    for (;;)
    {
        char szBuf[256];

        /*
         * Log the current registers state and instruction.
         */
        remR3StateUpdate(pVM, pVCpu);
        DBGFR3Info(pVM->pUVM, "cpumguest", NULL, NULL);
        szBuf[0] = '\0';
        rc = DBGFR3DisasInstrEx(pVM->pUVM,
                                pVCpu->idCpu,
                                0, /* Sel */  0, /* GCPtr */
                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                szBuf,
                                sizeof(szBuf),
                                NULL);
        if (RT_FAILURE(rc))
            RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
        RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);

        /*
         * Execute the instruction.
         */
        TMNotifyStartOfExecution(pVM, pVCpu);

        if (   pVM->rem.s.Env.exception_index < 0
            || pVM->rem.s.Env.exception_index > 256)
            pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        pVM->rem.s.Env.interrupt_request = 0;
#else
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
#endif
        if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
        RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n",
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        rc = cpu_exec(&pVM->rem.s.Env);

        RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        TMNotifyEndOfExecution(pVM, pVCpu);

        switch (rc)
        {
#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
            /*
             * The normal exit.
             */
            case EXCP_SINGLE_INSTR:
                if (   !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
                    && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK))
                    continue;
                RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#RX64)\n",
                            pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions);
                rc = VINF_SUCCESS;
                break;

#else
            /*
             * The normal exit, check for breakpoints at PC just to be sure.
             */
#endif
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint *pBP;
                    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
                if (rc == VINF_EM_DBG_STEPPED)
                {
                    if (   !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
                        && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK))
                        continue;

                    RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#RX64)\n",
                                pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions);
                    rc = VINF_SUCCESS;
                }
#endif
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
                rc = VINF_SUCCESS;
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HM rc=VINF_EM_RESCHEDULE_HM\n");
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
                rc = VINF_EM_RESCHEDULE;
                break;
        }
        break;
    }

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
//    cpu_single_step(&pVM->rem.s.Env, 0);
#else
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
#endif
    return rc;
}


/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVM, pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM, pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                CPUBreakpoint *pBP;
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HM:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HM\n"));
            rc = VINF_EM_RESCHEDULE_HM;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}


/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context.
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

#ifdef RT_OS_WINDOWS
    PCPUMCTX pCtx = alloca(sizeof(*pCtx));
#else
    CPUMCTX Ctx;
    PCPUMCTX pCtx = &Ctx;
#endif
    /** @todo NEM: scheduling. */

    env->state |= CPU_RAW_HM;

    /*
     * Create partial context for HMCanExecuteGuest.
     */
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

    pCtx->tr.Sel        = env->tr.selector;
    pCtx->tr.ValidSel   = env->tr.selector;
    pCtx->tr.fFlags     = CPUMSELREG_FLAGS_VALID;
    pCtx->tr.u64Base    = env->tr.base;
    pCtx->tr.u32Limit   = env->tr.limit;
    pCtx->tr.Attr.u     = (env->tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

    pCtx->ldtr.Sel      = env->ldt.selector;
    pCtx->ldtr.ValidSel = env->ldt.selector;
    pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ldtr.u64Base  = env->ldt.base;
    pCtx->ldtr.u32Limit = env->ldt.limit;
    pCtx->ldtr.Attr.u   = (env->ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

    pCtx->idtr.cbIdt    = env->idt.limit;
    pCtx->idtr.pIdt     = env->idt.base;

    pCtx->gdtr.cbGdt    = env->gdt.limit;
    pCtx->gdtr.pGdt     = env->gdt.base;

    pCtx->rsp = env->regs[R_ESP];
    pCtx->rip = env->eip;

    pCtx->eflags.u32 = env->eflags;

    pCtx->cs.Sel      = env->segs[R_CS].selector;
    pCtx->cs.ValidSel = env->segs[R_CS].selector;
    pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->cs.u64Base  = env->segs[R_CS].base;
    pCtx->cs.u32Limit = env->segs[R_CS].limit;
    pCtx->cs.Attr.u   = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

    pCtx->ds.Sel      = env->segs[R_DS].selector;
    pCtx->ds.ValidSel = env->segs[R_DS].selector;
    pCtx->ds.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ds.u64Base  = env->segs[R_DS].base;
    pCtx->ds.u32Limit = env->segs[R_DS].limit;
    pCtx->ds.Attr.u   = (env->segs[R_DS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

    pCtx->es.Sel      = env->segs[R_ES].selector;
    pCtx->es.ValidSel = env->segs[R_ES].selector;
    pCtx->es.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->es.u64Base  = env->segs[R_ES].base;
    pCtx->es.u32Limit = env->segs[R_ES].limit;
    pCtx->es.Attr.u   = (env->segs[R_ES].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

    pCtx->fs.Sel      = env->segs[R_FS].selector;
    pCtx->fs.ValidSel = env->segs[R_FS].selector;
    pCtx->fs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->fs.u64Base  = env->segs[R_FS].base;
    pCtx->fs.u32Limit = env->segs[R_FS].limit;
    pCtx->fs.Attr.u   = (env->segs[R_FS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

    pCtx->gs.Sel      = env->segs[R_GS].selector;
    pCtx->gs.ValidSel = env->segs[R_GS].selector;
    pCtx->gs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->gs.u64Base  = env->segs[R_GS].base;
    pCtx->gs.u32Limit = env->segs[R_GS].limit;
    pCtx->gs.Attr.u   = (env->segs[R_GS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

    pCtx->ss.Sel      = env->segs[R_SS].selector;
    pCtx->ss.ValidSel = env->segs[R_SS].selector;
    pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ss.u64Base  = env->segs[R_SS].base;
    pCtx->ss.u32Limit = env->segs[R_SS].limit;
    pCtx->ss.Attr.u   = (env->segs[R_SS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

    pCtx->msrEFER = env->efer;
    pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_NONE;

    /*
     * Hardware accelerated mode:
     * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
     */
    PVM pVM = env->pVM;
    PVMCPU pVCpu = pVM->apCpusR3[0];
    if (HMCanExecuteGuest(pVM, pVCpu, pCtx))
    {
        *piException = EXCP_EXECUTE_HM;
        return true;
    }
    return false;
}
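
/* Example (illustrative sketch, not part of the original file): assembling the
 * fFlags argument as described in the @param text above, i.e. hflags OR'ed
 * with the IOPL, TF and VM bits from eflags:
 *
 *     unsigned fFlags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
 *     int iExcp;
 *     if (remR3CanExecuteRaw(env, env->eip, fFlags, &iExcp))
 *         ; // leave the recompiler, iExcp holds EXCP_EXECUTE_HM
 */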


/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address whose page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    Assert(EMRemIsLockOwner(env->pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}


#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;


    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);
    /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/

    STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */

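/* Example (illustrative sketch, not part of the original file): decoding the
 * tagged pointer returned above; (void *)1 means the page is inaccessible
 * (unassigned or catch-all) and bit 1 marks a write-monitored page
 * (VINF_PGM_PHYS_TLB_CATCH_WRITE):
 *
 *     void *pv = remR3TlbGCPhys2Ptr(env1, physAddr, fWritable);
 *     if (pv == (void *)1)
 *         ; // no direct mapping - take the handler path
 *     else if ((uintptr_t)pv & 2)
 *         pv = (void *)((uintptr_t)pv & ~(uintptr_t)2); // readable, writes must trap
 */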

/**
 * Called from tlb_protect_code in order to write-monitor a code page.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor.
 */
void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
{
}


/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor.
 */
void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
}


1620/**
1621 * Called when the CPU is initialized, when any of the CRx registers is changed,
1622 * or when the A20 line is modified.
1623 *
1624 * @param env Pointer to the CPU environment.
1625 * @param fGlobal Set if the flush is global.
1626 */
1627void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1628{
1629 PVM pVM = env->pVM;
1630 PCPUMCTX pCtx;
1631 Assert(EMRemIsLockOwner(pVM));
1632
1633 /*
1634 * When we're replaying invlpg instructions or restoring a saved
1635 * state we disable this path.
1636 */
1637 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1638 return;
1639 Assert(pVM->rem.s.fInREM);
1640
1641 /*
1642     * The caller doesn't check cr4, so we have to do that ourselves.
1643 */
1644 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1645 fGlobal = true;
1646 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1647
1648 /*
1649 * Update the control registers before calling PGMR3FlushTLB.
1650 */
1651 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1652 Assert(pCtx);
1653 pCtx->cr0 = env->cr[0];
1654 pCtx->cr3 = env->cr[3];
1655 pCtx->cr4 = env->cr[4];
1656
1657 /*
1658 * Let PGM do the rest.
1659 */
1660 Assert(env->pVCpu);
1661 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1662}
1663
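/*
 * For illustration: with CR4.PGE clear no TLB entry is ever marked global, so
 * a non-global flush already hits everything; promoting fGlobal above merely
 * makes that explicit before handing over to PGM, e.g.:
 *
 *      if (!(env->cr[4] & X86_CR4_PGE))
 *          fGlobal = true;     // nothing is global anyway, flush it all
 *      PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
 */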
1664
1665/**
1666 * Called when any of the cr0, cr4 or efer registers is updated.
1667 *
1668 * @param env Pointer to the CPU environment.
1669 */
1670void remR3ChangeCpuMode(CPUX86State *env)
1671{
1672 PVM pVM = env->pVM;
1673 uint64_t efer;
1674 PCPUMCTX pCtx;
1675 int rc;
1676
1677 /*
1678 * When we're replaying loads or restoring a saved
1679 * state this path is disabled.
1680 */
1681 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1682 return;
1683 Assert(pVM->rem.s.fInREM);
1684
1685 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1686 Assert(pCtx);
1687
1688 /*
1689     * Notify PGM about CR0.WP being enabled (like CPUMSetGuestCR0 does).
1690 */
1691 if (((env->cr[0] ^ pCtx->cr0) & X86_CR0_WP) && (env->cr[0] & X86_CR0_WP))
1692 PGMCr0WpEnabled(env->pVCpu);
1693
1694 /*
1695 * Update the control registers before calling PGMChangeMode()
1696 * as it may need to map whatever cr3 is pointing to.
1697 */
1698 pCtx->cr0 = env->cr[0];
1699 pCtx->cr3 = env->cr[3];
1700 pCtx->cr4 = env->cr[4];
1701#ifdef TARGET_X86_64
1702 efer = env->efer;
1703 pCtx->msrEFER = efer;
1704#else
1705 efer = 0;
1706#endif
1707 Assert(env->pVCpu);
1708 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1709 if (rc != VINF_SUCCESS)
1710 {
1711 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1712 {
1713 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1714 remR3RaiseRC(env->pVM, rc);
1715 }
1716 else
1717 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1718 }
1719}
1720
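/*
 * For illustration: PGMChangeMode derives the new guest paging mode from the
 * three values passed above, conceptually along these lines (simplified;
 * real/V86 vs. protected mode and NX details omitted):
 *
 *      if (!(cr0 & X86_CR0_PG))
 *          enmMode = PGMMODE_REAL;             // or protected mode without paging
 *      else if (!(cr4 & X86_CR4_PAE))
 *          enmMode = PGMMODE_32_BIT;
 *      else if (!(efer & MSR_K6_EFER_LME))
 *          enmMode = PGMMODE_PAE;
 *      else
 *          enmMode = PGMMODE_AMD64;            // long mode paging
 */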
1721
1722/**
1723 * Called from compiled code to run DMA.
1724 *
1725 * @param env Pointer to the CPU environment.
1726 */
1727void remR3DmaRun(CPUX86State *env)
1728{
1729 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1730 PDMR3DmaRun(env->pVM);
1731 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1732}
1733
1734
1735/**
1736 * Called from compiled code to schedule pending timers in the VMM.
1737 *
1738 * @param env Pointer to the CPU environment.
1739 */
1740void remR3TimersRun(CPUX86State *env)
1741{
1742 LogFlow(("remR3TimersRun:\n"));
1743 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1744 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1745 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1746 TMR3TimerQueuesDo(env->pVM);
1747 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1748 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1749}
1750
1751
1752/**
1753 * Record trap occurrence
1754 *
1755 * @returns VBox status code
1756 * @param env Pointer to the CPU environment.
1757 * @param uTrap Trap nr
1758 * @param uErrorCode Error code
1759 * @param pvNextEIP Next EIP
1760 */
1761int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1762{
1763 PVM pVM = env->pVM;
1764#ifdef VBOX_WITH_STATISTICS
1765 static STAMCOUNTER s_aStatTrap[255];
1766 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1767#endif
1768
1769#ifdef VBOX_WITH_STATISTICS
1770 if (uTrap < 255)
1771 {
1772 if (!s_aRegisters[uTrap])
1773 {
1774 char szStatName[64];
1775 s_aRegisters[uTrap] = true;
1776 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1777 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1778 }
1779 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1780 }
1781#endif
1782 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1783    if (   uTrap < 0x20
1784 && (env->cr[0] & X86_CR0_PE)
1785 && !(env->eflags & X86_EFL_VM))
1786 {
1787#ifdef DEBUG
1788 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1789#endif
1790        if (pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1791 {
1792 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1793 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1794 return VERR_REM_TOO_MANY_TRAPS;
1795 }
1796        if (pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1797 {
1798 Log(("remR3NotifyTrap: uTrap=%#x set as pending\n", uTrap));
1799 pVM->rem.s.cPendingExceptions = 1;
1800 }
1801 pVM->rem.s.uPendingException = uTrap;
1802 pVM->rem.s.uPendingExcptEIP = env->eip;
1803 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1804 }
1805 else
1806 {
1807 pVM->rem.s.cPendingExceptions = 0;
1808 pVM->rem.s.uPendingException = uTrap;
1809 pVM->rem.s.uPendingExcptEIP = env->eip;
1810 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1811 }
1812 return VINF_SUCCESS;
1813}
1814
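/*
 * For illustration: the counting above turns a trap storm -- the same vector
 * raised at the same EIP/CR2 more than 512 times in a row -- into
 * VERR_REM_TOO_MANY_TRAPS instead of an endless loop. A caller of the REM
 * run loop could then react along these lines (sketch only, simplified):
 *
 *      rc = REMR3Run(pVM, pVCpu);
 *      if (rc == VERR_REM_TOO_MANY_TRAPS)
 *          return rc;      // let EM fail the VM rather than spin forever
 */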
1815
1816/**
1817 * Clear current active trap
1818 *
1819 * @param pVM VM Handle.
1820 */
1821void remR3TrapClear(PVM pVM)
1822{
1823 pVM->rem.s.cPendingExceptions = 0;
1824 pVM->rem.s.uPendingException = 0;
1825 pVM->rem.s.uPendingExcptEIP = 0;
1826 pVM->rem.s.uPendingExcptCR2 = 0;
1827}
1828
1829
1830/**
1831 * Record previous call instruction addresses
1832 *
1833 * @param env Pointer to the CPU environment.
1834 */
1835void remR3RecordCall(CPUX86State *env)
1836{
1837}
1838
1839
1840/**
1841 * Syncs the internal REM state with the VM.
1842 *
1843 * This must be called before REMR3Run() is invoked whenever the REM
1844 * state is not up to date. Calling it several times in a row is not
1845 * permitted.
1846 *
1847 * @returns VBox status code.
1848 *
1849 * @param pVM VM Handle.
1850 * @param pVCpu VMCPU Handle.
1851 *
1852 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1853 *        not do this since the majority of the callers don't want any unnecessary events
1854 * pending that would immediately interrupt execution.
1855 */
1856REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1857{
1858 register const CPUMCTX *pCtx;
1859 register unsigned fFlags;
1860 unsigned i;
1861 TRPMEVENT enmType;
1862 uint8_t u8TrapNo;
1863 uint32_t uCpl;
1864 int rc;
1865
1866 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1867 Log2(("REMR3State:\n"));
1868
1869 pVM->rem.s.Env.pVCpu = pVCpu;
1870 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1871
1872 Assert(pCtx);
1873 if ( CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
1874 || CPUMIsGuestInVmxNonRootMode(pCtx))
1875 {
1876 AssertMsgFailed(("Bad scheduling - can't exec. nested-guest in REM!\n"));
1877 return VERR_EM_CANNOT_EXEC_GUEST;
1878 }
1879
1880 Assert(!pVM->rem.s.fInREM);
1881 pVM->rem.s.fInStateSync = true;
1882
1883 /*
1884 * If we have to flush TBs, do that immediately.
1885 */
1886 if (pVM->rem.s.fFlushTBs)
1887 {
1888 STAM_COUNTER_INC(&gStatFlushTBs);
1889 tb_flush(&pVM->rem.s.Env);
1890 pVM->rem.s.fFlushTBs = false;
1891 }
1892
1893 /*
1894 * Copy the registers which require no special handling.
1895 */
1896#ifdef TARGET_X86_64
1897    /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
1898 Assert(R_EAX == 0);
1899 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1900 Assert(R_ECX == 1);
1901 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1902 Assert(R_EDX == 2);
1903 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1904 Assert(R_EBX == 3);
1905 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1906 Assert(R_ESP == 4);
1907 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1908 Assert(R_EBP == 5);
1909 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1910 Assert(R_ESI == 6);
1911 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1912 Assert(R_EDI == 7);
1913 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1914 pVM->rem.s.Env.regs[8] = pCtx->r8;
1915 pVM->rem.s.Env.regs[9] = pCtx->r9;
1916 pVM->rem.s.Env.regs[10] = pCtx->r10;
1917 pVM->rem.s.Env.regs[11] = pCtx->r11;
1918 pVM->rem.s.Env.regs[12] = pCtx->r12;
1919 pVM->rem.s.Env.regs[13] = pCtx->r13;
1920 pVM->rem.s.Env.regs[14] = pCtx->r14;
1921 pVM->rem.s.Env.regs[15] = pCtx->r15;
1922
1923 pVM->rem.s.Env.eip = pCtx->rip;
1924
1925 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1926#else
1927 Assert(R_EAX == 0);
1928 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1929 Assert(R_ECX == 1);
1930 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1931 Assert(R_EDX == 2);
1932 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1933 Assert(R_EBX == 3);
1934 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1935 Assert(R_ESP == 4);
1936 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1937 Assert(R_EBP == 5);
1938 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1939 Assert(R_ESI == 6);
1940 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1941 Assert(R_EDI == 7);
1942 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1943 pVM->rem.s.Env.eip = pCtx->eip;
1944
1945 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1946#endif
1947
1948 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1949
1950 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1951    for (i = 0; i < 8; i++)
1952 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1953
1954#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
1955 /*
1956 * Clear the halted hidden flag (the interrupt waking up the CPU can
1957 * have been dispatched in raw mode).
1958 */
1959 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1960#endif
1961
1962 /*
1963 * Replay invlpg? Only if we're not flushing the TLB.
1964 */
1965 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
1966 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
1967 if (pVM->rem.s.cInvalidatedPages)
1968 {
1969 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
1970 {
1971 RTUINT i;
1972
1973 pVM->rem.s.fIgnoreCR3Load = true;
1974 pVM->rem.s.fIgnoreInvlPg = true;
1975 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1976 {
1977 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1978 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1979 }
1980 pVM->rem.s.fIgnoreInvlPg = false;
1981 pVM->rem.s.fIgnoreCR3Load = false;
1982 }
1983 pVM->rem.s.cInvalidatedPages = 0;
1984 }
1985
1986 /* Replay notification changes. */
1987 REMR3ReplayHandlerNotifications(pVM);
1988
1989 /* Update MSRs; before CRx registers! */
1990 pVM->rem.s.Env.efer = pCtx->msrEFER;
1991 pVM->rem.s.Env.star = pCtx->msrSTAR;
1992 pVM->rem.s.Env.pat = pCtx->msrPAT;
1993#ifdef TARGET_X86_64
1994 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1995 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1996 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1997 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1998
1999    /* Update the internal long mode active flag according to the new EFER value. */
2000 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2001 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2002 else
2003 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2004#endif
2005
2006 /* Update the inhibit IRQ mask. */
2007 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2008 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2009 {
2010 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2011 if (InhibitPC == pCtx->rip)
2012 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2013 else
2014 {
2015 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2016 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2017 }
2018 }
2019
2020 /* Update the inhibit NMI mask. */
2021 pVM->rem.s.Env.hflags2 &= ~HF2_NMI_MASK;
2022 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2023 pVM->rem.s.Env.hflags2 |= HF2_NMI_MASK;
2024
2025 /*
2026 * Sync the A20 gate.
2027 */
2028 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2029 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2030 {
2031 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2032 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2033 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2034 }
2035
2036 /*
2037 * Registers which are rarely changed and require special handling / order when changed.
2038 */
2039 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2040 | CPUM_CHANGED_CR4
2041 | CPUM_CHANGED_CR0
2042 | CPUM_CHANGED_CR3
2043 | CPUM_CHANGED_GDTR
2044 | CPUM_CHANGED_IDTR
2045 | CPUM_CHANGED_SYSENTER_MSR
2046 | CPUM_CHANGED_LDTR
2047 | CPUM_CHANGED_CPUID
2048 | CPUM_CHANGED_FPU_REM
2049 )
2050 )
2051 {
2052 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2053 {
2054 pVM->rem.s.fIgnoreCR3Load = true;
2055 tlb_flush(&pVM->rem.s.Env, true);
2056 pVM->rem.s.fIgnoreCR3Load = false;
2057 }
2058
2059 /* CR4 before CR0! */
2060 if (fFlags & CPUM_CHANGED_CR4)
2061 {
2062 pVM->rem.s.fIgnoreCR3Load = true;
2063 pVM->rem.s.fIgnoreCpuMode = true;
2064 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2065 pVM->rem.s.fIgnoreCpuMode = false;
2066 pVM->rem.s.fIgnoreCR3Load = false;
2067 }
2068
2069 if (fFlags & CPUM_CHANGED_CR0)
2070 {
2071 pVM->rem.s.fIgnoreCR3Load = true;
2072 pVM->rem.s.fIgnoreCpuMode = true;
2073 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2074 pVM->rem.s.fIgnoreCpuMode = false;
2075 pVM->rem.s.fIgnoreCR3Load = false;
2076 }
2077
2078 if (fFlags & CPUM_CHANGED_CR3)
2079 {
2080 pVM->rem.s.fIgnoreCR3Load = true;
2081 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2082 pVM->rem.s.fIgnoreCR3Load = false;
2083 }
2084
2085 if (fFlags & CPUM_CHANGED_GDTR)
2086 {
2087 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2088 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2089 }
2090
2091 if (fFlags & CPUM_CHANGED_IDTR)
2092 {
2093 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2094 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2095 }
2096
2097 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2098 {
2099 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2100 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2101 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2102 }
2103
2104 if (fFlags & CPUM_CHANGED_LDTR)
2105 {
2106 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2107 {
2108 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2109 pVM->rem.s.Env.ldt.newselector = 0;
2110 pVM->rem.s.Env.ldt.fVBoxFlags = pCtx->ldtr.fFlags;
2111 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2112 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2113 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
2114 }
2115 else
2116 {
2117 AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
2118 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2119 }
2120 }
2121
2122 if (fFlags & CPUM_CHANGED_CPUID)
2123 {
2124 uint32_t u32Dummy;
2125
2126 /*
2127 * Get the CPUID features.
2128 */
2129 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2130 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2131 }
2132
2133 /* Sync FPU state after CR4, CPUID and EFER (!). */
2134 if (fFlags & CPUM_CHANGED_FPU_REM)
2135 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87); /* 'save' is an excellent name. */
2136 }
2137
2138 /*
2139 * Sync TR unconditionally to make life simpler.
2140 */
2141 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2142 pVM->rem.s.Env.tr.newselector = 0;
2143 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags;
2144 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2145 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2146 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
2147
2148 /*
2149 * Update selector registers.
2150 *
2151 * This must be done *after* we've synced gdt, ldt and crX registers
2152     * since we're reading the GDT/LDT in sync_seg. This will happen with
2153 * saved state which takes a quick dip into rawmode for instance.
2154 *
2155     * CPL/Stack; Note! Check this one first as the CPL might have changed.
2156 * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
2157 */
2158 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2159 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2160#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
2161 do \
2162 { \
2163 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
2164 { \
2165 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
2166 (a_pVBoxSReg)->Sel, \
2167 (a_pVBoxSReg)->u64Base, \
2168 (a_pVBoxSReg)->u32Limit, \
2169 ((a_pVBoxSReg)->Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT); \
2170 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
2171 } \
2172 /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
2173 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
2174 { \
2175 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
2176 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
2177 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
2178 if ((a_pRemSReg)->newselector) \
2179 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
2180 } \
2181 else \
2182 (a_pRemSReg)->newselector = 0; \
2183 } while (0)
2184
2185 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
2186 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
2187 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
2188 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
2189 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
2190 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
2191 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2192 * be the same but not the base/limit. */
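/*
 * For illustration: CPUM keeps the packed 16 attribute bits in Attr.u while
 * QEmu keeps them where they sit in the second descriptor dword, hence the
 * SEL_FLAGS_SHIFT/SEL_FLAGS_SMASK massaging above. Assuming the usual shift
 * of 8, a flat 32-bit ring-0 code segment would convert like this:
 *
 *      Attr.u = 0xc09b                               // G=1 D=1 P=1 DPL=0 S=1 type=0xb
 *      flags  = (0xc09b & SEL_FLAGS_SMASK) << 8      // = 0x00c09b00
 */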
2193
2194 /*
2195 * Check for traps.
2196 */
2197 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2198 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2199 if (RT_SUCCESS(rc))
2200 {
2201#ifdef DEBUG
2202 if (u8TrapNo == 0x80)
2203 {
2204 remR3DumpLnxSyscall(pVCpu);
2205 remR3DumpOBsdSyscall(pVCpu);
2206 }
2207#endif
2208
2209 pVM->rem.s.Env.exception_index = u8TrapNo;
2210 if (enmType != TRPM_SOFTWARE_INT)
2211 {
2212 pVM->rem.s.Env.exception_is_int = enmType == TRPM_HARDWARE_INT
2213 ? EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ : 0; /* HACK ALERT! */
2214 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2215 }
2216 else
2217 {
2218 /*
2219             * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2220             * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2221             * for int3 and into.
2222 */
2223 pVM->rem.s.Env.exception_is_int = 1;
2224 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2225 /* int 3 may be generated by one-byte 0xcc */
2226 if (u8TrapNo == X86_XCPT_BP)
2227 {
2228 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2229 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2230 }
2231 /* int 4 may be generated by one-byte 0xce */
2232 else if (u8TrapNo == X86_XCPT_OF)
2233 {
2234 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2235 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2236 }
2237 }
2238
2239 /* get error code and cr2 if needed. */
2240 if (enmType == TRPM_TRAP)
2241 {
2242 switch (u8TrapNo)
2243 {
2244 case X86_XCPT_PF:
2245 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2246 /* fallthru */
2247 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2248 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2249 break;
2250
2251 case X86_XCPT_AC: case X86_XCPT_DF:
2252 default:
2253 pVM->rem.s.Env.error_code = 0;
2254 break;
2255 }
2256 }
2257 else
2258 pVM->rem.s.Env.error_code = 0;
2259
2260 /*
2261 * We can now reset the active trap since the recompiler is gonna have a go at it.
2262 */
2263 rc = TRPMResetTrap(pVCpu);
2264 AssertRC(rc);
2265 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2266 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2267 }
2268
2269 /*
2270 * Clear old interrupt request flags; Check for pending hardware interrupts.
2271 * (See @remark for why we don't check for other FFs.)
2272 */
2273 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2274 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2275 APICUpdatePendingInterrupts(pVCpu);
2276 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2277 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2278
2279 /*
2280 * We're now in REM mode.
2281 */
2282 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2283 pVM->rem.s.fInREM = true;
2284 pVM->rem.s.fInStateSync = false;
2285 pVM->rem.s.cCanExecuteRaw = 0;
2286 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2287 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2288 return VINF_SUCCESS;
2289}
2290
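/*
 * For illustration: the expected calling sequence, roughly what EM's
 * REM-execution loop does (heavily simplified, force-flag checks omitted):
 *
 *      int rc = REMR3State(pVM, pVCpu);              // VMM -> REM
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = REMR3Run(pVM, pVCpu);                // execute recompiled code
 *          int rc2 = REMR3StateBack(pVM, pVCpu);     // REM -> VMM
 *          AssertRC(rc2);
 *      }
 */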
2291
2292/**
2293 * Syncs back changes in the REM state to the VM state.
2294 *
2295 * This must be called after invoking REMR3Run().
2296 * Calling it several times in a row is not permitted.
2297 *
2298 * @returns VBox status code.
2299 *
2300 * @param pVM VM Handle.
2301 * @param pVCpu VMCPU Handle.
2302 */
2303REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2304{
2305    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2306    unsigned i;
2307    Assert(pCtx);
2308
2309 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2310 Log2(("REMR3StateBack:\n"));
2311 Assert(pVM->rem.s.fInREM);
2312
2313 /*
2314 * Copy back the registers.
2315 * This is done in the order they are declared in the CPUMCTX structure.
2316 */
2317
2318 /** @todo FOP */
2319 /** @todo FPUIP */
2320 /** @todo CS */
2321 /** @todo FPUDP */
2322 /** @todo DS */
2323
2324 /** @todo check if FPU/XMM was actually used in the recompiler */
2325 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87);
2326//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2327
2328#ifdef TARGET_X86_64
2329    /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2330 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2331 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2332 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2333 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2334 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2335 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2336 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2337 pCtx->r8 = pVM->rem.s.Env.regs[8];
2338 pCtx->r9 = pVM->rem.s.Env.regs[9];
2339 pCtx->r10 = pVM->rem.s.Env.regs[10];
2340 pCtx->r11 = pVM->rem.s.Env.regs[11];
2341 pCtx->r12 = pVM->rem.s.Env.regs[12];
2342 pCtx->r13 = pVM->rem.s.Env.regs[13];
2343 pCtx->r14 = pVM->rem.s.Env.regs[14];
2344 pCtx->r15 = pVM->rem.s.Env.regs[15];
2345
2346 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2347
2348#else
2349 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2350 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2351 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2352 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2353 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2354 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2355 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2356
2357 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2358#endif
2359
2360#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2361 do \
2362 { \
2363 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2364        if (!pVM->rem.s.Env.segs[R_##a_SREG].newselector) \
2365 { \
2366 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2367 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2368 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2369 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2370 /* Note! QEmu saves the 2nd dword of the descriptor; we (VT-x/AMD-V) keep only the attributes! */ \
2371 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK; \
2372 } \
2373 else \
2374 { \
2375 pCtx->a_sreg.fFlags = 0; \
2376 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2377 } \
2378 } while (0)
2379
2380 SYNC_BACK_SREG(es, ES);
2381 SYNC_BACK_SREG(cs, CS);
2382 SYNC_BACK_SREG(ss, SS);
2383 SYNC_BACK_SREG(ds, DS);
2384 SYNC_BACK_SREG(fs, FS);
2385 SYNC_BACK_SREG(gs, GS);
2386
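/*
 * For illustration: when newselector is still set above, QEmu never managed
 * to load the hidden selector parts, so fFlags is left zero and the hidden
 * state must be treated as stale. A consumer would check for that before
 * relying on base/limit/attributes, e.g.:
 *
 *      if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds))
 *          reload_hidden_parts_from_descriptor_table();    // hypothetical helper
 */
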
2387#ifdef TARGET_X86_64
2388 pCtx->rip = pVM->rem.s.Env.eip;
2389 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2390#else
2391 pCtx->eip = pVM->rem.s.Env.eip;
2392 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2393#endif
2394
2395 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2396 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2397 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2398 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2399
2400 for (i = 0; i < 8; i++)
2401 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2402
2403 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2404 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2405 {
2406 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2407 STAM_COUNTER_INC(&gStatREMGDTChange);
2408 }
2409
2410 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2411 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2412 {
2413 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2414 STAM_COUNTER_INC(&gStatREMIDTChange);
2415 }
2416
2417 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2418 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2419 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2420 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2421 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2422 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2423 )
2424 {
2425 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2426 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2427 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2428 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2429 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2430 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2431 STAM_COUNTER_INC(&gStatREMLDTRChange);
2432 }
2433
2434 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2435 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2436 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2437 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2438 || pCtx->tr.Attr.u != ((pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2439 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2440 )
2441 {
2442 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2443 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2444 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2445 pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT));
2446 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2447 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2448 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2449 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2450 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2451 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2452 Assert(pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE);
2453 STAM_COUNTER_INC(&gStatREMTRChange);
2454 }
2455
2456 /* Sysenter MSR */
2457 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2458 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2459 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2460
2461 /* System MSRs. */
2462 pCtx->msrEFER = pVM->rem.s.Env.efer;
2463 pCtx->msrSTAR = pVM->rem.s.Env.star;
2464 pCtx->msrPAT = pVM->rem.s.Env.pat;
2465#ifdef TARGET_X86_64
2466 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2467 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2468 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2469 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2470#endif
2471
2472 /* Inhibit interrupt flag. */
2473 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2474 {
2475 Log(("Settings VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2476 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2477 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2478 }
2479 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2480 {
2481 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2482 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2483 }
2484
2485 /* Inhibit NMI flag. */
2486 if (pVM->rem.s.Env.hflags2 & HF2_NMI_MASK)
2487 {
2488 Log(("Settings VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2489 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2490 }
2491 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2492 {
2493 Log(("Clearing VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2494 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2495 }
2496
2497 remR3TrapClear(pVM);
2498
2499 /*
2500 * Check for traps.
2501 */
2502 if ( pVM->rem.s.Env.exception_index >= 0
2503 && pVM->rem.s.Env.exception_index < 256)
2504 {
2505 /* This cannot be a hardware-interrupt because exception_index < EXCP_INTERRUPT. */
2506 int rc;
2507
2508 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2509 TRPMEVENT enmType = pVM->rem.s.Env.exception_is_int == 0 ? TRPM_TRAP
2510 : pVM->rem.s.Env.exception_is_int == EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ ? TRPM_HARDWARE_INT
2511 : TRPM_SOFTWARE_INT;
2512 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, enmType);
2513 AssertRC(rc);
2514 if (enmType == TRPM_TRAP)
2515 {
2516 switch (pVM->rem.s.Env.exception_index)
2517 {
2518 case X86_XCPT_PF:
2519 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2520 /* fallthru */
2521 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2522 case X86_XCPT_AC: case X86_XCPT_DF: /* 0 */
2523 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2524 break;
2525 }
2526 }
2527 }
2528
2529 /*
2530     * We're no longer in REM mode.
2531 */
2532 CPUMR3RemLeave(pVCpu,
2533 !VM_IS_RAW_MODE_ENABLED(pVM)
2534 || ( pVM->rem.s.Env.segs[R_SS].newselector
2535 | pVM->rem.s.Env.segs[R_GS].newselector
2536 | pVM->rem.s.Env.segs[R_FS].newselector
2537 | pVM->rem.s.Env.segs[R_ES].newselector
2538 | pVM->rem.s.Env.segs[R_DS].newselector
2539 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2540 );
2541 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2542 pVM->rem.s.fInREM = false;
2543 pVM->rem.s.pCtx = NULL;
2544 pVM->rem.s.Env.pVCpu = NULL;
2545 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2546 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2547 return VINF_SUCCESS;
2548}
2549
2550
2551/**
2552 * This is called by the disassembler when it wants to update the cpu state
2553 * before, for instance, doing a register dump.
2554 */
2555static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2556{
2557 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2558 unsigned i;
2559
2560 Assert(pVM->rem.s.fInREM);
2561
2562 /*
2563 * Copy back the registers.
2564 * This is done in the order they are declared in the CPUMCTX structure.
2565 */
2566
2567 PX86FXSTATE pFpuCtx = &pCtx->pXStateR3->x87;
2568 /** @todo FOP */
2569 /** @todo FPUIP */
2570 /** @todo CS */
2571 /** @todo FPUDP */
2572 /** @todo DS */
2573 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2574 pFpuCtx->MXCSR = 0;
2575 pFpuCtx->MXCSR_MASK = 0;
2576
2577 /** @todo check if FPU/XMM was actually used in the recompiler */
2578 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)pFpuCtx);
2579//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2580
2581#ifdef TARGET_X86_64
2582 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2583 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2584 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2585 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2586 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2587 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2588 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2589 pCtx->r8 = pVM->rem.s.Env.regs[8];
2590 pCtx->r9 = pVM->rem.s.Env.regs[9];
2591 pCtx->r10 = pVM->rem.s.Env.regs[10];
2592 pCtx->r11 = pVM->rem.s.Env.regs[11];
2593 pCtx->r12 = pVM->rem.s.Env.regs[12];
2594 pCtx->r13 = pVM->rem.s.Env.regs[13];
2595 pCtx->r14 = pVM->rem.s.Env.regs[14];
2596 pCtx->r15 = pVM->rem.s.Env.regs[15];
2597
2598 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2599#else
2600 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2601 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2602 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2603 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2604 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2605 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2606 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2607
2608 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2609#endif
2610
2611 SYNC_BACK_SREG(es, ES);
2612 SYNC_BACK_SREG(cs, CS);
2613 SYNC_BACK_SREG(ss, SS);
2614 SYNC_BACK_SREG(ds, DS);
2615 SYNC_BACK_SREG(fs, FS);
2616 SYNC_BACK_SREG(gs, GS);
2617
2618#ifdef TARGET_X86_64
2619 pCtx->rip = pVM->rem.s.Env.eip;
2620 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2621#else
2622 pCtx->eip = pVM->rem.s.Env.eip;
2623 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2624#endif
2625
2626 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2627 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2628 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2629 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2630
2631 for (i = 0; i < 8; i++)
2632 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2633
2634 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2635 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2636 {
2637 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2638 STAM_COUNTER_INC(&gStatREMGDTChange);
2639 }
2640
2641 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2642 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2643 {
2644 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2645 STAM_COUNTER_INC(&gStatREMIDTChange);
2646 }
2647
2648 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2649 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2650 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2651 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2652 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2653 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2654 )
2655 {
2656 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2657 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2658 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2659 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2660 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2661 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2662 STAM_COUNTER_INC(&gStatREMLDTRChange);
2663 }
2664
2665 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2666 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2667 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2668 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2669 || pCtx->tr.Attr.u != ((pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2670 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2671 )
2672 {
2673 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2674 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2675 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2676 pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT));
2677 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2678 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2679 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2680 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2681 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2682 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2683 Assert(pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE);
2684 STAM_COUNTER_INC(&gStatREMTRChange);
2685 }
2686
2687 /* Sysenter MSR */
2688 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2689 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2690 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2691
2692 /* System MSRs. */
2693 pCtx->msrEFER = pVM->rem.s.Env.efer;
2694 pCtx->msrSTAR = pVM->rem.s.Env.star;
2695 pCtx->msrPAT = pVM->rem.s.Env.pat;
2696#ifdef TARGET_X86_64
2697 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2698 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2699 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2700 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2701#endif
2702
2703}
2704
2705
2706/**
2707 * Update the VMM state information if we're currently in REM.
2708 *
2709 * This method is used by the DBGF and PDMDevice when there is any uncertainty about whether
2710 * we're currently executing in REM and the VMM state is invalid. This method will of
2711 * course check that we're executing in REM before syncing any data over to the VMM.
2712 *
2713 * @param pVM The VM handle.
2714 * @param pVCpu The VMCPU handle.
2715 */
2716REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2717{
2718 if (pVM->rem.s.fInREM)
2719 remR3StateUpdate(pVM, pVCpu);
2720}
2721
2722
2723#undef LOG_GROUP
2724#define LOG_GROUP LOG_GROUP_REM
2725
2726
2727/**
2728 * Notify the recompiler about Address Gate 20 state change.
2729 *
2730 * This notification is required since A20 gate changes are
2731 * initiated by a device driver and the VM might just as
2732 * well be in REM mode as in RAW mode.
2733 *
2734 * @param pVM VM handle.
2735 * @param pVCpu VMCPU handle.
2736 * @param fEnable True if the gate should be enabled.
2737 * False if the gate should be disabled.
2738 */
2739REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2740{
2741 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2742 VM_ASSERT_EMT(pVM);
2743
2744 /** @todo SMP and the A20 gate... */
2745 if (pVM->rem.s.Env.pVCpu == pVCpu)
2746 {
2747 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2748 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2749 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2750 }
2751}
2752
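/*
 * For illustration: cpu_x86_set_a20 recomputes env->a20_mask (and flushes the
 * TLB), after which QEmu masks every physical address with it. With the gate
 * disabled, bit 20 is cleared, giving the classic 1MB wrap-around:
 *
 *      GCPhys &= pVM->rem.s.Env.a20_mask;      // 0x00100000 -> 0x00000000
 */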
2753
2754/**
2755 * Replays the handler notification changes.
2756 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2757 *
2758 * @param pVM VM handle.
2759 */
2760REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2761{
2762 /*
2763 * Replay the flushes.
2764 */
2765 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2766 VM_ASSERT_EMT(pVM);
2767
2768 /** @todo this isn't ensuring correct replay order. */
2769 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2770 {
2771 uint32_t idxNext;
2772 uint32_t idxRevHead;
2773 uint32_t idxHead;
2774#ifdef VBOX_STRICT
2775 int32_t c = 0;
2776#endif
2777
2778 /* Lockless purging of pending notifications. */
2779 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2780 if (idxHead == UINT32_MAX)
2781 return;
2782 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2783
2784 /*
2785 * Reverse the list to process it in FIFO order.
2786 */
2787 idxRevHead = UINT32_MAX;
2788 do
2789 {
2790 /* Save the index of the next rec. */
2791 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
2792 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
2793 /* Push the record onto the reversed list. */
2794 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
2795 idxRevHead = idxHead;
2796 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2797 /* Advance. */
2798 idxHead = idxNext;
2799 } while (idxHead != UINT32_MAX);
2800
2801 /*
2802     * Loop thru the list, reinserting the records into the free list as they are
2803     * processed to avoid having other EMTs run out of entries while we're flushing.
2804 */
2805 idxHead = idxRevHead;
2806 do
2807 {
2808 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
2809 uint32_t idxCur;
2810 Assert(--c >= 0);
2811
2812 switch (pCur->enmKind)
2813 {
2814 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2815 remR3NotifyHandlerPhysicalRegister(pVM,
2816 pCur->u.PhysicalRegister.enmKind,
2817 pCur->u.PhysicalRegister.GCPhys,
2818 pCur->u.PhysicalRegister.cb,
2819 pCur->u.PhysicalRegister.fHasHCHandler);
2820 break;
2821
2822 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2823 remR3NotifyHandlerPhysicalDeregister(pVM,
2824 pCur->u.PhysicalDeregister.enmKind,
2825 pCur->u.PhysicalDeregister.GCPhys,
2826 pCur->u.PhysicalDeregister.cb,
2827 pCur->u.PhysicalDeregister.fHasHCHandler,
2828 pCur->u.PhysicalDeregister.fRestoreAsRAM);
2829 break;
2830
2831 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2832 remR3NotifyHandlerPhysicalModify(pVM,
2833 pCur->u.PhysicalModify.enmKind,
2834 pCur->u.PhysicalModify.GCPhysOld,
2835 pCur->u.PhysicalModify.GCPhysNew,
2836 pCur->u.PhysicalModify.cb,
2837 pCur->u.PhysicalModify.fHasHCHandler,
2838 pCur->u.PhysicalModify.fRestoreAsRAM);
2839 break;
2840
2841 default:
2842 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
2843 break;
2844 }
2845
2846 /*
2847 * Advance idxHead.
2848 */
2849 idxCur = idxHead;
2850 idxHead = pCur->idxNext;
2851 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
2852
2853 /*
2854 * Put the record back into the free list.
2855 */
2856 do
2857 {
2858 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
2859 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
2860 ASMCompilerBarrier();
2861 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
2862 } while (idxHead != UINT32_MAX);
2863
2864#ifdef VBOX_STRICT
2865 if (pVM->cCpus == 1)
2866 {
2867 unsigned c;
2868 /* Check that all records are now on the free list. */
2869 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
2870 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
2871 c++;
2872 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
2873 }
2874#endif
2875 }
2876}
2877
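/*
 * For illustration: the producer side (not in this file section) presumably
 * pushes records onto idxPendingList using the same lockless pattern as the
 * free-list reinsertion above, followed by raising the force flag:
 *
 *      do
 *      {
 *          idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxPendingList);
 *          ASMAtomicWriteU32(&pRec->idxNext, idxNext);
 *          ASMCompilerBarrier();
 *      } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxPendingList, idxRec, idxNext));
 *      VM_FF_SET(pVM, VM_FF_REM_HANDLER_NOTIFY);
 */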
2878
2879/**
2880 * Notify REM about changed code page.
2881 *
2882 * @returns VBox status code.
2883 * @param pVM VM handle.
2884 * @param pVCpu VMCPU handle.
2885 * @param pvCodePage Code page address
2886 */
2887REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2888{
2889#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2890 int rc;
2891 RTGCPHYS PhysGC;
2892 uint64_t flags;
2893
2894 VM_ASSERT_EMT(pVM);
2895
2896 /*
2897 * Get the physical page address.
2898 */
2899 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2900 if (rc == VINF_SUCCESS)
2901 {
2902 /*
2903 * Sync the required registers and flush the whole page.
2904         * (Easier to do the whole page than to notify REM about each physical
2905         * byte that was changed.)
2906 */
2907 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2908 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2909 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2910 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2911
2912 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2913 }
2914#endif
2915 return VINF_SUCCESS;
2916}
2917
2918
2919/**
2920 * Notification about a successful MMR3PhysRegister() call.
2921 *
2922 * @param pVM VM handle.
2923 * @param   GCPhys      The physical address of the RAM.
2924 * @param cb Size of the memory.
2925 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2926 */
2927REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2928{
2929 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2930 VM_ASSERT_EMT(pVM);
2931
2932 /*
2933 * Validate input - we trust the caller.
2934 */
2935 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2936 Assert(cb);
2937 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2938 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
2939
2940 /*
2941 * Base ram? Update GCPhysLastRam.
2942 */
2943 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2944 {
2945 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2946 {
2947 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2948 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2949 }
2950 }
2951
2952 /*
2953 * Register the ram.
2954 */
2955 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2956
2957 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2958 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
2959 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2960
2961 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2962}
2963
2964
2965/**
2966 * Notification about a successful MMR3PhysRomRegister() call.
2967 *
2968 * @param pVM VM handle.
2969 * @param GCPhys The physical address of the ROM.
2970 * @param cb The size of the ROM.
2971 * @param pvCopy Pointer to the ROM copy.
2972 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2973 *                      This function will be called whenever the protection of the
2974 * shadow ROM changes (at reset and end of POST).
2975 */
2976REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2977{
2978 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2979 VM_ASSERT_EMT(pVM);
2980
2981 /*
2982 * Validate input - we trust the caller.
2983 */
2984 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2985 Assert(cb);
2986 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2987
2988 /*
2989 * Register the rom.
2990 */
2991 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2992
2993 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2994 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
2995 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2996
2997 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2998}
2999
3000
3001/**
3002 * Notification about a successful memory deregistration or reservation.
3003 *
3004 * @param pVM VM Handle.
3005 * @param GCPhys Start physical address.
3006 * @param cb The size of the range.
3007 */
3008REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3009{
3010 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3011 VM_ASSERT_EMT(pVM);
3012
3013 /*
3014 * Validate input - we trust the caller.
3015 */
3016 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3017 Assert(cb);
3018 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3019
3020 /*
3021     * Unassign the memory.
3022 */
3023 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3024
3025 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3026 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3027 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3028
3029 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3030}
3031
3032
3033/**
3034 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3035 *
3036 * @param pVM VM Handle.
3037 * @param enmKind Kind of access handler.
3038 * @param GCPhys Handler range address.
3039 * @param cb Size of the handler range.
3040 * @param fHasHCHandler Set if the handler has a HC callback function.
3041 *
3042 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3043 * Handler memory type to memory which has no HC handler.
3044 */
3045static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3046 bool fHasHCHandler)
3047{
3048 Log(("REMR3NotifyHandlerPhysicalRegister: enmKind=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3049 enmKind, GCPhys, cb, fHasHCHandler));
3050
3051 VM_ASSERT_EMT(pVM);
3052 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3053 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3054
3055
3056 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3057
3058 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3059 if (enmKind == PGMPHYSHANDLERKIND_MMIO)
3060 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3061 else if (fHasHCHandler)
3062 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3063 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3064
3065 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3066}
3067
3068/**
3069 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3070 *
3071 * @param pVM VM Handle.
3072 * @param enmKind Kind of access handler.
3073 * @param GCPhys Handler range address.
3074 * @param cb Size of the handler range.
3075 * @param fHasHCHandler Set if the handler has a HC callback function.
3076 *
3077 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3078 * Handler memory type to memory which has no HC handler.
3079 */
3080REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3081 bool fHasHCHandler)
3082{
3083 REMR3ReplayHandlerNotifications(pVM);
3084
3085 remR3NotifyHandlerPhysicalRegister(pVM, enmKind, GCPhys, cb, fHasHCHandler);
3086}
3087
3088/**
3089 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3090 *
3091 * @param pVM VM Handle.
3092 * @param enmKind Kind of access handler.
3093 * @param GCPhys Handler range address.
3094 * @param cb Size of the handler range.
3095 * @param fHasHCHandler Set if the handler has a HC callback function.
3096 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3097 */
3098static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3099 bool fHasHCHandler, bool fRestoreAsRAM)
3100{
3101 Log(("REMR3NotifyHandlerPhysicalDeregister: enmKind=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3102 enmKind, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3103 VM_ASSERT_EMT(pVM);
3104
3105
3106 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3107
3108 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3109 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3110 if (enmKind == PGMPHYSHANDLERKIND_MMIO)
3111 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3112 else if (fHasHCHandler)
3113 {
3114 if (!fRestoreAsRAM)
3115 {
3116 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3117 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3118 }
3119 else
3120 {
3121 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3122 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3123 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3124 }
3125 }
3126 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3127
3128 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3129}
3130
3131/**
3132 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3133 *
3134 * @param pVM VM Handle.
3135 * @param enmKind Kind of access handler.
3136 * @param GCPhys Handler range address.
3137 * @param cb Size of the handler range.
3138 * @param fHasHCHandler Set if the handler has a HC callback function.
3139 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3140 */
3141REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3142{
3143 REMR3ReplayHandlerNotifications(pVM);
3144 remR3NotifyHandlerPhysicalDeregister(pVM, enmKind, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3145}
3146
3147
3148/**
3149 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3150 *
3151 * @param pVM VM Handle.
3152 * @param enmKind Kind of access handler.
3153 * @param GCPhysOld Old handler range address.
3154 * @param GCPhysNew New handler range address.
3155 * @param cb Size of the handler range.
3156 * @param fHasHCHandler Set if the handler has a HC callback function.
3157 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3158 */
3159static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3160{
3161 Log(("REMR3NotifyHandlerPhysicalModify: enmKind=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3162 enmKind, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3163 VM_ASSERT_EMT(pVM);
3164 AssertReleaseMsg(enmKind != PGMPHYSHANDLERKIND_MMIO, ("enmKind=%d\n", enmKind));
3165
3166 if (fHasHCHandler)
3167 {
3168 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3169
3170 /*
3171 * Reset the old page.
3172 */
3173 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3174 if (!fRestoreAsRAM)
3175 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3176 else
3177 {
3178 /* This is not perfect, but it'll do for PD monitoring... */
3179 Assert(cb == PAGE_SIZE);
3180 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3181 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3182 }
3183
3184 /*
3185 * Update the new page.
3186 */
3187 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3188 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3189 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3190 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3191
3192 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3193 }
3194}
3195
3196/**
3197 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3198 *
3199 * @param pVM VM Handle.
3200 * @param enmKind Kind of access handler.
3201 * @param GCPhysOld Old handler range address.
3202 * @param GCPhysNew New handler range address.
3203 * @param cb Size of the handler range.
3204 * @param fHasHCHandler Set if the handler has a HC callback function.
3205 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3206 */
3207REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3208{
3209 REMR3ReplayHandlerNotifications(pVM);
3210
3211 remR3NotifyHandlerPhysicalModify(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3212}
3213
3214/**
3215 * Checks if we're handling access to this page or not.
3216 *
3217 * @returns true if we're trapping access.
3218 * @returns false if we aren't.
3219 * @param pVM The VM handle.
3220 * @param GCPhys The physical address.
3221 *
3222 * @remark This function will only work correctly in VBOX_STRICT builds!
3223 */
3224REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3225{
3226#ifdef VBOX_STRICT
3227 ram_addr_t off;
3228 REMR3ReplayHandlerNotifications(pVM);
3229
3230 off = get_phys_page_offset(GCPhys);
3231 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3232 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3233 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3234#else
3235 return false;
3236#endif
3237}
3238
3239
3240/**
3241 * Deals with a rare case in get_phys_addr_code where the code
3242 * is being monitored.
3243 *
3244 * It could also be an MMIO page, in which case we will raise a fatal error.
3245 *
3246 * @returns The physical address corresponding to addr.
3247 * @param env The cpu environment.
3248 * @param addr The virtual address.
3249 * @param pTLBEntry The TLB entry.
3250 * @param IoTlbEntry The I/O TLB entry address.
3251 */
3252target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3253 target_ulong addr,
3254 CPUTLBEntry *pTLBEntry,
3255 target_phys_addr_t IoTlbEntry)
3256{
3257 PVM pVM = env->pVM;
3258
3259 if ((IoTlbEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3260 {
3261        /* If code memory is being monitored, the corresponding IOTLB entry has the
3262           handler I/O type and its addend provides the real physical address; it
3263           doesn't matter whether we store a VA in the TLB, as handlers are always passed a PA. */
3264 target_ulong ret = (IoTlbEntry & TARGET_PAGE_MASK) + addr;
3265 return ret;
3266 }
3267 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3268 "*** handlers\n",
3269 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)IoTlbEntry));
3270 DBGFR3Info(pVM->pUVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3271 LogRel(("*** mmio\n"));
3272 DBGFR3Info(pVM->pUVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3273 LogRel(("*** phys\n"));
3274 DBGFR3Info(pVM->pUVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3275 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3276 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3277 AssertFatalFailed();
3278}
3279
3280/**
3281 * Read guest RAM and ROM.
3282 *
3283 * @param SrcGCPhys The source address (guest physical).
3284 * @param pvDst The destination address.
3285 * @param cb Number of bytes
3286 */
3287void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3288{
3289 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3290 VBOX_CHECK_ADDR(SrcGCPhys);
3291 VBOXSTRICTRC rcStrict = PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb, PGMACCESSORIGIN_REM);
3292 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3293#ifdef VBOX_DEBUG_PHYS
3294 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3295#endif
3296 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3297}
3298
3299
3300/**
3301 * Read guest RAM and ROM, unsigned 8-bit.
3302 *
3303 * @param SrcGCPhys The source address (guest physical).
3304 */
3305RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3306{
3307 uint8_t val;
3308 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3309 VBOX_CHECK_ADDR(SrcGCPhys);
3310 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3311 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3312#ifdef VBOX_DEBUG_PHYS
3313 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3314#endif
3315 return val;
3316}
3317
3318
3319/**
3320 * Read guest RAM and ROM, signed 8-bit.
3321 *
3322 * @param SrcGCPhys The source address (guest physical).
3323 */
3324RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3325{
3326 int8_t val;
3327 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3328 VBOX_CHECK_ADDR(SrcGCPhys);
3329 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3330 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3331#ifdef VBOX_DEBUG_PHYS
3332 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3333#endif
3334 return val;
3335}
3336
3337
3338/**
3339 * Read guest RAM and ROM, unsigned 16-bit.
3340 *
3341 * @param SrcGCPhys The source address (guest physical).
3342 */
3343RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3344{
3345 uint16_t val;
3346 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3347 VBOX_CHECK_ADDR(SrcGCPhys);
3348 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3349 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3350#ifdef VBOX_DEBUG_PHYS
3351 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3352#endif
3353 return val;
3354}
3355
3356
3357/**
3358 * Read guest RAM and ROM, signed 16-bit.
3359 *
3360 * @param SrcGCPhys The source address (guest physical).
3361 */
3362RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3363{
3364 int16_t val;
3365 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3366 VBOX_CHECK_ADDR(SrcGCPhys);
3367 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3368 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3369#ifdef VBOX_DEBUG_PHYS
3370 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3371#endif
3372 return val;
3373}
3374
3375
3376/**
3377 * Read guest RAM and ROM, unsigned 32-bit.
3378 *
3379 * @param SrcGCPhys The source address (guest physical).
3380 */
3381RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3382{
3383 uint32_t val;
3384 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3385 VBOX_CHECK_ADDR(SrcGCPhys);
3386 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3387 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3388#ifdef VBOX_DEBUG_PHYS
3389 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3390#endif
3391 return val;
3392}
3393
3394
3395/**
3396 * Read guest RAM and ROM, signed 32-bit.
3397 *
3398 * @param SrcGCPhys The source address (guest physical).
3399 */
3400RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3401{
3402 int32_t val;
3403 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3404 VBOX_CHECK_ADDR(SrcGCPhys);
3405 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3406 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3407#ifdef VBOX_DEBUG_PHYS
3408 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3409#endif
3410 return val;
3411}
3412
3413
3414/**
3415 * Read guest RAM and ROM, unsigned 64-bit.
3416 *
3417 * @param SrcGCPhys The source address (guest physical).
3418 */
3419uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3420{
3421 uint64_t val;
3422 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3423 VBOX_CHECK_ADDR(SrcGCPhys);
3424 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3425 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3426#ifdef VBOX_DEBUG_PHYS
3427 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3428#endif
3429 return val;
3430}
3431
3432
3433/**
3434 * Read guest RAM and ROM, signed 64-bit.
3435 *
3436 * @param SrcGCPhys The source address (guest physical).
3437 */
3438int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3439{
3440 int64_t val;
3441 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3442 VBOX_CHECK_ADDR(SrcGCPhys);
3443 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3444 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3445#ifdef VBOX_DEBUG_PHYS
3446 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3447#endif
3448 return val;
3449}
3450
3451
3452/**
3453 * Write guest RAM.
3454 *
3455 * @param DstGCPhys The destination address (guest physical).
3456 * @param pvSrc The source address.
3457 * @param cb Number of bytes to write
3458 */
3459void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3460{
3461 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3462 VBOX_CHECK_ADDR(DstGCPhys);
3463 VBOXSTRICTRC rcStrict = PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb, PGMACCESSORIGIN_REM);
3464 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3465 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3466#ifdef VBOX_DEBUG_PHYS
3467 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3468#endif
3469}
3470
3471
3472/**
3473 * Write guest RAM, unsigned 8-bit.
3474 *
3475 * @param DstGCPhys The destination address (guest physical).
3476 * @param val Value
3477 */
3478void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3479{
3480 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3481 VBOX_CHECK_ADDR(DstGCPhys);
3482 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3483 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3484#ifdef VBOX_DEBUG_PHYS
3485 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3486#endif
3487}
3488
3489
3490/**
3491 * Write guest RAM, unsigned 16-bit.
3492 *
3493 * @param DstGCPhys The destination address (guest physical).
3494 * @param val Value
3495 */
3496void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3497{
3498 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3499 VBOX_CHECK_ADDR(DstGCPhys);
3500 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3501 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3502#ifdef VBOX_DEBUG_PHYS
3503 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3504#endif
3505}
3506
3507
3508/**
3509 * Write guest RAM, unsigned 32-bit.
3510 *
3511 * @param DstGCPhys The destination address (guest physical).
3512 * @param val Value
3513 */
3514void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3515{
3516 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3517 VBOX_CHECK_ADDR(DstGCPhys);
3518 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3519 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3520#ifdef VBOX_DEBUG_PHYS
3521 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3522#endif
3523}
3524
3525
3526/**
3527 * Write guest RAM, unsigned 64-bit.
3528 *
3529 * @param DstGCPhys The destination address (guest physical).
3530 * @param val Value
3531 */
3532void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3533{
3534 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3535 VBOX_CHECK_ADDR(DstGCPhys);
3536 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3537 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3538#ifdef VBOX_DEBUG_PHYS
3539 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3540#endif
3541}
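
/* Note on the helpers above: every remR3PhysRead / remR3PhysWrite variant
 * follows the same pattern - validate the address with VBOX_CHECK_ADDR,
 * forward the access to PGM with PGMACCESSORIGIN_REM, and account the time
 * to the gStatMemRead / gStatMemWrite profiles. Only the access width and
 * signedness differ between them. */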
3542
3543#undef LOG_GROUP
3544#define LOG_GROUP LOG_GROUP_REM_MMIO
3545
3546/** Read MMIO memory. */
3547static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys)
3548{
3549 CPUX86State *env = (CPUX86State *)pvEnv;
3550 uint32_t u32 = 0;
3551 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 1);
3552 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3553 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3554 return u32;
3555}
3556
3557/** Read MMIO memory. */
3558static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys)
3559{
3560 CPUX86State *env = (CPUX86State *)pvEnv;
3561 uint32_t u32 = 0;
3562 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 2);
3563 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3564 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3565 return u32;
3566}
3567
3568/** Read MMIO memory. */
3569static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys)
3570{
3571 CPUX86State *env = (CPUX86State *)pvEnv;
3572 uint32_t u32 = 0;
3573 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 4);
3574 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3575 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3576 return u32;
3577}
3578
3579/** Write to MMIO memory. */
3580static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3581{
3582 CPUX86State *env = (CPUX86State *)pvEnv;
3583 int rc;
3584 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3585 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 1);
3586 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3587}
3588
3589/** Write to MMIO memory. */
3590static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3591{
3592 CPUX86State *env = (CPUX86State *)pvEnv;
3593 int rc;
3594 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3595 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 2);
3596 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3597}
3598
3599/** Write to MMIO memory. */
3600static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3601{
3602 CPUX86State *env = (CPUX86State *)pvEnv;
3603 int rc;
3604 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3605 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 4);
3606 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3607}
3608
3609
3610#undef LOG_GROUP
3611#define LOG_GROUP LOG_GROUP_REM_HANDLER
3612
3613/* !!!WARNING!!! This is extremely hackish right now; we assume it's only for LFB access! !!!WARNING!!! */
3614
3615static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3616{
3617 uint8_t u8;
3618 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3619 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8), PGMACCESSORIGIN_REM);
3620 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3621 return u8;
3622}
3623
3624static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3625{
3626 uint16_t u16;
3627 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3628 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16), PGMACCESSORIGIN_REM);
3629 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3630 return u16;
3631}
3632
3633static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3634{
3635 uint32_t u32;
3636 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3637 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32), PGMACCESSORIGIN_REM);
3638 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3639 return u32;
3640}
3641
3642static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3643{
3644 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3645 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t), PGMACCESSORIGIN_REM);
3646 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3647}
3648
3649static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3650{
3651 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3652 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t), PGMACCESSORIGIN_REM);
3653 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3654}
3655
3656static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3657{
3658 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3659 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t), PGMACCESSORIGIN_REM);
3660 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3661}
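
/* Note: the write helpers above pass &u32 with a smaller size (e.g.
 * sizeof(uint8_t)), which relies on the host being little-endian so that the
 * low-order byte(s) of u32 sit at the lowest address; that holds for the
 * x86/amd64 hosts this recompiler targets. */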
3662
3663/* -+- disassembly -+- */
3664
3665#undef LOG_GROUP
3666#define LOG_GROUP LOG_GROUP_REM_DISAS
3667
3668
3669/**
3670 * Enables or disables single stepped disassembly.
3671 *
3672 * @returns VBox status code.
3673 * @param pVM VM handle.
3674 * @param fEnable To enable set this flag, to disable clear it.
3675 */
3676static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3677{
3678 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3679 VM_ASSERT_EMT(pVM);
3680
3681 if (fEnable)
3682 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3683 else
3684 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3685#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3686 cpu_single_step(&pVM->rem.s.Env, fEnable);
3687#endif
3688 return VINF_SUCCESS;
3689}
3690
3691
3692/**
3693 * Enables or disables single stepped disassembly.
3694 *
3695 * @returns VBox status code.
3696 * @param pVM VM handle.
3697 * @param fEnable To enable set this flag, to disable clear it.
3698 */
3699REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3700{
3701 int rc;
3702
3703 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3704 if (VM_IS_EMT(pVM))
3705 return remR3DisasEnableStepping(pVM, fEnable);
3706
3707 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3708 AssertRC(rc);
3709 return rc;
3710}
3711
3712
3713#ifdef VBOX_WITH_DEBUGGER
3714/**
3715 * External Debugger Command: .remstep [on|off|1|0]
3716 */
3717static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM,
3718 PCDBGCVAR paArgs, unsigned cArgs)
3719{
3720 int rc;
3721 PVM pVM = pUVM->pVM;
3722
3723 if (cArgs == 0)
3724 /*
3725 * Print the current status.
3726 */
3727 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3728 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3729 else
3730 {
3731 /*
3732 * Convert the argument and change the mode.
3733 */
3734 bool fEnable;
3735 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3736 if (RT_SUCCESS(rc))
3737 {
3738 rc = REMR3DisasEnableStepping(pVM, fEnable);
3739 if (RT_SUCCESS(rc))
3740 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3741 else
3742 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3743 }
3744 else
3745 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3746 }
3747 return rc;
3748}
3749#endif /* VBOX_WITH_DEBUGGER */
3750
3751
3752/**
3753 * Disassembles one instruction and prints it to the log.
3754 *
3755 * @returns Success indicator.
3756 * @param env Pointer to the recompiler CPU structure.
3757 * @param f32BitCode Whether the code should be disassembled as 16 or
3758 * 32 bit code. If -1 the CS selector will be
3759 * inspected.
3760 * @param pszPrefix Log prefix string.
3761 */
3762bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3763{
3764 PVM pVM = env->pVM;
3765 const bool fLog = LogIsEnabled();
3766 const bool fLog2 = LogIs2Enabled();
3767 int rc = VINF_SUCCESS;
3768
3769 /*
3770 * Don't bother if there ain't any log output to do.
3771 */
3772 if (!fLog && !fLog2)
3773 return true;
3774
3775 /*
3776 * Update the state so DBGF reads the correct register values.
3777 */
3778 remR3StateUpdate(pVM, env->pVCpu);
3779
3780 /*
3781 * Log registers if requested.
3782 */
3783 if (fLog2)
3784 DBGFR3_INFO_LOG(pVM, env->pVCpu, "cpumguest", pszPrefix);
3785
3786 /*
3787 * Disassemble to log.
3788 */
3789 if (fLog)
3790 {
3791 PVMCPU pVCpu = VMMGetCpu(pVM);
3792 char szBuf[256];
3793 szBuf[0] = '\0';
3794        rc = DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM,
3795                                pVCpu->idCpu,
3796                                0, /* Sel */  0, /* GCPtr */
3797                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3798                                szBuf,
3799                                sizeof(szBuf),
3800                                NULL);
3801 if (RT_FAILURE(rc))
3802 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3803 if (pszPrefix && *pszPrefix)
3804 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3805 else
3806 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3807 }
3808
3809 return RT_SUCCESS(rc);
3810}
3811
3812
3813/**
3814 * Disassemble recompiled code.
3815 *
3816 * @param phFileIgnored Ignored, logfile usually.
3817 * @param pvCode Pointer to the code block.
3818 * @param cb Size of the code block.
3819 */
3820void disas(FILE *phFileIgnored, void *pvCode, unsigned long cb)
3821{
3822 if (LogIs2Enabled())
3823 {
3824 unsigned off = 0;
3825 char szOutput[256];
3826 DISCPUSTATE Cpu;
3827#ifdef RT_ARCH_X86
3828 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
3829#else
3830 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
3831#endif
3832
3833 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3834 while (off < cb)
3835 {
3836 uint32_t cbInstr;
3837 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
3838 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
3839 if (RT_SUCCESS(rc))
3840 RTLogPrintf("%s", szOutput);
3841 else
3842 {
3843 RTLogPrintf("disas error %Rrc\n", rc);
3844 cbInstr = 1;
3845 }
3846 off += cbInstr;
3847 }
3848 }
3849}
3850
3851
3852/**
3853 * Disassemble guest code.
3854 *
3855 * @param phFileIgnored Ignored, logfile usually.
3856 * @param uCode The guest address of the code to disassemble. (flat?)
3857 * @param cb Number of bytes to disassemble.
3858 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3859 */
3860void target_disas(FILE *phFileIgnored, target_ulong uCode, target_ulong cb, int fFlags)
3861{
3862 if (LogIs2Enabled())
3863 {
3864 PVM pVM = cpu_single_env->pVM;
3865 PVMCPU pVCpu = cpu_single_env->pVCpu;
3866 RTSEL cs;
3867 RTGCUINTPTR eip;
3868
3869 Assert(pVCpu);
3870
3871 /*
3872 * Update the state so DBGF reads the correct register values (flags).
3873 */
3874 remR3StateUpdate(pVM, pVCpu);
3875
3876 /*
3877 * Do the disassembling.
3878 */
3879 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3880 cs = cpu_single_env->segs[R_CS].selector;
3881 eip = uCode - cpu_single_env->segs[R_CS].base;
3882 for (;;)
3883 {
3884 char szBuf[256];
3885 uint32_t cbInstr;
3886 int rc = DBGFR3DisasInstrEx(pVM->pUVM,
3887 pVCpu->idCpu,
3888 cs,
3889 eip,
3890 DBGF_DISAS_FLAGS_DEFAULT_MODE,
3891 szBuf, sizeof(szBuf),
3892 &cbInstr);
3893 if (RT_SUCCESS(rc))
3894 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
3895 else
3896 {
3897                RTLogPrintf("%llx %04x:%llx: disas error %Rrc\n", (uint64_t)uCode, cs, (uint64_t)eip, rc);
3898 cbInstr = 1;
3899 }
3900
3901 /* next */
3902 if (cb <= cbInstr)
3903 break;
3904 cb -= cbInstr;
3905 uCode += cbInstr;
3906 eip += cbInstr;
3907 }
3908 }
3909}
3910
3911
3912/**
3913 * Looks up a guest symbol.
3914 *
3915 * @returns Pointer to symbol name. This is a static buffer.
3916 * @param orig_addr The address in question.
3917 */
3918const char *lookup_symbol(target_ulong orig_addr)
3919{
3920 PVM pVM = cpu_single_env->pVM;
3921 RTGCINTPTR off = 0;
3922 RTDBGSYMBOL Sym;
3923 DBGFADDRESS Addr;
3924
3925 int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, orig_addr),
3926 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
3927 &off, &Sym, NULL /*phMod*/);
3928 if (RT_SUCCESS(rc))
3929 {
3930 static char szSym[sizeof(Sym.szName) + 48];
3931 if (!off)
3932 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3933 else if (off > 0)
3934 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3935 else
3936 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3937 return szSym;
3938 }
3939 return "<N/A>";
3940}
3941
3942
3943#undef LOG_GROUP
3944#define LOG_GROUP LOG_GROUP_REM
3945
3946
3947/* -+- FF notifications -+- */
3948
3949/**
3950 * Notification about the interrupt FF being set.
3951 *
3952 * @param pVM VM Handle.
3953 * @param pVCpu VMCPU Handle.
3954 * @thread The emulation thread.
3955 */
3956REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3957{
3958 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3959 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3960 if (pVM->rem.s.fInREM)
3961 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_HARD);
3962}
3963
3964
3965/**
3966 * Notification about the interrupt FF being cleared.
3967 *
3968 * @param pVM VM Handle.
3969 * @param pVCpu VMCPU Handle.
3970 * @thread Any.
3971 */
3972REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3973{
3974 LogFlow(("REMR3NotifyInterruptClear:\n"));
3975 if (pVM->rem.s.fInREM)
3976 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3977}
3978
3979
3980/**
3981 * Notification about pending timer(s).
3982 *
3983 * @param pVM VM Handle.
3984 * @param pVCpuDst The target cpu for this notification.
3985 * TM will not broadcast pending timer events, but uses
3986 * a dedicated EMT for them. So, only interrupt REM
3987 * execution if the given CPU is executing in REM.
3988 * @thread Any.
3989 */
3990REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3991{
3992#ifndef DEBUG_bird
3993 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3994#endif
3995 if (pVM->rem.s.fInREM)
3996 {
3997 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3998 {
3999 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4000 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4001 CPU_INTERRUPT_EXTERNAL_TIMER);
4002 }
4003 else
4004 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4005 }
4006 else
4007 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4008}
4009
4010
4011/**
4012 * Notification about pending DMA transfers.
4013 *
4014 * @param pVM VM Handle.
4015 * @thread Any.
4016 */
4017REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4018{
4019 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4020 if (pVM->rem.s.fInREM)
4021 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_DMA);
4022}
4023
4024
4025/**
4026 * Notification about pending queue items.
4027 *
4028 * @param pVM VM Handle.
4029 * @thread Any.
4030 */
4031REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4032{
4033 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4034 if (pVM->rem.s.fInREM)
4035 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
4036}
4037
4038
4039/**
4040 * Notification about pending FF set by an external thread.
4041 *
4042 * @param pVM VM handle.
4043 * @thread Any.
4044 */
4045REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4046{
4047 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4048 if (pVM->rem.s.fInREM)
4049 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
4050}
4051
4052
4053#ifdef VBOX_WITH_STATISTICS
4054void remR3ProfileStart(int statcode)
4055{
4056 STAMPROFILEADV *pStat;
4057 switch(statcode)
4058 {
4059 case STATS_EMULATE_SINGLE_INSTR:
4060 pStat = &gStatExecuteSingleInstr;
4061 break;
4062 case STATS_QEMU_COMPILATION:
4063 pStat = &gStatCompilationQEmu;
4064 break;
4065 case STATS_QEMU_RUN_EMULATED_CODE:
4066 pStat = &gStatRunCodeQEmu;
4067 break;
4068 case STATS_QEMU_TOTAL:
4069 pStat = &gStatTotalTimeQEmu;
4070 break;
4071 case STATS_QEMU_RUN_TIMERS:
4072 pStat = &gStatTimers;
4073 break;
4074 case STATS_TLB_LOOKUP:
4075            pStat = &gStatTBLookup;
4076 break;
4077 case STATS_IRQ_HANDLING:
4078            pStat = &gStatIRQ;
4079 break;
4080 case STATS_RAW_CHECK:
4081 pStat = &gStatRawCheck;
4082 break;
4083
4084 default:
4085 AssertMsgFailed(("unknown stat %d\n", statcode));
4086 return;
4087 }
4088 STAM_PROFILE_ADV_START(pStat, a);
4089}
4090
4091
4092void remR3ProfileStop(int statcode)
4093{
4094 STAMPROFILEADV *pStat;
4095 switch(statcode)
4096 {
4097 case STATS_EMULATE_SINGLE_INSTR:
4098 pStat = &gStatExecuteSingleInstr;
4099 break;
4100 case STATS_QEMU_COMPILATION:
4101 pStat = &gStatCompilationQEmu;
4102 break;
4103 case STATS_QEMU_RUN_EMULATED_CODE:
4104 pStat = &gStatRunCodeQEmu;
4105 break;
4106 case STATS_QEMU_TOTAL:
4107 pStat = &gStatTotalTimeQEmu;
4108 break;
4109 case STATS_QEMU_RUN_TIMERS:
4110 pStat = &gStatTimers;
4111 break;
4112 case STATS_TLB_LOOKUP:
4113            pStat = &gStatTBLookup;
4114 break;
4115 case STATS_IRQ_HANDLING:
4116            pStat = &gStatIRQ;
4117 break;
4118 case STATS_RAW_CHECK:
4119 pStat = &gStatRawCheck;
4120 break;
4121 default:
4122 AssertMsgFailed(("unknown stat %d\n", statcode));
4123 return;
4124 }
4125 STAM_PROFILE_ADV_STOP(pStat, a);
4126}
4127#endif
4128
4129/**
4130 * Raise an RC, force rem exit.
4131 *
4132 * @param pVM VM handle.
4133 * @param rc The rc.
4134 */
4135void remR3RaiseRC(PVM pVM, int rc)
4136{
4137 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4138 Assert(pVM->rem.s.fInREM);
4139 VM_ASSERT_EMT(pVM);
4140 pVM->rem.s.rc = rc;
4141 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4142}
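
/* Flow sketch (an assumption based on the fields used above): the stored
 * pVM->rem.s.rc is later picked up by the REM run loop once the inner qemu
 * execution loop observes CPU_INTERRUPT_RC and exits. */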
4143
4144
4145/* -+- timers -+- */
4146
4147uint64_t cpu_get_tsc(CPUX86State *env)
4148{
4149 STAM_COUNTER_INC(&gStatCpuGetTSC);
4150 return TMCpuTickGet(env->pVCpu);
4151}
4152
4153
4154/* -+- interrupts -+- */
4155
4156void cpu_set_ferr(CPUX86State *env)
4157{
4158 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4159 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4160}
4161
4162int cpu_get_pic_interrupt(CPUX86State *env)
4163{
4164 uint8_t u8Interrupt;
4165 int rc;
4166
4167 if (VMCPU_FF_TEST_AND_CLEAR(env->pVCpu, VMCPU_FF_UPDATE_APIC))
4168 APICUpdatePendingInterrupts(env->pVCpu);
4169
4170 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4171 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4172 * with the (a)pic.
4173 */
4174 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4175 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4176 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4177 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4178 if (RT_SUCCESS(rc))
4179 {
4180 if (VMCPU_FF_IS_ANY_SET(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4181 env->interrupt_request |= CPU_INTERRUPT_HARD;
4182 return u8Interrupt;
4183 }
4184 return -1;
4185}
4186
4187
4188/* -+- local apic -+- */
4189
4190#if 0 /* CPUMSetGuestMsr does this now. */
4191void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4192{
4193 int rc = PDMApicSetBase(env->pVM, val);
4194 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4195}
4196#endif
4197
4198uint64_t cpu_get_apic_base(CPUX86State *env)
4199{
4200 uint64_t u64;
4201 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(env->pVCpu, MSR_IA32_APICBASE, &u64);
4202 if (RT_SUCCESS(rcStrict))
4203 {
4204 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4205 return u64;
4206 }
4207 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
4208 return 0;
4209}
4210
4211void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4212{
4213 int rc = APICSetTpr(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4214 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4215}
4216
4217uint8_t cpu_get_apic_tpr(CPUX86State *env)
4218{
4219 uint8_t u8;
4220 int rc = APICGetTpr(env->pVCpu, &u8, NULL, NULL);
4221 if (RT_SUCCESS(rc))
4222 {
4223 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4224 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4225 }
4226 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4227 return 0;
4228}
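
/* Worked example of the CR8 <-> TPR mapping used above: a guest CR8 value of
 * 0x3 is stored as TPR byte 0x30 (val << 4), and a TPR byte of 0xa0 reads
 * back as CR8 0xa (u8 >> 4). */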
4229
4230/**
4231 * Read an MSR.
4232 *
4233 * @retval 0 success.
4234 * @retval -1 failure, raise \#GP(0).
4235 * @param env The cpu state.
4236 * @param idMsr The MSR to read.
4237 * @param puValue Where to return the value.
4238 */
4239int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4240{
4241 Assert(env->pVCpu);
4242 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4243}
4244
4245/**
4246 * Write to an MSR.
4247 *
4248 * @retval 0 success.
4249 * @retval -1 failure, raise \#GP(0).
4250 * @param env The cpu state.
4251 * @param idMsr The MSR to write to.
4252 * @param uValue The value to write.
4253 */
4254int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4255{
4256 Assert(env->pVCpu);
4257 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4258}
4259
4260/* -+- I/O Ports -+- */
4261
4262#undef LOG_GROUP
4263#define LOG_GROUP LOG_GROUP_REM_IOPORT
4264
4265void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4266{
4267 int rc;
4268
4269 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4270 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4271
4272 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 1);
4273 if (RT_LIKELY(rc == VINF_SUCCESS))
4274 return;
4275 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4276 {
4277 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4278 remR3RaiseRC(env->pVM, rc);
4279 return;
4280 }
4281 remAbort(rc, __FUNCTION__);
4282}
4283
4284void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4285{
4286 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4287 int rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 2);
4288 if (RT_LIKELY(rc == VINF_SUCCESS))
4289 return;
4290 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4291 {
4292 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4293 remR3RaiseRC(env->pVM, rc);
4294 return;
4295 }
4296 remAbort(rc, __FUNCTION__);
4297}
4298
4299void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4300{
4301 int rc;
4302 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4303 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 4);
4304 if (RT_LIKELY(rc == VINF_SUCCESS))
4305 return;
4306 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4307 {
4308 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4309 remR3RaiseRC(env->pVM, rc);
4310 return;
4311 }
4312 remAbort(rc, __FUNCTION__);
4313}
4314
4315uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4316{
4317 uint32_t u32 = 0;
4318 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 1);
4319 if (RT_LIKELY(rc == VINF_SUCCESS))
4320 {
4321 if (/*addr != 0x61 && */addr != 0x71)
4322 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4323 return (uint8_t)u32;
4324 }
4325 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4326 {
4327 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4328 remR3RaiseRC(env->pVM, rc);
4329 return (uint8_t)u32;
4330 }
4331 remAbort(rc, __FUNCTION__);
4332 return UINT8_C(0xff);
4333}
4334
4335uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4336{
4337 uint32_t u32 = 0;
4338 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 2);
4339 if (RT_LIKELY(rc == VINF_SUCCESS))
4340 {
4341 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4342 return (uint16_t)u32;
4343 }
4344 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4345 {
4346 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4347 remR3RaiseRC(env->pVM, rc);
4348 return (uint16_t)u32;
4349 }
4350 remAbort(rc, __FUNCTION__);
4351 return UINT16_C(0xffff);
4352}
4353
4354uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4355{
4356 uint32_t u32 = 0;
4357 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 4);
4358 if (RT_LIKELY(rc == VINF_SUCCESS))
4359 {
4360 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4361 return u32;
4362 }
4363 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4364 {
4365 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4366 remR3RaiseRC(env->pVM, rc);
4367 return u32;
4368 }
4369 remAbort(rc, __FUNCTION__);
4370 return UINT32_C(0xffffffff);
4371}
4372
4373#undef LOG_GROUP
4374#define LOG_GROUP LOG_GROUP_REM
4375
4376
4377/* -+- helpers and misc other interfaces -+- */
4378
4379/**
4380 * Perform the CPUID instruction.
4381 *
4382 * @param env Pointer to the recompiler CPU structure.
4383 * @param idx The CPUID leaf (eax).
4384 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4385 * @param pEAX Where to store eax.
4386 * @param pEBX Where to store ebx.
4387 * @param pECX Where to store ecx.
4388 * @param pEDX Where to store edx.
4389 */
4390void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4391 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4392{
4393 NOREF(idxSub);
4394 CPUMGetGuestCpuId(env->pVCpu, idx, idxSub, pEAX, pEBX, pECX, pEDX);
4395}
4396
4397
4398#if 0 /* not used */
4399/**
4400 * Interface for qemu hardware to report back fatal errors.
4401 */
4402void hw_error(const char *pszFormat, ...)
4403{
4404 /*
4405 * Bitch about it.
4406 */
4407 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4408 * this in my Odin32 tree at home! */
4409 va_list args;
4410 va_start(args, pszFormat);
4411 RTLogPrintf("fatal error in virtual hardware:");
4412 RTLogPrintfV(pszFormat, args);
4413 va_end(args);
4414 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4415
4416 /*
4417 * If we're in REM context we'll sync back the state before 'jumping' to
4418 * the EMs failure handling.
4419 */
4420 PVM pVM = cpu_single_env->pVM;
4421 if (pVM->rem.s.fInREM)
4422 REMR3StateBack(pVM);
4423 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4424 AssertMsgFailed(("EMR3FatalError returned!\n"));
4425}
4426#endif
4427
4428/**
4429 * Interface for the qemu cpu to report unhandled situation
4430 * raising a fatal VM error.
4431 */
4432void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4433{
4434 va_list va;
4435 PVM pVM;
4436 PVMCPU pVCpu;
4437 char szMsg[256];
4438
4439 /*
4440 * Bitch about it.
4441 */
4442 RTLogFlags(NULL, "nodisabled nobuffered");
4443 RTLogFlush(NULL);
4444
4445 va_start(va, pszFormat);
4446#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4447 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4448 unsigned cArgs = 0;
4449 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4450 const char *psz = strchr(pszFormat, '%');
4451 while (psz && cArgs < 6)
4452 {
4453 auArgs[cArgs++] = va_arg(va, uintptr_t);
4454 psz = strchr(psz + 1, '%');
4455 }
4456 switch (cArgs)
4457 {
4458 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4459 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4460 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4461 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4462 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4463 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4464 default:
4465 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4466 }
4467#else
4468 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4469#endif
4470 va_end(va);
4471
4472 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4473 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4474
4475 /*
4476 * If we're in REM context we'll sync back the state before 'jumping' to
4477 * the EMs failure handling.
4478 */
4479 pVM = cpu_single_env->pVM;
4480 pVCpu = cpu_single_env->pVCpu;
4481 Assert(pVCpu);
4482
4483 if (pVM->rem.s.fInREM)
4484 REMR3StateBack(pVM, pVCpu);
4485 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4486 AssertMsgFailed(("EMR3FatalError returned!\n"));
4487}
4488
4489
4490/**
4491 * Aborts the VM.
4492 *
4493 * @param rc VBox error code.
4494 * @param pszTip Hint about why/when this happened.
4495 */
4496void remAbort(int rc, const char *pszTip)
4497{
4498 PVM pVM;
4499 PVMCPU pVCpu;
4500
4501 /*
4502 * Bitch about it.
4503 */
4504 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4505 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4506
4507 /*
4508 * Jump back to where we entered the recompiler.
4509 */
4510 pVM = cpu_single_env->pVM;
4511 pVCpu = cpu_single_env->pVCpu;
4512 Assert(pVCpu);
4513
4514 if (pVM->rem.s.fInREM)
4515 REMR3StateBack(pVM, pVCpu);
4516
4517 EMR3FatalError(pVCpu, rc);
4518 AssertMsgFailed(("EMR3FatalError returned!\n"));
4519}
4520
4521
4522/**
4523 * Dumps a linux system call.
4524 * @param pVCpu VMCPU handle.
4525 */
4526void remR3DumpLnxSyscall(PVMCPU pVCpu)
4527{
4528 static const char *apsz[] =
4529 {
4530 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4531 "sys_exit",
4532 "sys_fork",
4533 "sys_read",
4534 "sys_write",
4535 "sys_open", /* 5 */
4536 "sys_close",
4537 "sys_waitpid",
4538 "sys_creat",
4539 "sys_link",
4540 "sys_unlink", /* 10 */
4541 "sys_execve",
4542 "sys_chdir",
4543 "sys_time",
4544 "sys_mknod",
4545 "sys_chmod", /* 15 */
4546 "sys_lchown16",
4547 "sys_ni_syscall", /* old break syscall holder */
4548 "sys_stat",
4549 "sys_lseek",
4550 "sys_getpid", /* 20 */
4551 "sys_mount",
4552 "sys_oldumount",
4553 "sys_setuid16",
4554 "sys_getuid16",
4555 "sys_stime", /* 25 */
4556 "sys_ptrace",
4557 "sys_alarm",
4558 "sys_fstat",
4559 "sys_pause",
4560 "sys_utime", /* 30 */
4561 "sys_ni_syscall", /* old stty syscall holder */
4562 "sys_ni_syscall", /* old gtty syscall holder */
4563 "sys_access",
4564 "sys_nice",
4565 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4566 "sys_sync",
4567 "sys_kill",
4568 "sys_rename",
4569 "sys_mkdir",
4570 "sys_rmdir", /* 40 */
4571 "sys_dup",
4572 "sys_pipe",
4573 "sys_times",
4574 "sys_ni_syscall", /* old prof syscall holder */
4575 "sys_brk", /* 45 */
4576 "sys_setgid16",
4577 "sys_getgid16",
4578 "sys_signal",
4579 "sys_geteuid16",
4580 "sys_getegid16", /* 50 */
4581 "sys_acct",
4582 "sys_umount", /* recycled never used phys() */
4583 "sys_ni_syscall", /* old lock syscall holder */
4584 "sys_ioctl",
4585 "sys_fcntl", /* 55 */
4586 "sys_ni_syscall", /* old mpx syscall holder */
4587 "sys_setpgid",
4588 "sys_ni_syscall", /* old ulimit syscall holder */
4589 "sys_olduname",
4590 "sys_umask", /* 60 */
4591 "sys_chroot",
4592 "sys_ustat",
4593 "sys_dup2",
4594 "sys_getppid",
4595 "sys_getpgrp", /* 65 */
4596 "sys_setsid",
4597 "sys_sigaction",
4598 "sys_sgetmask",
4599 "sys_ssetmask",
4600 "sys_setreuid16", /* 70 */
4601 "sys_setregid16",
4602 "sys_sigsuspend",
4603 "sys_sigpending",
4604 "sys_sethostname",
4605 "sys_setrlimit", /* 75 */
4606 "sys_old_getrlimit",
4607 "sys_getrusage",
4608 "sys_gettimeofday",
4609 "sys_settimeofday",
4610 "sys_getgroups16", /* 80 */
4611 "sys_setgroups16",
4612 "old_select",
4613 "sys_symlink",
4614 "sys_lstat",
4615 "sys_readlink", /* 85 */
4616 "sys_uselib",
4617 "sys_swapon",
4618 "sys_reboot",
4619 "old_readdir",
4620 "old_mmap", /* 90 */
4621 "sys_munmap",
4622 "sys_truncate",
4623 "sys_ftruncate",
4624 "sys_fchmod",
4625 "sys_fchown16", /* 95 */
4626 "sys_getpriority",
4627 "sys_setpriority",
4628 "sys_ni_syscall", /* old profil syscall holder */
4629 "sys_statfs",
4630 "sys_fstatfs", /* 100 */
4631 "sys_ioperm",
4632 "sys_socketcall",
4633 "sys_syslog",
4634 "sys_setitimer",
4635 "sys_getitimer", /* 105 */
4636 "sys_newstat",
4637 "sys_newlstat",
4638 "sys_newfstat",
4639 "sys_uname",
4640 "sys_iopl", /* 110 */
4641 "sys_vhangup",
4642 "sys_ni_syscall", /* old "idle" system call */
4643 "sys_vm86old",
4644 "sys_wait4",
4645 "sys_swapoff", /* 115 */
4646 "sys_sysinfo",
4647 "sys_ipc",
4648 "sys_fsync",
4649 "sys_sigreturn",
4650 "sys_clone", /* 120 */
4651 "sys_setdomainname",
4652 "sys_newuname",
4653 "sys_modify_ldt",
4654 "sys_adjtimex",
4655 "sys_mprotect", /* 125 */
4656 "sys_sigprocmask",
4657 "sys_ni_syscall", /* old "create_module" */
4658 "sys_init_module",
4659 "sys_delete_module",
4660 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4661 "sys_quotactl",
4662 "sys_getpgid",
4663 "sys_fchdir",
4664 "sys_bdflush",
4665 "sys_sysfs", /* 135 */
4666 "sys_personality",
4667 "sys_ni_syscall", /* reserved for afs_syscall */
4668 "sys_setfsuid16",
4669 "sys_setfsgid16",
4670 "sys_llseek", /* 140 */
4671 "sys_getdents",
4672 "sys_select",
4673 "sys_flock",
4674 "sys_msync",
4675 "sys_readv", /* 145 */
4676 "sys_writev",
4677 "sys_getsid",
4678 "sys_fdatasync",
4679 "sys_sysctl",
4680 "sys_mlock", /* 150 */
4681 "sys_munlock",
4682 "sys_mlockall",
4683 "sys_munlockall",
4684 "sys_sched_setparam",
4685 "sys_sched_getparam", /* 155 */
4686 "sys_sched_setscheduler",
4687 "sys_sched_getscheduler",
4688 "sys_sched_yield",
4689 "sys_sched_get_priority_max",
4690 "sys_sched_get_priority_min", /* 160 */
4691 "sys_sched_rr_get_interval",
4692 "sys_nanosleep",
4693 "sys_mremap",
4694 "sys_setresuid16",
4695 "sys_getresuid16", /* 165 */
4696 "sys_vm86",
4697 "sys_ni_syscall", /* Old sys_query_module */
4698 "sys_poll",
4699 "sys_nfsservctl",
4700 "sys_setresgid16", /* 170 */
4701 "sys_getresgid16",
4702 "sys_prctl",
4703 "sys_rt_sigreturn",
4704 "sys_rt_sigaction",
4705 "sys_rt_sigprocmask", /* 175 */
4706 "sys_rt_sigpending",
4707 "sys_rt_sigtimedwait",
4708 "sys_rt_sigqueueinfo",
4709 "sys_rt_sigsuspend",
4710 "sys_pread64", /* 180 */
4711 "sys_pwrite64",
4712 "sys_chown16",
4713 "sys_getcwd",
4714 "sys_capget",
4715 "sys_capset", /* 185 */
4716 "sys_sigaltstack",
4717 "sys_sendfile",
4718 "sys_ni_syscall", /* reserved for streams1 */
4719 "sys_ni_syscall", /* reserved for streams2 */
4720 "sys_vfork", /* 190 */
4721 "sys_getrlimit",
4722 "sys_mmap2",
4723 "sys_truncate64",
4724 "sys_ftruncate64",
4725 "sys_stat64", /* 195 */
4726 "sys_lstat64",
4727 "sys_fstat64",
4728 "sys_lchown",
4729 "sys_getuid",
4730 "sys_getgid", /* 200 */
4731 "sys_geteuid",
4732 "sys_getegid",
4733 "sys_setreuid",
4734 "sys_setregid",
4735 "sys_getgroups", /* 205 */
4736 "sys_setgroups",
4737 "sys_fchown",
4738 "sys_setresuid",
4739 "sys_getresuid",
4740 "sys_setresgid", /* 210 */
4741 "sys_getresgid",
4742 "sys_chown",
4743 "sys_setuid",
4744 "sys_setgid",
4745 "sys_setfsuid", /* 215 */
4746 "sys_setfsgid",
4747 "sys_pivot_root",
4748 "sys_mincore",
4749 "sys_madvise",
4750 "sys_getdents64", /* 220 */
4751 "sys_fcntl64",
4752 "sys_ni_syscall", /* reserved for TUX */
4753 "sys_ni_syscall",
4754 "sys_gettid",
4755 "sys_readahead", /* 225 */
4756 "sys_setxattr",
4757 "sys_lsetxattr",
4758 "sys_fsetxattr",
4759 "sys_getxattr",
4760 "sys_lgetxattr", /* 230 */
4761 "sys_fgetxattr",
4762 "sys_listxattr",
4763 "sys_llistxattr",
4764 "sys_flistxattr",
4765 "sys_removexattr", /* 235 */
4766 "sys_lremovexattr",
4767 "sys_fremovexattr",
4768 "sys_tkill",
4769 "sys_sendfile64",
4770 "sys_futex", /* 240 */
4771 "sys_sched_setaffinity",
4772 "sys_sched_getaffinity",
4773 "sys_set_thread_area",
4774 "sys_get_thread_area",
4775 "sys_io_setup", /* 245 */
4776 "sys_io_destroy",
4777 "sys_io_getevents",
4778 "sys_io_submit",
4779 "sys_io_cancel",
4780 "sys_fadvise64", /* 250 */
4781 "sys_ni_syscall",
4782 "sys_exit_group",
4783 "sys_lookup_dcookie",
4784 "sys_epoll_create",
4785 "sys_epoll_ctl", /* 255 */
4786 "sys_epoll_wait",
4787 "sys_remap_file_pages",
4788 "sys_set_tid_address",
4789 "sys_timer_create",
4790 "sys_timer_settime", /* 260 */
4791 "sys_timer_gettime",
4792 "sys_timer_getoverrun",
4793 "sys_timer_delete",
4794 "sys_clock_settime",
4795 "sys_clock_gettime", /* 265 */
4796 "sys_clock_getres",
4797 "sys_clock_nanosleep",
4798 "sys_statfs64",
4799 "sys_fstatfs64",
4800 "sys_tgkill", /* 270 */
4801 "sys_utimes",
4802 "sys_fadvise64_64",
4803 "sys_ni_syscall" /* sys_vserver */
4804 };
4805
4806 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
4807 switch (uEAX)
4808 {
4809 default:
4810 if (uEAX < RT_ELEMENTS(apsz))
4811 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4812 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
4813 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
4814 else
4815 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
4816 break;
4817
4818 }
4819}
4820
4821
4822/**
4823 * Dumps an OpenBSD system call.
4824 * @param pVCpu VMCPU handle.
4825 */
4826void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4827{
4828 static const char *apsz[] =
4829 {
4830 "SYS_syscall", //0
4831 "SYS_exit", //1
4832 "SYS_fork", //2
4833 "SYS_read", //3
4834 "SYS_write", //4
4835 "SYS_open", //5
4836 "SYS_close", //6
4837 "SYS_wait4", //7
4838 "SYS_8",
4839 "SYS_link", //9
4840 "SYS_unlink", //10
4841 "SYS_11",
4842 "SYS_chdir", //12
4843 "SYS_fchdir", //13
4844 "SYS_mknod", //14
4845 "SYS_chmod", //15
4846 "SYS_chown", //16
4847 "SYS_break", //17
4848 "SYS_18",
4849 "SYS_19",
4850 "SYS_getpid", //20
4851 "SYS_mount", //21
4852 "SYS_unmount", //22
4853 "SYS_setuid", //23
4854 "SYS_getuid", //24
4855 "SYS_geteuid", //25
4856 "SYS_ptrace", //26
4857 "SYS_recvmsg", //27
4858 "SYS_sendmsg", //28
4859 "SYS_recvfrom", //29
4860 "SYS_accept", //30
4861 "SYS_getpeername", //31
4862 "SYS_getsockname", //32
4863 "SYS_access", //33
4864 "SYS_chflags", //34
4865 "SYS_fchflags", //35
4866 "SYS_sync", //36
4867 "SYS_kill", //37
4868 "SYS_38",
4869 "SYS_getppid", //39
4870 "SYS_40",
4871 "SYS_dup", //41
4872 "SYS_opipe", //42
4873 "SYS_getegid", //43
4874 "SYS_profil", //44
4875 "SYS_ktrace", //45
4876 "SYS_sigaction", //46
4877 "SYS_getgid", //47
4878 "SYS_sigprocmask", //48
4879 "SYS_getlogin", //49
4880 "SYS_setlogin", //50
4881 "SYS_acct", //51
4882 "SYS_sigpending", //52
4883 "SYS_osigaltstack", //53
4884 "SYS_ioctl", //54
4885 "SYS_reboot", //55
4886 "SYS_revoke", //56
4887 "SYS_symlink", //57
4888 "SYS_readlink", //58
4889 "SYS_execve", //59
4890 "SYS_umask", //60
4891 "SYS_chroot", //61
4892 "SYS_62",
4893 "SYS_63",
4894 "SYS_64",
4895 "SYS_65",
4896 "SYS_vfork", //66
4897 "SYS_67",
4898 "SYS_68",
4899 "SYS_sbrk", //69
4900 "SYS_sstk", //70
4901 "SYS_71",
4902 "SYS_vadvise", //72
4903 "SYS_munmap", //73
4904 "SYS_mprotect", //74
4905 "SYS_madvise", //75
4906 "SYS_76",
4907 "SYS_77",
4908 "SYS_mincore", //78
4909 "SYS_getgroups", //79
4910 "SYS_setgroups", //80
4911 "SYS_getpgrp", //81
4912 "SYS_setpgid", //82
4913 "SYS_setitimer", //83
4914 "SYS_84",
4915 "SYS_85",
4916 "SYS_getitimer", //86
4917 "SYS_87",
4918 "SYS_88",
4919 "SYS_89",
4920 "SYS_dup2", //90
4921 "SYS_91",
4922 "SYS_fcntl", //92
4923 "SYS_select", //93
4924 "SYS_94",
4925 "SYS_fsync", //95
4926 "SYS_setpriority", //96
4927 "SYS_socket", //97
4928 "SYS_connect", //98
4929 "SYS_99",
4930 "SYS_getpriority", //100
4931 "SYS_101",
4932 "SYS_102",
4933 "SYS_sigreturn", //103
4934 "SYS_bind", //104
4935 "SYS_setsockopt", //105
4936 "SYS_listen", //106
4937 "SYS_107",
4938 "SYS_108",
4939 "SYS_109",
4940 "SYS_110",
4941 "SYS_sigsuspend", //111
4942 "SYS_112",
4943 "SYS_113",
4944 "SYS_114",
4945 "SYS_115",
4946 "SYS_gettimeofday", //116
4947 "SYS_getrusage", //117
4948 "SYS_getsockopt", //118
4949 "SYS_119",
4950 "SYS_readv", //120
4951 "SYS_writev", //121
4952 "SYS_settimeofday", //122
4953 "SYS_fchown", //123
4954 "SYS_fchmod", //124
4955 "SYS_125",
4956 "SYS_setreuid", //126
4957 "SYS_setregid", //127
4958 "SYS_rename", //128
4959 "SYS_129",
4960 "SYS_130",
4961 "SYS_flock", //131
4962 "SYS_mkfifo", //132
4963 "SYS_sendto", //133
4964 "SYS_shutdown", //134
4965 "SYS_socketpair", //135
4966 "SYS_mkdir", //136
4967 "SYS_rmdir", //137
4968 "SYS_utimes", //138
4969 "SYS_139",
4970 "SYS_adjtime", //140
4971 "SYS_141",
4972 "SYS_142",
4973 "SYS_143",
4974 "SYS_144",
4975 "SYS_145",
4976 "SYS_146",
4977 "SYS_setsid", //147
4978 "SYS_quotactl", //148
4979 "SYS_149",
4980 "SYS_150",
4981 "SYS_151",
4982 "SYS_152",
4983 "SYS_153",
4984 "SYS_154",
4985 "SYS_nfssvc", //155
4986 "SYS_156",
4987 "SYS_157",
4988 "SYS_158",
4989 "SYS_159",
4990 "SYS_160",
4991 "SYS_getfh", //161
4992 "SYS_162",
4993 "SYS_163",
4994 "SYS_164",
4995 "SYS_sysarch", //165
4996 "SYS_166",
4997 "SYS_167",
4998 "SYS_168",
4999 "SYS_169",
5000 "SYS_170",
5001 "SYS_171",
5002 "SYS_172",
5003 "SYS_pread", //173
5004 "SYS_pwrite", //174
5005 "SYS_175",
5006 "SYS_176",
5007 "SYS_177",
5008 "SYS_178",
5009 "SYS_179",
5010 "SYS_180",
5011 "SYS_setgid", //181
5012 "SYS_setegid", //182
5013 "SYS_seteuid", //183
5014 "SYS_lfs_bmapv", //184
5015 "SYS_lfs_markv", //185
5016 "SYS_lfs_segclean", //186
5017 "SYS_lfs_segwait", //187
5018 "SYS_188",
5019 "SYS_189",
5020 "SYS_190",
5021 "SYS_pathconf", //191
5022 "SYS_fpathconf", //192
5023 "SYS_swapctl", //193
5024 "SYS_getrlimit", //194
5025 "SYS_setrlimit", //195
5026 "SYS_getdirentries", //196
5027 "SYS_mmap", //197
5028 "SYS___syscall", //198
5029 "SYS_lseek", //199
5030 "SYS_truncate", //200
5031 "SYS_ftruncate", //201
5032 "SYS___sysctl", //202
5033 "SYS_mlock", //203
5034 "SYS_munlock", //204
5035 "SYS_205",
5036 "SYS_futimes", //206
5037 "SYS_getpgid", //207
5038 "SYS_xfspioctl", //208
5039 "SYS_209",
5040 "SYS_210",
5041 "SYS_211",
5042 "SYS_212",
5043 "SYS_213",
5044 "SYS_214",
5045 "SYS_215",
5046 "SYS_216",
5047 "SYS_217",
5048 "SYS_218",
5049 "SYS_219",
5050 "SYS_220",
5051 "SYS_semget", //221
5052 "SYS_222",
5053 "SYS_223",
5054 "SYS_224",
5055 "SYS_msgget", //225
5056 "SYS_msgsnd", //226
5057 "SYS_msgrcv", //227
5058 "SYS_shmat", //228
5059 "SYS_229",
5060 "SYS_shmdt", //230
5061 "SYS_231",
5062 "SYS_clock_gettime", //232
5063 "SYS_clock_settime", //233
5064 "SYS_clock_getres", //234
5065 "SYS_235",
5066 "SYS_236",
5067 "SYS_237",
5068 "SYS_238",
5069 "SYS_239",
5070 "SYS_nanosleep", //240
5071 "SYS_241",
5072 "SYS_242",
5073 "SYS_243",
5074 "SYS_244",
5075 "SYS_245",
5076 "SYS_246",
5077 "SYS_247",
5078 "SYS_248",
5079 "SYS_249",
5080 "SYS_minherit", //250
5081 "SYS_rfork", //251
5082 "SYS_poll", //252
5083 "SYS_issetugid", //253
5084 "SYS_lchown", //254
5085 "SYS_getsid", //255
5086 "SYS_msync", //256
5087 "SYS_257",
5088 "SYS_258",
5089 "SYS_259",
5090 "SYS_getfsstat", //260
5091 "SYS_statfs", //261
5092 "SYS_fstatfs", //262
5093 "SYS_pipe", //263
5094 "SYS_fhopen", //264
5095 "SYS_265",
5096 "SYS_fhstatfs", //266
5097 "SYS_preadv", //267
5098 "SYS_pwritev", //268
5099 "SYS_kqueue", //269
5100 "SYS_kevent", //270
5101 "SYS_mlockall", //271
5102 "SYS_munlockall", //272
5103 "SYS_getpeereid", //273
5104 "SYS_274",
5105 "SYS_275",
5106 "SYS_276",
5107 "SYS_277",
5108 "SYS_278",
5109 "SYS_279",
5110 "SYS_280",
5111 "SYS_getresuid", //281
5112 "SYS_setresuid", //282
5113 "SYS_getresgid", //283
5114 "SYS_setresgid", //284
5115 "SYS_285",
5116 "SYS_mquery", //286
5117 "SYS_closefrom", //287
5118 "SYS_sigaltstack", //288
5119 "SYS_shmget", //289
5120 "SYS_semop", //290
5121 "SYS_stat", //291
5122 "SYS_fstat", //292
5123 "SYS_lstat", //293
5124 "SYS_fhstat", //294
5125 "SYS___semctl", //295
5126 "SYS_shmctl", //296
5127 "SYS_msgctl", //297
5128 "SYS_MAXSYSCALL", //298
5129 //299
5130 //300
5131 };
5132 uint32_t uEAX;
5133 if (!LogIsEnabled())
5134 return;
5135 uEAX = CPUMGetGuestEAX(pVCpu);
5136 switch (uEAX)
5137 {
5138 default:
5139 if (uEAX < RT_ELEMENTS(apsz))
5140 {
5141 uint32_t au32Args[8] = {0};
5142 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5143 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5144 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5145 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5146 }
5147 else
5148 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5149 break;
5150 }
5151}
5152
5153
5154#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5155/**
5156 * The Dll main entry point (stub).
5157 */
5158bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5159{
5160 return true;
5161}
5162
5163void *memcpy(void *dst, const void *src, size_t size)
5164{
5165 uint8_t *pbDst = (uint8_t *)dst; const uint8_t *pbSrc = (const uint8_t *)src;
5166 while (size-- > 0)
5167 *pbDst++ = *pbSrc++;
5168 return dst;
5169}
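
/* Note: like the standard memcpy, this minimal byte-wise stub gives undefined
 * results for overlapping buffers; it exists only to satisfy the no-CRT
 * x86 Windows build guarded by the #if above. */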
5170
5171#endif
5172
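/** qemu callback invoked on SMM state changes; left as an empty stub here,
 * presumably because this recompiler configuration does not model SMM
 * (an assumption; the body below is empty). */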
5173void cpu_smm_update(CPUX86State *env)
5174{
5175}