VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 55044

Last change on this file since 55044 was 55029, checked in by vboxsync, 10 years ago

VBoxRecompiler.cpp: Use alloca() to allocate the big CPUMCTX structure on windows to (hopefully) avoid compiler bug (32-bit).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 183.1 KB
Line 
1/* $Id: VBoxRecompiler.c 55029 2015-03-31 13:08:19Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/vmm/uvm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50
51#include <VBox/log.h>
52#include <iprt/alloca.h>
53#include <iprt/semaphore.h>
54#include <iprt/asm.h>
55#include <iprt/assert.h>
56#include <iprt/thread.h>
57#include <iprt/string.h>
58
59/* Don't wanna include everything. */
60extern void cpu_exec_init_all(uintptr_t tb_size);
61extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
62extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
63extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
64extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
65extern void tlb_flush(CPUX86State *env, int flush_global);
66extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
67extern void sync_ldtr(CPUX86State *env1, int selector);
68
69#ifdef VBOX_STRICT
70ram_addr_t get_phys_page_offset(target_ulong addr);
71#endif
72
73
74/*******************************************************************************
75* Defined Constants And Macros *
76*******************************************************************************/
77
/** Copy 80-bit fpu register at pSrc to pDst.
 * Done as a single 80-bit structure assignment; this is probably faster
 * than *calling* memcpy for such a small, fixed size.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates: when defined, qemu's own single-step
 * facility is used for logged stepping. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING


/** Selector flag shift between qemu and VBox.
 * VBox shifts the qemu bits to the right. */
#define SEL_FLAGS_SHIFT     (8)
/** Mask applied to the shifted qemu selector flags to get the attributes VBox
 * (VT-x) needs. */
#define SEL_FLAGS_SMASK     UINT32_C(0x1F0FF)
94
95
96/*******************************************************************************
97* Internal Functions *
98*******************************************************************************/
99static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
100static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
101static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
102static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
103
104static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys);
105static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys);
106static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys);
107static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
108static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
109static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
110
111static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
112static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
113static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
114static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
115static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
116static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
117
118static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
119static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
120static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
121
122/*******************************************************************************
123* Global Variables *
124*******************************************************************************/
125
126/** @todo Move stats to REM::s some rainy day we have nothing do to. */
127#ifdef VBOX_WITH_STATISTICS
128static STAMPROFILEADV gStatExecuteSingleInstr;
129static STAMPROFILEADV gStatCompilationQEmu;
130static STAMPROFILEADV gStatRunCodeQEmu;
131static STAMPROFILEADV gStatTotalTimeQEmu;
132static STAMPROFILEADV gStatTimers;
133static STAMPROFILEADV gStatTBLookup;
134static STAMPROFILEADV gStatIRQ;
135static STAMPROFILEADV gStatRawCheck;
136static STAMPROFILEADV gStatMemRead;
137static STAMPROFILEADV gStatMemWrite;
138static STAMPROFILE gStatGCPhys2HCVirt;
139static STAMCOUNTER gStatCpuGetTSC;
140static STAMCOUNTER gStatRefuseTFInhibit;
141static STAMCOUNTER gStatRefuseVM86;
142static STAMCOUNTER gStatRefusePaging;
143static STAMCOUNTER gStatRefusePAE;
144static STAMCOUNTER gStatRefuseIOPLNot0;
145static STAMCOUNTER gStatRefuseIF0;
146static STAMCOUNTER gStatRefuseCode16;
147static STAMCOUNTER gStatRefuseWP0;
148static STAMCOUNTER gStatRefuseRing1or2;
149static STAMCOUNTER gStatRefuseCanExecute;
150static STAMCOUNTER gaStatRefuseStale[6];
151static STAMCOUNTER gStatREMGDTChange;
152static STAMCOUNTER gStatREMIDTChange;
153static STAMCOUNTER gStatREMLDTRChange;
154static STAMCOUNTER gStatREMTRChange;
155static STAMCOUNTER gStatSelOutOfSync[6];
156static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
157static STAMCOUNTER gStatFlushTBs;
158#endif
159/* in exec.c */
160extern uint32_t tlb_flush_count;
161extern uint32_t tb_flush_count;
162extern uint32_t tb_phys_invalidate_count;
163
164/*
165 * Global stuff.
166 */
167
/** MMIO read callbacks, indexed by access size: [0]=byte, [1]=word, [2]=dword.
 * Registered with qemu via cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access size: [0]=byte, [1]=word, [2]=dword. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (PGM access handler pages), indexed by access size. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (PGM access handler pages), indexed by access size. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
199
200
#ifdef VBOX_WITH_DEBUGGER
/*
 * Debugger commands.
 */
static FNDBGCCMD remR3CmdDisasEnableStepping; /* fixed: stray second semicolon removed */

/** '.remstep' arguments. */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0U,        DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors for the '.remstep' debugger command. */
static const DBGCCMD    g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
231
232/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
233 * @todo huh??? That cannot be the case on the mac... So, this
234 * point is probably not valid any longer. */
235uint8_t *code_gen_prologue;
236
237
238/*******************************************************************************
239* Internal Functions *
240*******************************************************************************/
241void remAbort(int rc, const char *pszTip);
242extern int testmath(void);
243
244/* Put them here to avoid unused variable warning. */
245AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
246#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
247//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
248/* Why did this have to be identical?? */
249AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
250#else
251AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
252#endif
253
254
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment (pVM->rem.s.Env), allocates the
 * prologue and single-instruction code buffers, registers the MMIO and
 * access-handler memory callbacks with qemu, registers the saved state
 * unit, the optional debugger command and the statistics, and finally
 * initializes the handler notification free list.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if 0 /* just an annoyance at the moment. */
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all (ram/handler) notifications while we set things up. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue code must be executable; see the @todo on its declaration above. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature bits (std leaf 1 and ext leaf 0x80000001)
       into the qemu environment so translated code sees the same CPU. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* NOTE(review): this inner 'rc' shadows the outer one; registration
           failure is deliberately non-fatal (we just retry on next init). */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",          STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",    STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",        STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",      STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",         STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",    STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",         STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",      STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",         STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",    STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",            STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",     STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",        STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",     STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",          STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    /* The 8-byte check subsumes the 4-byte one; both kept as in the original. */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    STAM_REL_REG(pVM, &tb_flush_count,           STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,          STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     * The array is turned into a singly linked free list; idxNext of the
     * last element is patched to UINT32_MAX below to terminate the chain.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
462
463
464/**
465 * Finalizes the REM initialization.
466 *
467 * This is called after all components, devices and drivers has
468 * been initialized. Its main purpose it to finish the RAM related
469 * initialization.
470 *
471 * @returns VBox status code.
472 *
473 * @param pVM The VM handle.
474 */
475REMR3DECL(int) REMR3InitFinalize(PVM pVM)
476{
477 int rc;
478
479 /*
480 * Ram size & dirty bit map.
481 */
482 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
483 pVM->rem.s.fGCPhysLastRamFixed = true;
484#ifdef RT_STRICT
485 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
486#else
487 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
488#endif
489 return rc;
490}
491
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * Allocates one dirty-tracking byte per guest RAM page. In guarded mode the
 * map is placed at the end of a page-aligned allocation with an inaccessible
 * tail, so overruns past the map fault immediately.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param fGuarded Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM blocks are registered with qemu. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    /* Catches wrap-around when GCPhysLastRam is the maximum address. */
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    /* One dirty byte per page; verify the page count fits the size field. */
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         * cbBitmapAligned is the map rounded up to a whole page; cbBitmapFull adds
         * the guard region, which is then protected with RTMEM_PROT_NONE.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map pointer so the map ends exactly at the guard boundary. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it - all pages start out dirty. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
548
549
550/**
551 * Terminates the REM.
552 *
553 * Termination means cleaning up and freeing all resources,
554 * the VM it self is at this point powered off or suspended.
555 *
556 * @returns VBox status code.
557 * @param pVM The VM to operate on.
558 */
559REMR3DECL(int) REMR3Term(PVM pVM)
560{
561 /*
562 * Statistics.
563 */
564 STAMR3Deregister(pVM->pUVM, "/PROF/REM/*");
565 STAMR3Deregister(pVM->pUVM, "/REM/*");
566
567 return VINF_SUCCESS;
568}
569
570
571/**
572 * The VM is being reset.
573 *
574 * For the REM component this means to call the cpu_reset() and
575 * reinitialize some state variables.
576 *
577 * @param pVM VM handle.
578 */
579REMR3DECL(void) REMR3Reset(PVM pVM)
580{
581 EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */
582
583 /*
584 * Reset the REM cpu.
585 */
586 Assert(pVM->rem.s.cIgnoreAll == 0);
587 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
588 cpu_reset(&pVM->rem.s.Env);
589 pVM->rem.s.cInvalidatedPages = 0;
590 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
591 Assert(pVM->rem.s.cIgnoreAll == 0);
592
593 /* Clear raw ring 0 init state */
594 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
595
596 /* Flush the TBs the next time we execute code here. */
597 pVM->rem.s.fFlushTBs = true;
598
599 EMRemUnlock(pVM);
600}
601
602
603/**
604 * Execute state save operation.
605 *
606 * @returns VBox status code.
607 * @param pVM VM Handle.
608 * @param pSSM SSM operation handle.
609 */
610static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
611{
612 PREM pRem = &pVM->rem.s;
613
614 /*
615 * Save the required CPU Env bits.
616 * (Not much because we're never in REM when doing the save.)
617 */
618 LogFlow(("remR3Save:\n"));
619 Assert(!pRem->fInREM);
620 SSMR3PutU32(pSSM, pRem->Env.hflags);
621 SSMR3PutU32(pSSM, ~0); /* separator */
622
623 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
624 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
625 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
626
627 return SSMR3PutU32(pSSM, ~0); /* terminator */
628}
629
630
/**
 * Execute state load operation.
 *
 * Reads the fields in the exact order remR3Save wrote them (hflags,
 * separator, raw-ring-0 flag, pending interrupt, terminator), with extra
 * legacy fields for the 1.6 saved state version.
 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pSSM SSM operation handle.
 * @param uVersion Data layout version.
 * @param uPass The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if ( uVersion != REM_SAVED_STATE_VERSION
        && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     * NOTE(review): the early 'return rc' paths below leave cIgnoreAll
     * incremented — presumably acceptable since a failed load kills the VM,
     * but worth confirming.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         * sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i]; /* NOTE(review): shadows the pVCpu declared above. */
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
756
757
758
759#undef LOG_GROUP
760#define LOG_GROUP LOG_GROUP_REM_RUN
761
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * interrupt_request is saved here and restored at the bottom.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * fBp records whether one was removed so it can be re-inserted below.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* The step completed; pulse the TM clock so time moves. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the qemu exit code to a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED: rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was stashed in rem.s.rc; consume and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HM:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-insert the breakpoint we removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
846
847
848/**
849 * Set a breakpoint using the REM facilities.
850 *
851 * @returns VBox status code.
852 * @param pVM The VM handle.
853 * @param Address The breakpoint address.
854 * @thread The emulation thread.
855 */
856REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
857{
858 VM_ASSERT_EMT(pVM);
859 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
860 {
861 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
862 return VINF_SUCCESS;
863 }
864 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
865 return VERR_REM_NO_MORE_BP_SLOTS;
866}
867
868
869/**
870 * Clears a breakpoint set by REMR3BreakpointSet().
871 *
872 * @returns VBox status code.
873 * @param pVM The VM handle.
874 * @param Address The breakpoint address.
875 * @thread The emulation thread.
876 */
877REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
878{
879 VM_ASSERT_EMT(pVM);
880 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
881 {
882 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
883 return VINF_SUCCESS;
884 }
885 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
886 return VERR_REM_BP_NOT_FOUND;
887}
888
889
890/**
891 * Emulate an instruction.
892 *
893 * This function executes one instruction without letting anyone
894 * interrupt it. This is intended for being called while being in
895 * raw mode and thus will take care of all the state syncing between
896 * REM and the rest.
897 *
898 * @returns VBox status code.
899 * @param pVM VM handle.
900 * @param pVCpu VMCPU Handle.
901 */
902REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
903{
904 bool fFlushTBs;
905
906 int rc, rc2;
907 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
908
909 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
910 * CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
911 */
912 if (HMIsEnabled(pVM))
913 pVM->rem.s.Env.state |= CPU_RAW_HM;
914
915 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
916 fFlushTBs = pVM->rem.s.fFlushTBs;
917 pVM->rem.s.fFlushTBs = false;
918
919 /*
920 * Sync the state and enable single instruction / single stepping.
921 */
922 rc = REMR3State(pVM, pVCpu);
923 pVM->rem.s.fFlushTBs = fFlushTBs;
924 if (RT_SUCCESS(rc))
925 {
926 int interrupt_request = pVM->rem.s.Env.interrupt_request;
927 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
928#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
929 cpu_single_step(&pVM->rem.s.Env, 0);
930#endif
931 Assert(!pVM->rem.s.Env.singlestep_enabled);
932
933 /*
934 * Now we set the execute single instruction flag and enter the cpu_exec loop.
935 */
936 TMNotifyStartOfExecution(pVCpu);
937 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
938 rc = cpu_exec(&pVM->rem.s.Env);
939 TMNotifyEndOfExecution(pVCpu);
940 switch (rc)
941 {
942 /*
943 * Executed without anything out of the way happening.
944 */
945 case EXCP_SINGLE_INSTR:
946 rc = VINF_EM_RESCHEDULE;
947 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
948 break;
949
950 /*
951 * If we take a trap or start servicing a pending interrupt, we might end up here.
952 * (Timer thread or some other thread wishing EMT's attention.)
953 */
954 case EXCP_INTERRUPT:
955 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
956 rc = VINF_EM_RESCHEDULE;
957 break;
958
959 /*
960 * Single step, we assume!
961 * If there was a breakpoint there we're fucked now.
962 */
963 case EXCP_DEBUG:
964 if (pVM->rem.s.Env.watchpoint_hit)
965 {
966 /** @todo deal with watchpoints */
967 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
968 rc = VINF_EM_DBG_BREAKPOINT;
969 }
970 else
971 {
972 CPUBreakpoint *pBP;
973 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
974 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
975 if (pBP->pc == GCPtrPC)
976 break;
977 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
978 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
979 }
980 break;
981
982 /*
983 * hlt instruction.
984 */
985 case EXCP_HLT:
986 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
987 rc = VINF_EM_HALT;
988 break;
989
990 /*
991 * The VM has halted.
992 */
993 case EXCP_HALTED:
994 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
995 rc = VINF_EM_HALT;
996 break;
997
998 /*
999 * Switch to RAW-mode.
1000 */
1001 case EXCP_EXECUTE_RAW:
1002 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1003 rc = VINF_EM_RESCHEDULE_RAW;
1004 break;
1005
1006 /*
1007 * Switch to hardware accelerated RAW-mode.
1008 */
1009 case EXCP_EXECUTE_HM:
1010 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HM\n"));
1011 rc = VINF_EM_RESCHEDULE_HM;
1012 break;
1013
1014 /*
1015 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1016 */
1017 case EXCP_RC:
1018 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1019 rc = pVM->rem.s.rc;
1020 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1021 break;
1022
1023 /*
1024 * Figure out the rest when they arrive....
1025 */
1026 default:
1027 AssertMsgFailed(("rc=%d\n", rc));
1028 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1029 rc = VINF_EM_RESCHEDULE;
1030 break;
1031 }
1032
1033 /*
1034 * Switch back the state.
1035 */
1036 pVM->rem.s.Env.interrupt_request = interrupt_request;
1037 rc2 = REMR3StateBack(pVM, pVCpu);
1038 AssertRC(rc2);
1039 }
1040
1041 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1042 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1043 return rc;
1044}
1045
1046
1047/**
1048 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1049 *
1050 * @returns VBox status code.
1051 *
1052 * @param pVM The VM handle.
1053 * @param pVCpu The Virtual CPU handle.
1054 */
1055static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1056{
1057 int rc;
1058
1059 Assert(pVM->rem.s.fInREM);
1060#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1061 cpu_single_step(&pVM->rem.s.Env, 1);
1062#else
1063 Assert(!pVM->rem.s.Env.singlestep_enabled);
1064#endif
1065
1066 /*
1067 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1068 */
1069 for (;;)
1070 {
1071 char szBuf[256];
1072
1073 /*
1074 * Log the current registers state and instruction.
1075 */
1076 remR3StateUpdate(pVM, pVCpu);
1077 DBGFR3Info(pVM->pUVM, "cpumguest", NULL, NULL);
1078 szBuf[0] = '\0';
1079 rc = DBGFR3DisasInstrEx(pVM->pUVM,
1080 pVCpu->idCpu,
1081 0, /* Sel */ 0, /* GCPtr */
1082 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1083 szBuf,
1084 sizeof(szBuf),
1085 NULL);
1086 if (RT_FAILURE(rc))
1087 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1088 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1089
1090 /*
1091 * Execute the instruction.
1092 */
1093 TMNotifyStartOfExecution(pVCpu);
1094
1095 if ( pVM->rem.s.Env.exception_index < 0
1096 || pVM->rem.s.Env.exception_index > 256)
1097 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1098
1099#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1100 pVM->rem.s.Env.interrupt_request = 0;
1101#else
1102 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1103#endif
1104 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1105 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1106 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1107 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1108 pVM->rem.s.Env.interrupt_request,
1109 pVM->rem.s.Env.halted,
1110 pVM->rem.s.Env.exception_index
1111 );
1112
1113 rc = cpu_exec(&pVM->rem.s.Env);
1114
1115 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1116 pVM->rem.s.Env.interrupt_request,
1117 pVM->rem.s.Env.halted,
1118 pVM->rem.s.Env.exception_index
1119 );
1120
1121 TMNotifyEndOfExecution(pVCpu);
1122
1123 switch (rc)
1124 {
1125#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1126 /*
1127 * The normal exit.
1128 */
1129 case EXCP_SINGLE_INSTR:
1130 if ( !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1131 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1132 continue;
1133 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1134 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1135 rc = VINF_SUCCESS;
1136 break;
1137
1138#else
1139 /*
1140 * The normal exit, check for breakpoints at PC just to be sure.
1141 */
1142#endif
1143 case EXCP_DEBUG:
1144 if (pVM->rem.s.Env.watchpoint_hit)
1145 {
1146 /** @todo deal with watchpoints */
1147 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1148 rc = VINF_EM_DBG_BREAKPOINT;
1149 }
1150 else
1151 {
1152 CPUBreakpoint *pBP;
1153 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1154 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1155 if (pBP->pc == GCPtrPC)
1156 break;
1157 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1158 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1159 }
1160#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1161 if (rc == VINF_EM_DBG_STEPPED)
1162 {
1163 if ( !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1164 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1165 continue;
1166
1167 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1168 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1169 rc = VINF_SUCCESS;
1170 }
1171#endif
1172 break;
1173
1174 /*
1175 * If we take a trap or start servicing a pending interrupt, we might end up here.
1176 * (Timer thread or some other thread wishing EMT's attention.)
1177 */
1178 case EXCP_INTERRUPT:
1179 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1180 rc = VINF_SUCCESS;
1181 break;
1182
1183 /*
1184 * hlt instruction.
1185 */
1186 case EXCP_HLT:
1187 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1188 rc = VINF_EM_HALT;
1189 break;
1190
1191 /*
1192 * The VM has halted.
1193 */
1194 case EXCP_HALTED:
1195 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1196 rc = VINF_EM_HALT;
1197 break;
1198
1199 /*
1200 * Switch to RAW-mode.
1201 */
1202 case EXCP_EXECUTE_RAW:
1203 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1204 rc = VINF_EM_RESCHEDULE_RAW;
1205 break;
1206
1207 /*
1208 * Switch to hardware accelerated RAW-mode.
1209 */
1210 case EXCP_EXECUTE_HM:
1211 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HM rc=VINF_EM_RESCHEDULE_HM\n");
1212 rc = VINF_EM_RESCHEDULE_HM;
1213 break;
1214
1215 /*
1216 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1217 */
1218 case EXCP_RC:
1219 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1220 rc = pVM->rem.s.rc;
1221 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1222 break;
1223
1224 /*
1225 * Figure out the rest when they arrive....
1226 */
1227 default:
1228 AssertMsgFailed(("rc=%d\n", rc));
1229 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1230 rc = VINF_EM_RESCHEDULE;
1231 break;
1232 }
1233 break;
1234 }
1235
1236#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1237// cpu_single_step(&pVM->rem.s.Env, 0);
1238#else
1239 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1240#endif
1241 return rc;
1242}
1243
1244
1245/**
1246 * Runs code in recompiled mode.
1247 *
1248 * Before calling this function the REM state needs to be in sync with
1249 * the VM. Call REMR3State() to perform the sync. It's only necessary
1250 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1251 * and after calling REMR3StateBack().
1252 *
1253 * @returns VBox status code.
1254 *
1255 * @param pVM VM Handle.
1256 * @param pVCpu VMCPU Handle.
1257 */
1258REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1259{
1260 int rc;
1261
1262 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1263 return remR3RunLoggingStep(pVM, pVCpu);
1264
1265 Assert(pVM->rem.s.fInREM);
1266 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1267
1268 TMNotifyStartOfExecution(pVCpu);
1269 rc = cpu_exec(&pVM->rem.s.Env);
1270 TMNotifyEndOfExecution(pVCpu);
1271 switch (rc)
1272 {
1273 /*
1274 * This happens when the execution was interrupted
1275 * by an external event, like pending timers.
1276 */
1277 case EXCP_INTERRUPT:
1278 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1279 rc = VINF_SUCCESS;
1280 break;
1281
1282 /*
1283 * hlt instruction.
1284 */
1285 case EXCP_HLT:
1286 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1287 rc = VINF_EM_HALT;
1288 break;
1289
1290 /*
1291 * The VM has halted.
1292 */
1293 case EXCP_HALTED:
1294 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1295 rc = VINF_EM_HALT;
1296 break;
1297
1298 /*
1299 * Breakpoint/single step.
1300 */
1301 case EXCP_DEBUG:
1302 if (pVM->rem.s.Env.watchpoint_hit)
1303 {
1304 /** @todo deal with watchpoints */
1305 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1306 rc = VINF_EM_DBG_BREAKPOINT;
1307 }
1308 else
1309 {
1310 CPUBreakpoint *pBP;
1311 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1312 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1313 if (pBP->pc == GCPtrPC)
1314 break;
1315 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1316 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1317 }
1318 break;
1319
1320 /*
1321 * Switch to RAW-mode.
1322 */
1323 case EXCP_EXECUTE_RAW:
1324 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
1325 rc = VINF_EM_RESCHEDULE_RAW;
1326 break;
1327
1328 /*
1329 * Switch to hardware accelerated RAW-mode.
1330 */
1331 case EXCP_EXECUTE_HM:
1332 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HM\n"));
1333 rc = VINF_EM_RESCHEDULE_HM;
1334 break;
1335
1336 /*
1337 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1338 */
1339 case EXCP_RC:
1340 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1341 rc = pVM->rem.s.rc;
1342 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1343 break;
1344
1345 /*
1346 * Figure out the rest when they arrive....
1347 */
1348 default:
1349 AssertMsgFailed(("rc=%d\n", rc));
1350 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1351 rc = VINF_SUCCESS;
1352 break;
1353 }
1354
1355 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1356 return rc;
1357}
1358
1359
1360/**
1361 * Check if the cpu state is suitable for Raw execution.
1362 *
1363 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1364 *
1365 * @param env The CPU env struct.
1366 * @param eip The EIP to check this for (might differ from env->eip).
1367 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1368 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1369 *
1370 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1371 */
1372bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1373{
1374 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1375 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1376 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1377 uint32_t u32CR0;
1378
1379#ifdef IEM_VERIFICATION_MODE
1380 return false;
1381#endif
1382
1383 /* Update counter. */
1384 env->pVM->rem.s.cCanExecuteRaw++;
1385
1386 /* Never when single stepping+logging guest code. */
1387 if (env->state & CPU_EMULATE_SINGLE_STEP)
1388 return false;
1389
1390 if (HMIsEnabled(env->pVM))
1391 {
1392#ifdef RT_OS_WINDOWS
1393 PCPUMCTX pCtx = alloca(sizeof(*pCtx));
1394#else
1395 CPUMCTX Ctx;
1396 PCPUMCTX pCtx = &Ctx;
1397#endif
1398
1399 env->state |= CPU_RAW_HM;
1400
1401 /*
1402 * The simple check first...
1403 */
1404 if (!EMIsHwVirtExecutionEnabled(env->pVM))
1405 return false;
1406
1407 /*
1408 * Create partial context for HMR3CanExecuteGuest
1409 */
1410 pCtx->cr0 = env->cr[0];
1411 pCtx->cr3 = env->cr[3];
1412 pCtx->cr4 = env->cr[4];
1413
1414 pCtx->tr.Sel = env->tr.selector;
1415 pCtx->tr.ValidSel = env->tr.selector;
1416 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
1417 pCtx->tr.u64Base = env->tr.base;
1418 pCtx->tr.u32Limit = env->tr.limit;
1419 pCtx->tr.Attr.u = (env->tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1420
1421 pCtx->ldtr.Sel = env->ldt.selector;
1422 pCtx->ldtr.ValidSel = env->ldt.selector;
1423 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1424 pCtx->ldtr.u64Base = env->ldt.base;
1425 pCtx->ldtr.u32Limit = env->ldt.limit;
1426 pCtx->ldtr.Attr.u = (env->ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1427
1428 pCtx->idtr.cbIdt = env->idt.limit;
1429 pCtx->idtr.pIdt = env->idt.base;
1430
1431 pCtx->gdtr.cbGdt = env->gdt.limit;
1432 pCtx->gdtr.pGdt = env->gdt.base;
1433
1434 pCtx->rsp = env->regs[R_ESP];
1435 pCtx->rip = env->eip;
1436
1437 pCtx->eflags.u32 = env->eflags;
1438
1439 pCtx->cs.Sel = env->segs[R_CS].selector;
1440 pCtx->cs.ValidSel = env->segs[R_CS].selector;
1441 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1442 pCtx->cs.u64Base = env->segs[R_CS].base;
1443 pCtx->cs.u32Limit = env->segs[R_CS].limit;
1444 pCtx->cs.Attr.u = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1445
1446 pCtx->ds.Sel = env->segs[R_DS].selector;
1447 pCtx->ds.ValidSel = env->segs[R_DS].selector;
1448 pCtx->ds.fFlags = CPUMSELREG_FLAGS_VALID;
1449 pCtx->ds.u64Base = env->segs[R_DS].base;
1450 pCtx->ds.u32Limit = env->segs[R_DS].limit;
1451 pCtx->ds.Attr.u = (env->segs[R_DS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1452
1453 pCtx->es.Sel = env->segs[R_ES].selector;
1454 pCtx->es.ValidSel = env->segs[R_ES].selector;
1455 pCtx->es.fFlags = CPUMSELREG_FLAGS_VALID;
1456 pCtx->es.u64Base = env->segs[R_ES].base;
1457 pCtx->es.u32Limit = env->segs[R_ES].limit;
1458 pCtx->es.Attr.u = (env->segs[R_ES].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1459
1460 pCtx->fs.Sel = env->segs[R_FS].selector;
1461 pCtx->fs.ValidSel = env->segs[R_FS].selector;
1462 pCtx->fs.fFlags = CPUMSELREG_FLAGS_VALID;
1463 pCtx->fs.u64Base = env->segs[R_FS].base;
1464 pCtx->fs.u32Limit = env->segs[R_FS].limit;
1465 pCtx->fs.Attr.u = (env->segs[R_FS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1466
1467 pCtx->gs.Sel = env->segs[R_GS].selector;
1468 pCtx->gs.ValidSel = env->segs[R_GS].selector;
1469 pCtx->gs.fFlags = CPUMSELREG_FLAGS_VALID;
1470 pCtx->gs.u64Base = env->segs[R_GS].base;
1471 pCtx->gs.u32Limit = env->segs[R_GS].limit;
1472 pCtx->gs.Attr.u = (env->segs[R_GS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1473
1474 pCtx->ss.Sel = env->segs[R_SS].selector;
1475 pCtx->ss.ValidSel = env->segs[R_SS].selector;
1476 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1477 pCtx->ss.u64Base = env->segs[R_SS].base;
1478 pCtx->ss.u32Limit = env->segs[R_SS].limit;
1479 pCtx->ss.Attr.u = (env->segs[R_SS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1480
1481 pCtx->msrEFER = env->efer;
1482
1483 /* Hardware accelerated raw-mode:
1484 *
1485 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1486 */
1487 if (HMR3CanExecuteGuest(env->pVM, pCtx) == true)
1488 {
1489 *piException = EXCP_EXECUTE_HM;
1490 return true;
1491 }
1492 return false;
1493 }
1494
1495 /*
1496 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1497 * or 32 bits protected mode ring 0 code
1498 *
1499 * The tests are ordered by the likelihood of being true during normal execution.
1500 */
1501 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1502 {
1503 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1504 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1505 return false;
1506 }
1507
1508#ifndef VBOX_RAW_V86
1509 if (fFlags & VM_MASK) {
1510 STAM_COUNTER_INC(&gStatRefuseVM86);
1511 Log2(("raw mode refused: VM_MASK\n"));
1512 return false;
1513 }
1514#endif
1515
1516 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1517 {
1518#ifndef DEBUG_bird
1519 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1520#endif
1521 return false;
1522 }
1523
1524 if (env->singlestep_enabled)
1525 {
1526 //Log2(("raw mode refused: Single step\n"));
1527 return false;
1528 }
1529
1530 if (!QTAILQ_EMPTY(&env->breakpoints))
1531 {
1532 //Log2(("raw mode refused: Breakpoints\n"));
1533 return false;
1534 }
1535
1536 if (!QTAILQ_EMPTY(&env->watchpoints))
1537 {
1538 //Log2(("raw mode refused: Watchpoints\n"));
1539 return false;
1540 }
1541
1542 u32CR0 = env->cr[0];
1543 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1544 {
1545 STAM_COUNTER_INC(&gStatRefusePaging);
1546 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1547 return false;
1548 }
1549
1550 if (env->cr[4] & CR4_PAE_MASK)
1551 {
1552 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1553 {
1554 STAM_COUNTER_INC(&gStatRefusePAE);
1555 return false;
1556 }
1557 }
1558
1559 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1560 {
1561 if (!EMIsRawRing3Enabled(env->pVM))
1562 return false;
1563
1564 if (!(env->eflags & IF_MASK))
1565 {
1566 STAM_COUNTER_INC(&gStatRefuseIF0);
1567 Log2(("raw mode refused: IF (RawR3)\n"));
1568 return false;
1569 }
1570
1571 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1572 {
1573 STAM_COUNTER_INC(&gStatRefuseWP0);
1574 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1575 return false;
1576 }
1577 }
1578 else
1579 {
1580 if (!EMIsRawRing0Enabled(env->pVM))
1581 return false;
1582
1583 // Let's start with pure 32 bits ring 0 code first
1584 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1585 {
1586 STAM_COUNTER_INC(&gStatRefuseCode16);
1587 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1588 return false;
1589 }
1590
1591 if (EMIsRawRing1Enabled(env->pVM))
1592 {
1593 /* Only ring 0 and 1 supervisor code. */
1594 if (((fFlags >> HF_CPL_SHIFT) & 3) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1595 {
1596 Log2(("raw r0 mode refused: CPL %d\n", (fFlags >> HF_CPL_SHIFT) & 3));
1597 return false;
1598 }
1599 }
1600 /* Only R0. */
1601 else if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1602 {
1603 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1604 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1605 return false;
1606 }
1607
1608 if (!(u32CR0 & CR0_WP_MASK))
1609 {
1610 STAM_COUNTER_INC(&gStatRefuseWP0);
1611 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1612 return false;
1613 }
1614
1615#ifdef VBOX_WITH_RAW_MODE
1616 if (PATMIsPatchGCAddr(env->pVM, eip))
1617 {
1618 Log2(("raw r0 mode forced: patch code\n"));
1619 *piException = EXCP_EXECUTE_RAW;
1620 return true;
1621 }
1622#endif
1623
1624#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1625 if (!(env->eflags & IF_MASK))
1626 {
1627 STAM_COUNTER_INC(&gStatRefuseIF0);
1628 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1629 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1630 return false;
1631 }
1632#endif
1633
1634#ifndef VBOX_WITH_RAW_RING1
1635 if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
1636 {
1637 Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
1638 return false;
1639 }
1640#endif
1641 env->state |= CPU_RAW_RING0;
1642 }
1643
1644 /*
1645 * Don't reschedule the first time we're called, because there might be
1646 * special reasons why we're here that is not covered by the above checks.
1647 */
1648 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1649 {
1650 Log2(("raw mode refused: first scheduling\n"));
1651 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1652 return false;
1653 }
1654
1655 /*
1656 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1657 */
1658 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1659 {
1660 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1661 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1662 return false;
1663 }
1664 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1665 {
1666 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1667 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1668 return false;
1669 }
1670 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1671 {
1672 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1673 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1674 return false;
1675 }
1676 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1677 {
1678 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1679 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1680 return false;
1681 }
1682 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1683 {
1684 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1685 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1686 return false;
1687 }
1688 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1689 {
1690 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1691 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1692 return false;
1693 }
1694
1695/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1696 *piException = EXCP_EXECUTE_RAW;
1697 return true;
1698}
1699
1700
1701#ifdef VBOX_WITH_RAW_MODE
1702/**
1703 * Fetches a code byte.
1704 *
1705 * @returns Success indicator (bool) for ease of use.
1706 * @param env The CPU environment structure.
1707 * @param GCPtrInstr Where to fetch code.
1708 * @param pu8Byte Where to store the byte on success
1709 */
1710bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1711{
1712 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1713 if (RT_SUCCESS(rc))
1714 return true;
1715 return false;
1716}
1717#endif /* VBOX_WITH_RAW_MODE */
1718
1719
1720/**
1721 * Flush (or invalidate if you like) page table/dir entry.
1722 *
1723 * (invlpg instruction; tlb_flush_page)
1724 *
1725 * @param env Pointer to cpu environment.
1726 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1727 */
1728void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1729{
1730 PVM pVM = env->pVM;
1731 PCPUMCTX pCtx;
1732 int rc;
1733
1734 Assert(EMRemIsLockOwner(env->pVM));
1735
1736 /*
1737 * When we're replaying invlpg instructions or restoring a saved
1738 * state we disable this path.
1739 */
1740 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1741 return;
1742 LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1743 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1744
1745 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1746
1747 /*
1748 * Update the control registers before calling PGMFlushPage.
1749 */
1750 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1751 Assert(pCtx);
1752 pCtx->cr0 = env->cr[0];
1753 pCtx->cr3 = env->cr[3];
1754#ifdef VBOX_WITH_RAW_MODE
1755 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1756 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1757#endif
1758 pCtx->cr4 = env->cr[4];
1759
1760 /*
1761 * Let PGM do the rest.
1762 */
1763 Assert(env->pVCpu);
1764 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1765 if (RT_FAILURE(rc))
1766 {
1767 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1768 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1769 }
1770 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1771}
1772
1773
1774#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address into an R3 pointer for the recompiler's
 * TLB, encoding handler status in the two low bits of the returned pointer:
 *  - (void *)1                  on failure (catch-all handler or unassigned),
 *  - pointer | 2                when writes must be caught (write handler),
 *  - plain pointer              otherwise.
 *
 * NOTE(review): the fWritable parameter is ignored; the call below always
 * requests a writable mapping and relies on the tag bits for write
 * interception — confirm this is intentional before "fixing" it.
 */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;


    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);
    /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/

    STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1799#endif /* REM_PHYS_ADDR_IN_TLB */
1800
1801
1802/**
1803 * Called from tlb_protect_code in order to write monitor a code page.
1804 *
1805 * @param env Pointer to the CPU environment.
1806 * @param GCPtr Code page to monitor
1807 */
1808void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1809{
1810#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1811 Assert(env->pVM->rem.s.fInREM);
1812 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1813 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1814 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1815 && !(env->eflags & VM_MASK) /* no V86 mode */
1816 && !HMIsEnabled(env->pVM))
1817 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1818#endif
1819}
1820
1821
1822/**
1823 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1824 *
1825 * @param env Pointer to the CPU environment.
1826 * @param GCPtr Code page to monitor
1827 */
1828void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1829{
1830 Assert(env->pVM->rem.s.fInREM);
1831#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1832 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1833 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1834 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1835 && !(env->eflags & VM_MASK) /* no V86 mode */
1836 && !HMIsEnabled(env->pVM))
1837 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1838#endif
1839}
1840
1841
1842/**
1843 * Called when the CPU is initialized, any of the CRx registers are changed or
1844 * when the A20 line is modified.
1845 *
1846 * @param env Pointer to the CPU environment.
1847 * @param fGlobal Set if the flush is global.
1848 */
1849void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1850{
1851 PVM pVM = env->pVM;
1852 PCPUMCTX pCtx;
1853 Assert(EMRemIsLockOwner(pVM));
1854
1855 /*
1856 * When we're replaying invlpg instructions or restoring a saved
1857 * state we disable this path.
1858 */
1859 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1860 return;
1861 Assert(pVM->rem.s.fInREM);
1862
1863 /*
1864 * The caller doesn't check cr4, so we have to do that for ourselves.
1865 */
1866 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1867 fGlobal = true;
1868 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1869
1870 /*
1871 * Update the control registers before calling PGMR3FlushTLB.
1872 */
1873 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1874 Assert(pCtx);
1875 pCtx->cr0 = env->cr[0];
1876 pCtx->cr3 = env->cr[3];
1877#ifdef VBOX_WITH_RAW_MODE
1878 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1879 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1880#endif
1881 pCtx->cr4 = env->cr[4];
1882
1883 /*
1884 * Let PGM do the rest.
1885 */
1886 Assert(env->pVCpu);
1887 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1888}
1889
1890
1891/**
1892 * Called when any of the cr0, cr4 or efer registers is updated.
1893 *
1894 * @param env Pointer to the CPU environment.
1895 */
void remR3ChangeCpuMode(CPUX86State *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);

    /*
     * Notify PGM about WP0 being enabled (like CPUSetGuestCR0 does).
     * Only on a 0 -> 1 transition of CR0.WP.
     */
    if (((env->cr[0] ^ pCtx->cr0) & X86_CR0_WP) && (env->cr[0] & X86_CR0_WP))
        PGMCr0WpEnabled(env->pVCpu);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
#ifdef VBOX_WITH_RAW_MODE
    /* A CR4.VME change requires a TSS resync in raw mode. */
    if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
#endif
    pCtx->cr4 = env->cr[4];
#ifdef TARGET_X86_64
    efer = env->efer;
    pCtx->msrEFER = efer;
#else
    efer = 0;   /* no EFER / long mode on 32-bit-only targets */
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        /* Informational EM statuses are propagated via the pending-RC mechanism;
           anything else is fatal for the recompiler. */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1950
1951
1952/**
1953 * Called from compiled code to run dma.
1954 *
1955 * @param env Pointer to the CPU environment.
1956 */
void remR3DmaRun(CPUX86State *env)
{
    /* Stop the emulated-code profiling clock while PDM runs pending DMA
       transfers, so the DMA time isn't attributed to guest code execution. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1963
1964
1965/**
1966 * Called from compiled code to schedule pending timers in VMM
1967 *
1968 * @param env Pointer to the CPU environment.
1969 */
void remR3TimersRun(CPUX86State *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    /* Swap profiling buckets: account the timer-queue work to the timer
       counter instead of the emulated-code counter. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1980
1981
1982/**
1983 * Record trap occurrence
1984 *
1985 * @returns VBox status code
1986 * @param env Pointer to the CPU environment.
1987 * @param uTrap Trap nr
1988 * @param uErrorCode Error code
1989 * @param pvNextEIP Next EIP
1990 */
1991int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1992{
1993 PVM pVM = env->pVM;
1994#ifdef VBOX_WITH_STATISTICS
1995 static STAMCOUNTER s_aStatTrap[255];
1996 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1997#endif
1998
1999#ifdef VBOX_WITH_STATISTICS
2000 if (uTrap < 255)
2001 {
2002 if (!s_aRegisters[uTrap])
2003 {
2004 char szStatName[64];
2005 s_aRegisters[uTrap] = true;
2006 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
2007 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
2008 }
2009 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
2010 }
2011#endif
2012 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2013 if( uTrap < 0x20
2014 && (env->cr[0] & X86_CR0_PE)
2015 && !(env->eflags & X86_EFL_VM))
2016 {
2017#ifdef DEBUG
2018 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
2019#endif
2020 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
2021 {
2022 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2023 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
2024 return VERR_REM_TOO_MANY_TRAPS;
2025 }
2026 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
2027 {
2028 Log(("remR3NotifyTrap: uTrap=%#x set as pending\n", uTrap));
2029 pVM->rem.s.cPendingExceptions = 1;
2030 }
2031 pVM->rem.s.uPendingException = uTrap;
2032 pVM->rem.s.uPendingExcptEIP = env->eip;
2033 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2034 }
2035 else
2036 {
2037 pVM->rem.s.cPendingExceptions = 0;
2038 pVM->rem.s.uPendingException = uTrap;
2039 pVM->rem.s.uPendingExcptEIP = env->eip;
2040 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2041 }
2042 return VINF_SUCCESS;
2043}
2044
2045
2046/*
2047 * Clear current active trap
2048 *
2049 * @param pVM VM Handle.
2050 */
void remR3TrapClear(PVM pVM)
{
    /* Reset the pending-exception tracking used by remR3NotifyTrap so the
       repeat-trap detection starts over. */
    pVM->rem.s.cPendingExceptions = 0;
    pVM->rem.s.uPendingException = 0;
    pVM->rem.s.uPendingExcptEIP = 0;
    pVM->rem.s.uPendingExcptCR2 = 0;
}
2058
2059
2060/*
2061 * Record previous call instruction addresses
2062 *
2063 * @param env Pointer to the CPU environment.
2064 */
void remR3RecordCall(CPUX86State *env)
{
#ifdef VBOX_WITH_RAW_MODE
    /* Feed the current EIP to CSAM's call-address history (raw mode only). */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
#endif
}
2071
2072
2073/**
2074 * Syncs the internal REM state with the VM.
2075 *
2076 * This must be called before REMR3Run() is invoked whenever when the REM
2077 * state is not up to date. Calling it several times in a row is not
2078 * permitted.
2079 *
2080 * @returns VBox status code.
2081 *
2082 * @param pVM VM Handle.
2083 * @param pVCpu VMCPU Handle.
2084 *
2085 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2086 * no do this since the majority of the callers don't want any unnecessary of events
2087 * pending that would immediately interrupt execution.
2088 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8]  = pCtx->r8;
    pVM->rem.s.Env.regs[9]  = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Only if we're not flushing the TLB.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (pVM->rem.s.cInvalidatedPages)
    {
        if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
        {
            RTUINT i;

            /* Suppress the CR3-load/invlpg notifications these tlb_flush_page
               calls would otherwise trigger back into us. */
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreInvlPg  = true;
            for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
            {
                Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
                tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
            }
            pVM->rem.s.fIgnoreInvlPg  = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer         = pCtx->msrEFER;
    pVM->rem.s.Env.star         = pCtx->msrSTAR;
    pVM->rem.s.Env.pat          = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar        = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar        = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask        = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /* Update the inhibit IRQ mask. */
    pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        /* The inhibition is only valid at the instruction it was recorded for. */
        RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
        if (InhibitPC == pCtx->rip)
            pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
        else
        {
            Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }
    }

    /* Update the inhibit NMI mask. */
    pVM->rem.s.Env.hflags2 &= ~HF2_NMI_MASK;
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
        pVM->rem.s.Env.hflags2 |= HF2_NMI_MASK;

    /*
     * Sync the A20 gate.
     */
    bool fA20State = PGMPhysIsA20Enabled(pVCpu);
    if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
    {
        /* cpu_x86_set_a20 flushes the TLB; ignore the resulting callbacks. */
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
        cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
       )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base     = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit    = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base     = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit    = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs  = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
            {
                pVM->rem.s.Env.ldt.selector    = pCtx->ldtr.Sel;
                pVM->rem.s.Env.ldt.newselector = 0;
                pVM->rem.s.Env.ldt.fVBoxFlags  = pCtx->ldtr.fFlags;
                pVM->rem.s.Env.ldt.base        = pCtx->ldtr.u64Base;
                pVM->rem.s.Env.ldt.limit       = pCtx->ldtr.u32Limit;
                pVM->rem.s.Env.ldt.flags       = (pCtx->ldtr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
            }
            else
            {
                AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
            }
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu,          1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->XState.x87); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector    = pCtx->tr.Sel;
    pVM->rem.s.Env.tr.newselector = 0;
    pVM->rem.s.Env.tr.fVBoxFlags  = pCtx->tr.fFlags;
    pVM->rem.s.Env.tr.base        = pCtx->tr.u64Base;
    pVM->rem.s.Env.tr.limit       = pCtx->tr.u32Limit;
    pVM->rem.s.Env.tr.flags       = (pCtx->tr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
    /* Note! do_interrupt will fault if the busy flag is still set... */ /** @todo so fix do_interrupt then! */
    pVM->rem.s.Env.tr.flags      &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     *
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT om sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     *
     * CPL/Stack; Note first check this one as the CPL might have changed.
     * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */
    cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
    /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
    do \
    { \
        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
        { \
            cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
                                   (a_pVBoxSReg)->Sel, \
                                   (a_pVBoxSReg)->u64Base, \
                                   (a_pVBoxSReg)->u32Limit, \
                                   ((a_pVBoxSReg)->Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT); \
            (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
        } \
        /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
        else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
        { \
            Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
                  (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
            sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
            if ((a_pRemSReg)->newselector) \
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
        } \
        else \
            (a_pRemSReg)->newselector = 0; \
    } while (0)

    SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
    SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
    SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
    SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
    SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
    SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
    /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
     * be the same but not the base/limit. */

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int     = 0;
#ifdef IEM_VERIFICATION_MODE /* Ugly hack, needs proper fixing. */
            pVM->rem.s.Env.exception_is_int     = enmType == TRPM_HARDWARE_INT ? 0x42 : 0;
#endif
            pVM->rem.s.Env.exception_next_eip   = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int     = 1;
            pVM->rem.s.Env.exception_next_eip   = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case X86_XCPT_PF:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case X86_XCPT_AC: case X86_XCPT_DF:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2518
2519
2520/**
2521 * Syncs back changes in the REM state to the the VM state.
2522 *
2523 * This must be called after invoking REMR3Run().
2524 * Calling it several times in a row is not permitted.
2525 *
2526 * @returns VBox status code.
2527 *
2528 * @param pVM VM Handle.
2529 * @param pVCpu VMCPU Handle.
2530 */
2531REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2532{
2533 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2534 Assert(pCtx);
2535 unsigned i;
2536
2537 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2538 Log2(("REMR3StateBack:\n"));
2539 Assert(pVM->rem.s.fInREM);
2540
2541 /*
2542 * Copy back the registers.
2543 * This is done in the order they are declared in the CPUMCTX structure.
2544 */
2545
2546 /** @todo FOP */
2547 /** @todo FPUIP */
2548 /** @todo CS */
2549 /** @todo FPUDP */
2550 /** @todo DS */
2551
2552 /** @todo check if FPU/XMM was actually used in the recompiler */
2553 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->XState.x87);
2554//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2555
2556#ifdef TARGET_X86_64
2557 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2558 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2559 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2560 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2561 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2562 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2563 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2564 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2565 pCtx->r8 = pVM->rem.s.Env.regs[8];
2566 pCtx->r9 = pVM->rem.s.Env.regs[9];
2567 pCtx->r10 = pVM->rem.s.Env.regs[10];
2568 pCtx->r11 = pVM->rem.s.Env.regs[11];
2569 pCtx->r12 = pVM->rem.s.Env.regs[12];
2570 pCtx->r13 = pVM->rem.s.Env.regs[13];
2571 pCtx->r14 = pVM->rem.s.Env.regs[14];
2572 pCtx->r15 = pVM->rem.s.Env.regs[15];
2573
2574 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2575
2576#else
2577 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2578 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2579 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2580 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2581 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2582 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2583 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2584
2585 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2586#endif
2587
2588#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2589 do \
2590 { \
2591 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2592 if (!pVM->rem.s.Env.segs[R_SS].newselector) \
2593 { \
2594 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2595 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2596 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2597 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2598 /* Note! QEmu saves the 2nd dword of the descriptor; we (VT-x/AMD-V) keep only the attributes! */ \
2599 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK; \
2600 } \
2601 else \
2602 { \
2603 pCtx->a_sreg.fFlags = 0; \
2604 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2605 } \
2606 } while (0)
2607
2608 SYNC_BACK_SREG(es, ES);
2609 SYNC_BACK_SREG(cs, CS);
2610 SYNC_BACK_SREG(ss, SS);
2611 SYNC_BACK_SREG(ds, DS);
2612 SYNC_BACK_SREG(fs, FS);
2613 SYNC_BACK_SREG(gs, GS);
2614
2615#ifdef TARGET_X86_64
2616 pCtx->rip = pVM->rem.s.Env.eip;
2617 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2618#else
2619 pCtx->eip = pVM->rem.s.Env.eip;
2620 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2621#endif
2622
2623 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2624 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2625 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2626#ifdef VBOX_WITH_RAW_MODE
2627 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2628 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2629#endif
2630 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2631
2632 for (i = 0; i < 8; i++)
2633 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2634
2635 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2636 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2637 {
2638 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2639 STAM_COUNTER_INC(&gStatREMGDTChange);
2640#ifdef VBOX_WITH_RAW_MODE
2641 if (!HMIsEnabled(pVM))
2642 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2643#endif
2644 }
2645
2646 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2647 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2648 {
2649 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2650 STAM_COUNTER_INC(&gStatREMIDTChange);
2651#ifdef VBOX_WITH_RAW_MODE
2652 if (!HMIsEnabled(pVM))
2653 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2654#endif
2655 }
2656
2657 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2658 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2659 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2660 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2661 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2662 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2663 )
2664 {
2665 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2666 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2667 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2668 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2669 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2670 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2671 STAM_COUNTER_INC(&gStatREMLDTRChange);
2672#ifdef VBOX_WITH_RAW_MODE
2673 if (!HMIsEnabled(pVM))
2674 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2675#endif
2676 }
2677
2678 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2679 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2680 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2681 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2682 /* Qemu and AMD/Intel have different ideas about the busy flag ... */ /** @todo just fix qemu! */
2683 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2684 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT
2685 : 0)
2686 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2687 )
2688 {
2689 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2690 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2691 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2692 (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2693 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT : 0));
2694 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2695 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2696 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2697 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2698 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2699 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2700 if (pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE)
2701 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> SEL_FLAGS_SHIFT;
2702 STAM_COUNTER_INC(&gStatREMTRChange);
2703#ifdef VBOX_WITH_RAW_MODE
2704 if (!HMIsEnabled(pVM))
2705 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2706#endif
2707 }
2708
2709 /* Sysenter MSR */
2710 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2711 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2712 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2713
2714 /* System MSRs. */
2715 pCtx->msrEFER = pVM->rem.s.Env.efer;
2716 pCtx->msrSTAR = pVM->rem.s.Env.star;
2717 pCtx->msrPAT = pVM->rem.s.Env.pat;
2718#ifdef TARGET_X86_64
2719 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2720 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2721 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2722 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2723#endif
2724
2725 /* Inhibit interrupt flag. */
2726 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2727 {
2728 Log(("Settings VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2729 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2730 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2731 }
2732 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2733 {
2734 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2735 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2736 }
2737
2738 /* Inhibit NMI flag. */
2739 if (pVM->rem.s.Env.hflags2 & HF2_NMI_MASK)
2740 {
2741 Log(("Settings VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2742 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2743 }
2744 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2745 {
2746 Log(("Clearing VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2747 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2748 }
2749
2750 remR3TrapClear(pVM);
2751
2752 /*
2753 * Check for traps.
2754 */
2755 if ( pVM->rem.s.Env.exception_index >= 0
2756 && pVM->rem.s.Env.exception_index < 256)
2757 {
2758 /* This cannot be a hardware-interrupt because exception_index < EXCP_INTERRUPT. */
2759 int rc;
2760
2761 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2762 TRPMEVENT enmType = pVM->rem.s.Env.exception_is_int ? TRPM_SOFTWARE_INT : TRPM_TRAP;
2763 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, enmType);
2764 AssertRC(rc);
2765 if (enmType == TRPM_TRAP)
2766 {
2767 switch (pVM->rem.s.Env.exception_index)
2768 {
2769 case X86_XCPT_PF:
2770 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2771 /* fallthru */
2772 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2773 case X86_XCPT_AC: case X86_XCPT_DF: /* 0 */
2774 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2775 break;
2776 }
2777 }
2778 }
2779
2780 /*
2781 * We're not longer in REM mode.
2782 */
2783 CPUMR3RemLeave(pVCpu,
2784 HMIsEnabled(pVM)
2785 || ( pVM->rem.s.Env.segs[R_SS].newselector
2786 | pVM->rem.s.Env.segs[R_GS].newselector
2787 | pVM->rem.s.Env.segs[R_FS].newselector
2788 | pVM->rem.s.Env.segs[R_ES].newselector
2789 | pVM->rem.s.Env.segs[R_DS].newselector
2790 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2791 );
2792 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2793 pVM->rem.s.fInREM = false;
2794 pVM->rem.s.pCtx = NULL;
2795 pVM->rem.s.Env.pVCpu = NULL;
2796 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2797 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2798 return VINF_SUCCESS;
2799}
2800
2801
2802/**
2803 * This is called by the disassembler when it wants to update the cpu state
2804 * before for instance doing a register dump.
2805 */
2806static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2807{
2808 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2809 unsigned i;
2810
2811 Assert(pVM->rem.s.fInREM);
2812
2813 /*
2814 * Copy back the registers.
2815 * This is done in the order they are declared in the CPUMCTX structure.
2816 */
2817
2818 /** @todo FOP */
2819 /** @todo FPUIP */
2820 /** @todo CS */
2821 /** @todo FPUDP */
2822 /** @todo DS */
2823 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2824 pCtx->XState.x87.MXCSR = 0;
2825 pCtx->XState.x87.MXCSR_MASK = 0;
2826
2827 /** @todo check if FPU/XMM was actually used in the recompiler */
2828 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->XState.x87);
2829//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2830
2831#ifdef TARGET_X86_64
2832 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2833 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2834 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2835 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2836 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2837 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2838 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2839 pCtx->r8 = pVM->rem.s.Env.regs[8];
2840 pCtx->r9 = pVM->rem.s.Env.regs[9];
2841 pCtx->r10 = pVM->rem.s.Env.regs[10];
2842 pCtx->r11 = pVM->rem.s.Env.regs[11];
2843 pCtx->r12 = pVM->rem.s.Env.regs[12];
2844 pCtx->r13 = pVM->rem.s.Env.regs[13];
2845 pCtx->r14 = pVM->rem.s.Env.regs[14];
2846 pCtx->r15 = pVM->rem.s.Env.regs[15];
2847
2848 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2849#else
2850 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2851 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2852 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2853 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2854 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2855 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2856 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2857
2858 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2859#endif
2860
2861 SYNC_BACK_SREG(es, ES);
2862 SYNC_BACK_SREG(cs, CS);
2863 SYNC_BACK_SREG(ss, SS);
2864 SYNC_BACK_SREG(ds, DS);
2865 SYNC_BACK_SREG(fs, FS);
2866 SYNC_BACK_SREG(gs, GS);
2867
2868#ifdef TARGET_X86_64
2869 pCtx->rip = pVM->rem.s.Env.eip;
2870 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2871#else
2872 pCtx->eip = pVM->rem.s.Env.eip;
2873 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2874#endif
2875
2876 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2877 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2878 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2879#ifdef VBOX_WITH_RAW_MODE
2880 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2881 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2882#endif
2883 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2884
2885 for (i = 0; i < 8; i++)
2886 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2887
2888 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2889 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2890 {
2891 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2892 STAM_COUNTER_INC(&gStatREMGDTChange);
2893#ifdef VBOX_WITH_RAW_MODE
2894 if (!HMIsEnabled(pVM))
2895 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2896#endif
2897 }
2898
2899 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2900 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2901 {
2902 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2903 STAM_COUNTER_INC(&gStatREMIDTChange);
2904#ifdef VBOX_WITH_RAW_MODE
2905 if (!HMIsEnabled(pVM))
2906 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2907#endif
2908 }
2909
2910 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2911 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2912 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2913 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2914 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2915 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2916 )
2917 {
2918 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2919 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2920 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2921 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2922 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2923 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2924 STAM_COUNTER_INC(&gStatREMLDTRChange);
2925#ifdef VBOX_WITH_RAW_MODE
2926 if (!HMIsEnabled(pVM))
2927 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2928#endif
2929 }
2930
2931 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2932 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2933 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2934 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2935 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2936 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2937 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT
2938 : 0)
2939 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2940 )
2941 {
2942 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2943 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2944 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2945 (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2946 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT : 0));
2947 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2948 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2949 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2950 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2951 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2952 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2953 if (pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE)
2954 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> SEL_FLAGS_SHIFT;
2955 STAM_COUNTER_INC(&gStatREMTRChange);
2956#ifdef VBOX_WITH_RAW_MODE
2957 if (!HMIsEnabled(pVM))
2958 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2959#endif
2960 }
2961
2962 /* Sysenter MSR */
2963 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2964 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2965 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2966
2967 /* System MSRs. */
2968 pCtx->msrEFER = pVM->rem.s.Env.efer;
2969 pCtx->msrSTAR = pVM->rem.s.Env.star;
2970 pCtx->msrPAT = pVM->rem.s.Env.pat;
2971#ifdef TARGET_X86_64
2972 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2973 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2974 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2975 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2976#endif
2977
2978}
2979
2980
2981/**
2982 * Update the VMM state information if we're currently in REM.
2983 *
2984 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2985 * we're currently executing in REM and the VMM state is invalid. This method will of
2986 * course check that we're executing in REM before syncing any data over to the VMM.
2987 *
2988 * @param pVM The VM handle.
2989 * @param pVCpu The VMCPU handle.
2990 */
2991REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2992{
2993 if (pVM->rem.s.fInREM)
2994 remR3StateUpdate(pVM, pVCpu);
2995}
2996
2997
2998#undef LOG_GROUP
2999#define LOG_GROUP LOG_GROUP_REM
3000
3001
3002/**
3003 * Notify the recompiler about Address Gate 20 state change.
3004 *
3005 * This notification is required since A20 gate changes are
3006 * initialized from a device driver and the VM might just as
3007 * well be in REM mode as in RAW mode.
3008 *
3009 * @param pVM VM handle.
3010 * @param pVCpu VMCPU handle.
3011 * @param fEnable True if the gate should be enabled.
3012 * False if the gate should be disabled.
3013 */
3014REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
3015{
3016 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
3017 VM_ASSERT_EMT(pVM);
3018
3019 /** @todo SMP and the A20 gate... */
3020 if (pVM->rem.s.Env.pVCpu == pVCpu)
3021 {
3022 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3023 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
3024 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3025 }
3026}
3027
3028
3029/**
3030 * Replays the handler notification changes
3031 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
3032 *
3033 * @param pVM VM handle.
3034 */
3035REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
3036{
3037 /*
3038 * Replay the flushes.
3039 */
3040 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3041 VM_ASSERT_EMT(pVM);
3042
3043 /** @todo this isn't ensuring correct replay order. */
3044 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3045 {
3046 uint32_t idxNext;
3047 uint32_t idxRevHead;
3048 uint32_t idxHead;
3049#ifdef VBOX_STRICT
3050 int32_t c = 0;
3051#endif
3052
3053 /* Lockless purging of pending notifications. */
3054 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3055 if (idxHead == UINT32_MAX)
3056 return;
3057 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3058
3059 /*
3060 * Reverse the list to process it in FIFO order.
3061 */
3062 idxRevHead = UINT32_MAX;
3063 do
3064 {
3065 /* Save the index of the next rec. */
3066 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3067 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3068 /* Push the record onto the reversed list. */
3069 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3070 idxRevHead = idxHead;
3071 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3072 /* Advance. */
3073 idxHead = idxNext;
3074 } while (idxHead != UINT32_MAX);
3075
3076 /*
3077 * Loop thru the list, reinserting the record into the free list as they are
3078 * processed to avoid having other EMTs running out of entries while we're flushing.
3079 */
3080 idxHead = idxRevHead;
3081 do
3082 {
3083 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3084 uint32_t idxCur;
3085 Assert(--c >= 0);
3086
3087 switch (pCur->enmKind)
3088 {
3089 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3090 remR3NotifyHandlerPhysicalRegister(pVM,
3091 pCur->u.PhysicalRegister.enmType,
3092 pCur->u.PhysicalRegister.GCPhys,
3093 pCur->u.PhysicalRegister.cb,
3094 pCur->u.PhysicalRegister.fHasHCHandler);
3095 break;
3096
3097 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3098 remR3NotifyHandlerPhysicalDeregister(pVM,
3099 pCur->u.PhysicalDeregister.enmType,
3100 pCur->u.PhysicalDeregister.GCPhys,
3101 pCur->u.PhysicalDeregister.cb,
3102 pCur->u.PhysicalDeregister.fHasHCHandler,
3103 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3104 break;
3105
3106 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3107 remR3NotifyHandlerPhysicalModify(pVM,
3108 pCur->u.PhysicalModify.enmType,
3109 pCur->u.PhysicalModify.GCPhysOld,
3110 pCur->u.PhysicalModify.GCPhysNew,
3111 pCur->u.PhysicalModify.cb,
3112 pCur->u.PhysicalModify.fHasHCHandler,
3113 pCur->u.PhysicalModify.fRestoreAsRAM);
3114 break;
3115
3116 default:
3117 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3118 break;
3119 }
3120
3121 /*
3122 * Advance idxHead.
3123 */
3124 idxCur = idxHead;
3125 idxHead = pCur->idxNext;
3126 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3127
3128 /*
3129 * Put the record back into the free list.
3130 */
3131 do
3132 {
3133 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3134 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3135 ASMCompilerBarrier();
3136 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3137 } while (idxHead != UINT32_MAX);
3138
3139#ifdef VBOX_STRICT
3140 if (pVM->cCpus == 1)
3141 {
3142 unsigned c;
3143 /* Check that all records are now on the free list. */
3144 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3145 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3146 c++;
3147 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3148 }
3149#endif
3150 }
3151}
3152
3153
3154/**
3155 * Notify REM about changed code page.
3156 *
3157 * @returns VBox status code.
3158 * @param pVM VM handle.
3159 * @param pVCpu VMCPU handle.
3160 * @param pvCodePage Code page address
3161 */
3162REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3163{
3164#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3165 int rc;
3166 RTGCPHYS PhysGC;
3167 uint64_t flags;
3168
3169 VM_ASSERT_EMT(pVM);
3170
3171 /*
3172 * Get the physical page address.
3173 */
3174 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3175 if (rc == VINF_SUCCESS)
3176 {
3177 /*
3178 * Sync the required registers and flush the whole page.
3179 * (Easier to do the whole page than notifying it about each physical
3180 * byte that was changed.
3181 */
3182 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3183 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3184 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3185 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3186
3187 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3188 }
3189#endif
3190 return VINF_SUCCESS;
3191}
3192
3193
3194/**
3195 * Notification about a successful MMR3PhysRegister() call.
3196 *
3197 * @param pVM VM handle.
3198 * @param GCPhys The physical address the RAM.
3199 * @param cb Size of the memory.
3200 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3201 */
3202REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3203{
3204 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3205 VM_ASSERT_EMT(pVM);
3206
3207 /*
3208 * Validate input - we trust the caller.
3209 */
3210 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3211 Assert(cb);
3212 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3213 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3214
3215 /*
3216 * Base ram? Update GCPhysLastRam.
3217 */
3218 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3219 {
3220 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3221 {
3222 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3223 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3224 }
3225 }
3226
3227 /*
3228 * Register the ram.
3229 */
3230 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3231
3232 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3233 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3234 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3235
3236 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3237}
3238
3239
3240/**
3241 * Notification about a successful MMR3PhysRomRegister() call.
3242 *
3243 * @param pVM VM handle.
3244 * @param GCPhys The physical address of the ROM.
3245 * @param cb The size of the ROM.
3246 * @param pvCopy Pointer to the ROM copy.
3247 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3248 * This function will be called when ever the protection of the
3249 * shadow ROM changes (at reset and end of POST).
3250 */
3251REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3252{
3253 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3254 VM_ASSERT_EMT(pVM);
3255
3256 /*
3257 * Validate input - we trust the caller.
3258 */
3259 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3260 Assert(cb);
3261 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3262
3263 /*
3264 * Register the rom.
3265 */
3266 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3267
3268 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3269 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3270 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3271
3272 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3273}
3274
3275
3276/**
3277 * Notification about a successful memory deregistration or reservation.
3278 *
3279 * @param pVM VM Handle.
3280 * @param GCPhys Start physical address.
3281 * @param cb The size of the range.
3282 */
3283REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3284{
3285 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3286 VM_ASSERT_EMT(pVM);
3287
3288 /*
3289 * Validate input - we trust the caller.
3290 */
3291 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3292 Assert(cb);
3293 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3294
3295 /*
3296 * Unassigning the memory.
3297 */
3298 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3299
3300 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3301 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3302 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3303
3304 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3305}
3306
3307
3308/**
3309 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3310 *
3311 * @param pVM VM Handle.
3312 * @param enmType Handler type.
3313 * @param GCPhys Handler range address.
3314 * @param cb Size of the handler range.
3315 * @param fHasHCHandler Set if the handler has a HC callback function.
3316 *
3317 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3318 * Handler memory type to memory which has no HC handler.
3319 */
3320static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3321{
3322 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3323 enmType, GCPhys, cb, fHasHCHandler));
3324
3325 VM_ASSERT_EMT(pVM);
3326 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3327 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3328
3329
3330 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3331
3332 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3333 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3334 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3335 else if (fHasHCHandler)
3336 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3337 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3338
3339 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3340}
3341
3342/**
3343 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3344 *
3345 * @param pVM VM Handle.
3346 * @param enmType Handler type.
3347 * @param GCPhys Handler range address.
3348 * @param cb Size of the handler range.
3349 * @param fHasHCHandler Set if the handler has a HC callback function.
3350 *
3351 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3352 * Handler memory type to memory which has no HC handler.
3353 */
3354REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3355{
3356 REMR3ReplayHandlerNotifications(pVM);
3357
3358 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3359}
3360
3361/**
3362 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3363 *
3364 * @param pVM VM Handle.
3365 * @param enmType Handler type.
3366 * @param GCPhys Handler range address.
3367 * @param cb Size of the handler range.
3368 * @param fHasHCHandler Set if the handler has a HC callback function.
3369 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3370 */
3371static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3372{
3373 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3374 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3375 VM_ASSERT_EMT(pVM);
3376
3377
3378 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3379
3380 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3381 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3382 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3383 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3384 else if (fHasHCHandler)
3385 {
3386 if (!fRestoreAsRAM)
3387 {
3388 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3389 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3390 }
3391 else
3392 {
3393 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3394 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3395 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3396 }
3397 }
3398 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3399
3400 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3401}
3402
3403/**
3404 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3405 *
3406 * @param pVM VM Handle.
3407 * @param enmType Handler type.
3408 * @param GCPhys Handler range address.
3409 * @param cb Size of the handler range.
3410 * @param fHasHCHandler Set if the handler has a HC callback function.
3411 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3412 */
3413REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3414{
3415 REMR3ReplayHandlerNotifications(pVM);
3416 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3417}
3418
3419
3420/**
3421 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3422 *
3423 * @param pVM VM Handle.
3424 * @param enmType Handler type.
3425 * @param GCPhysOld Old handler range address.
3426 * @param GCPhysNew New handler range address.
3427 * @param cb Size of the handler range.
3428 * @param fHasHCHandler Set if the handler has a HC callback function.
3429 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3430 */
3431static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3432{
3433 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3434 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3435 VM_ASSERT_EMT(pVM);
3436 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3437
3438 if (fHasHCHandler)
3439 {
3440 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3441
3442 /*
3443 * Reset the old page.
3444 */
3445 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3446 if (!fRestoreAsRAM)
3447 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3448 else
3449 {
3450 /* This is not perfect, but it'll do for PD monitoring... */
3451 Assert(cb == PAGE_SIZE);
3452 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3453 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3454 }
3455
3456 /*
3457 * Update the new page.
3458 */
3459 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3460 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3461 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3462 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3463
3464 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3465 }
3466}
3467
3468/**
3469 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3470 *
3471 * @param pVM VM Handle.
3472 * @param enmType Handler type.
3473 * @param GCPhysOld Old handler range address.
3474 * @param GCPhysNew New handler range address.
3475 * @param cb Size of the handler range.
3476 * @param fHasHCHandler Set if the handler has a HC callback function.
3477 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3478 */
3479REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3480{
3481 REMR3ReplayHandlerNotifications(pVM);
3482
3483 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3484}
3485
3486/**
3487 * Checks if we're handling access to this page or not.
3488 *
3489 * @returns true if we're trapping access.
3490 * @returns false if we aren't.
3491 * @param pVM The VM handle.
3492 * @param GCPhys The physical address.
3493 *
3494 * @remark This function will only work correctly in VBOX_STRICT builds!
3495 */
3496REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3497{
3498#ifdef VBOX_STRICT
3499 ram_addr_t off;
3500 REMR3ReplayHandlerNotifications(pVM);
3501
3502 off = get_phys_page_offset(GCPhys);
3503 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3504 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3505 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3506#else
3507 return false;
3508#endif
3509}
3510
3511
3512/**
3513 * Deals with a rare case in get_phys_addr_code where the code
3514 * is being monitored.
3515 *
3516 * It could also be an MMIO page, in which case we will raise a fatal error.
3517 *
3518 * @returns The physical address corresponding to addr.
3519 * @param env The cpu environment.
3520 * @param addr The virtual address.
3521 * @param pTLBEntry The TLB entry.
3522 */
3523target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3524 target_ulong addr,
3525 CPUTLBEntry *pTLBEntry,
3526 target_phys_addr_t ioTLBEntry)
3527{
3528 PVM pVM = env->pVM;
3529
3530 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3531 {
3532 /* If code memory is being monitored, appropriate IOTLB entry will have
3533 handler IO type, and addend will provide real physical address, no
3534 matter if we store VA in TLB or not, as handlers are always passed PA */
3535 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3536 return ret;
3537 }
3538 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3539 "*** handlers\n",
3540 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3541 DBGFR3Info(pVM->pUVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3542 LogRel(("*** mmio\n"));
3543 DBGFR3Info(pVM->pUVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3544 LogRel(("*** phys\n"));
3545 DBGFR3Info(pVM->pUVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3546 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3547 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3548 AssertFatalFailed();
3549}
3550
3551/**
3552 * Read guest RAM and ROM.
3553 *
3554 * @param SrcGCPhys The source address (guest physical).
3555 * @param pvDst The destination address.
3556 * @param cb Number of bytes
3557 */
3558void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3559{
3560 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3561 VBOX_CHECK_ADDR(SrcGCPhys);
3562 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3563#ifdef VBOX_DEBUG_PHYS
3564 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3565#endif
3566 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3567}
3568
3569
3570/**
3571 * Read guest RAM and ROM, unsigned 8-bit.
3572 *
3573 * @param SrcGCPhys The source address (guest physical).
3574 */
3575RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3576{
3577 uint8_t val;
3578 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3579 VBOX_CHECK_ADDR(SrcGCPhys);
3580 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3581 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3582#ifdef VBOX_DEBUG_PHYS
3583 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3584#endif
3585 return val;
3586}
3587
3588
3589/**
3590 * Read guest RAM and ROM, signed 8-bit.
3591 *
3592 * @param SrcGCPhys The source address (guest physical).
3593 */
3594RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3595{
3596 int8_t val;
3597 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3598 VBOX_CHECK_ADDR(SrcGCPhys);
3599 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3600 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3601#ifdef VBOX_DEBUG_PHYS
3602 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3603#endif
3604 return val;
3605}
3606
3607
3608/**
3609 * Read guest RAM and ROM, unsigned 16-bit.
3610 *
3611 * @param SrcGCPhys The source address (guest physical).
3612 */
3613RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3614{
3615 uint16_t val;
3616 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3617 VBOX_CHECK_ADDR(SrcGCPhys);
3618 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3619 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3620#ifdef VBOX_DEBUG_PHYS
3621 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3622#endif
3623 return val;
3624}
3625
3626
3627/**
3628 * Read guest RAM and ROM, signed 16-bit.
3629 *
3630 * @param SrcGCPhys The source address (guest physical).
3631 */
3632RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3633{
3634 int16_t val;
3635 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3636 VBOX_CHECK_ADDR(SrcGCPhys);
3637 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3638 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3639#ifdef VBOX_DEBUG_PHYS
3640 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3641#endif
3642 return val;
3643}
3644
3645
3646/**
3647 * Read guest RAM and ROM, unsigned 32-bit.
3648 *
3649 * @param SrcGCPhys The source address (guest physical).
3650 */
3651RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3652{
3653 uint32_t val;
3654 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3655 VBOX_CHECK_ADDR(SrcGCPhys);
3656 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3657 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3658#ifdef VBOX_DEBUG_PHYS
3659 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3660#endif
3661 return val;
3662}
3663
3664
3665/**
3666 * Read guest RAM and ROM, signed 32-bit.
3667 *
3668 * @param SrcGCPhys The source address (guest physical).
3669 */
3670RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3671{
3672 int32_t val;
3673 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3674 VBOX_CHECK_ADDR(SrcGCPhys);
3675 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3676 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3677#ifdef VBOX_DEBUG_PHYS
3678 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3679#endif
3680 return val;
3681}
3682
3683
3684/**
3685 * Read guest RAM and ROM, unsigned 64-bit.
3686 *
3687 * @param SrcGCPhys The source address (guest physical).
3688 */
3689uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3690{
3691 uint64_t val;
3692 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3693 VBOX_CHECK_ADDR(SrcGCPhys);
3694 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3695 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3696#ifdef VBOX_DEBUG_PHYS
3697 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3698#endif
3699 return val;
3700}
3701
3702
3703/**
3704 * Read guest RAM and ROM, signed 64-bit.
3705 *
3706 * @param SrcGCPhys The source address (guest physical).
3707 */
3708int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3709{
3710 int64_t val;
3711 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3712 VBOX_CHECK_ADDR(SrcGCPhys);
3713 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3714 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3715#ifdef VBOX_DEBUG_PHYS
3716 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3717#endif
3718 return val;
3719}
3720
3721
3722/**
3723 * Write guest RAM.
3724 *
3725 * @param DstGCPhys The destination address (guest physical).
3726 * @param pvSrc The source address.
3727 * @param cb Number of bytes to write
3728 */
3729void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3730{
3731 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3732 VBOX_CHECK_ADDR(DstGCPhys);
3733 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3734 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3735#ifdef VBOX_DEBUG_PHYS
3736 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3737#endif
3738}
3739
3740
3741/**
3742 * Write guest RAM, unsigned 8-bit.
3743 *
3744 * @param DstGCPhys The destination address (guest physical).
3745 * @param val Value
3746 */
3747void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3748{
3749 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3750 VBOX_CHECK_ADDR(DstGCPhys);
3751 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3752 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3753#ifdef VBOX_DEBUG_PHYS
3754 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3755#endif
3756}
3757
3758
3759/**
3760 * Write guest RAM, unsigned 8-bit.
3761 *
3762 * @param DstGCPhys The destination address (guest physical).
3763 * @param val Value
3764 */
3765void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3766{
3767 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3768 VBOX_CHECK_ADDR(DstGCPhys);
3769 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3770 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3771#ifdef VBOX_DEBUG_PHYS
3772 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3773#endif
3774}
3775
3776
3777/**
3778 * Write guest RAM, unsigned 32-bit.
3779 *
3780 * @param DstGCPhys The destination address (guest physical).
3781 * @param val Value
3782 */
3783void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3784{
3785 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3786 VBOX_CHECK_ADDR(DstGCPhys);
3787 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3788 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3789#ifdef VBOX_DEBUG_PHYS
3790 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3791#endif
3792}
3793
3794
3795/**
3796 * Write guest RAM, unsigned 64-bit.
3797 *
3798 * @param DstGCPhys The destination address (guest physical).
3799 * @param val Value
3800 */
3801void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3802{
3803 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3804 VBOX_CHECK_ADDR(DstGCPhys);
3805 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3806 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3807#ifdef VBOX_DEBUG_PHYS
3808 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3809#endif
3810}
3811
3812#undef LOG_GROUP
3813#define LOG_GROUP LOG_GROUP_REM_MMIO
3814
3815/** Read MMIO memory. */
3816static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys)
3817{
3818 CPUX86State *env = (CPUX86State *)pvEnv;
3819 uint32_t u32 = 0;
3820 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 1);
3821 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3822 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3823 return u32;
3824}
3825
3826/** Read MMIO memory. */
3827static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys)
3828{
3829 CPUX86State *env = (CPUX86State *)pvEnv;
3830 uint32_t u32 = 0;
3831 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 2);
3832 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3833 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3834 return u32;
3835}
3836
3837/** Read MMIO memory. */
3838static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys)
3839{
3840 CPUX86State *env = (CPUX86State *)pvEnv;
3841 uint32_t u32 = 0;
3842 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 4);
3843 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3844 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3845 return u32;
3846}
3847
3848/** Write to MMIO memory. */
3849static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3850{
3851 CPUX86State *env = (CPUX86State *)pvEnv;
3852 int rc;
3853 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3854 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 1);
3855 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3856}
3857
3858/** Write to MMIO memory. */
3859static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3860{
3861 CPUX86State *env = (CPUX86State *)pvEnv;
3862 int rc;
3863 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3864 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 2);
3865 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3866}
3867
3868/** Write to MMIO memory. */
3869static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3870{
3871 CPUX86State *env = (CPUX86State *)pvEnv;
3872 int rc;
3873 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3874 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 4);
3875 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3876}
3877
3878
3879#undef LOG_GROUP
3880#define LOG_GROUP LOG_GROUP_REM_HANDLER
3881
3882/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3883
3884static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3885{
3886 uint8_t u8;
3887 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3888 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3889 return u8;
3890}
3891
3892static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3893{
3894 uint16_t u16;
3895 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3896 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3897 return u16;
3898}
3899
3900static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3901{
3902 uint32_t u32;
3903 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3904 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3905 return u32;
3906}
3907
3908static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3909{
3910 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3911 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3912}
3913
3914static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3915{
3916 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3917 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3918}
3919
3920static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3921{
3922 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3923 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3924}
3925
3926/* -+- disassembly -+- */
3927
3928#undef LOG_GROUP
3929#define LOG_GROUP LOG_GROUP_REM_DISAS
3930
3931
3932/**
3933 * Enables or disables singled stepped disassembly.
3934 *
3935 * @returns VBox status code.
3936 * @param pVM VM handle.
3937 * @param fEnable To enable set this flag, to disable clear it.
3938 */
3939static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3940{
3941 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3942 VM_ASSERT_EMT(pVM);
3943
3944 if (fEnable)
3945 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3946 else
3947 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3948#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3949 cpu_single_step(&pVM->rem.s.Env, fEnable);
3950#endif
3951 return VINF_SUCCESS;
3952}
3953
3954
3955/**
3956 * Enables or disables singled stepped disassembly.
3957 *
3958 * @returns VBox status code.
3959 * @param pVM VM handle.
3960 * @param fEnable To enable set this flag, to disable clear it.
3961 */
3962REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3963{
3964 int rc;
3965
3966 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3967 if (VM_IS_EMT(pVM))
3968 return remR3DisasEnableStepping(pVM, fEnable);
3969
3970 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3971 AssertRC(rc);
3972 return rc;
3973}
3974
3975
3976#ifdef VBOX_WITH_DEBUGGER
3977/**
3978 * External Debugger Command: .remstep [on|off|1|0]
3979 */
3980static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM,
3981 PCDBGCVAR paArgs, unsigned cArgs)
3982{
3983 int rc;
3984 PVM pVM = pUVM->pVM;
3985
3986 if (cArgs == 0)
3987 /*
3988 * Print the current status.
3989 */
3990 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3991 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3992 else
3993 {
3994 /*
3995 * Convert the argument and change the mode.
3996 */
3997 bool fEnable;
3998 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3999 if (RT_SUCCESS(rc))
4000 {
4001 rc = REMR3DisasEnableStepping(pVM, fEnable);
4002 if (RT_SUCCESS(rc))
4003 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
4004 else
4005 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
4006 }
4007 else
4008 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
4009 }
4010 return rc;
4011}
4012#endif /* VBOX_WITH_DEBUGGER */
4013
4014
4015/**
4016 * Disassembles one instruction and prints it to the log.
4017 *
4018 * @returns Success indicator.
4019 * @param env Pointer to the recompiler CPU structure.
4020 * @param f32BitCode Indicates that whether or not the code should
4021 * be disassembled as 16 or 32 bit. If -1 the CS
4022 * selector will be inspected.
4023 * @param pszPrefix
4024 */
4025bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
4026{
4027 PVM pVM = env->pVM;
4028 const bool fLog = LogIsEnabled();
4029 const bool fLog2 = LogIs2Enabled();
4030 int rc = VINF_SUCCESS;
4031
4032 /*
4033 * Don't bother if there ain't any log output to do.
4034 */
4035 if (!fLog && !fLog2)
4036 return true;
4037
4038 /*
4039 * Update the state so DBGF reads the correct register values.
4040 */
4041 remR3StateUpdate(pVM, env->pVCpu);
4042
4043 /*
4044 * Log registers if requested.
4045 */
4046 if (fLog2)
4047 DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
4048
4049 /*
4050 * Disassemble to log.
4051 */
4052 if (fLog)
4053 {
4054 PVMCPU pVCpu = VMMGetCpu(pVM);
4055 char szBuf[256];
4056 szBuf[0] = '\0';
4057 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM,
4058 pVCpu->idCpu,
4059 0, /* Sel */ 0, /* GCPtr */
4060 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4061 szBuf,
4062 sizeof(szBuf),
4063 NULL);
4064 if (RT_FAILURE(rc))
4065 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4066 if (pszPrefix && *pszPrefix)
4067 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4068 else
4069 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4070 }
4071
4072 return RT_SUCCESS(rc);
4073}
4074
4075
4076/**
4077 * Disassemble recompiled code.
4078 *
4079 * @param phFileIgnored Ignored, logfile usually.
4080 * @param pvCode Pointer to the code block.
4081 * @param cb Size of the code block.
4082 */
4083void disas(FILE *phFile, void *pvCode, unsigned long cb)
4084{
4085 if (LogIs2Enabled())
4086 {
4087 unsigned off = 0;
4088 char szOutput[256];
4089 DISCPUSTATE Cpu;
4090#ifdef RT_ARCH_X86
4091 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4092#else
4093 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4094#endif
4095
4096 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4097 while (off < cb)
4098 {
4099 uint32_t cbInstr;
4100 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4101 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4102 if (RT_SUCCESS(rc))
4103 RTLogPrintf("%s", szOutput);
4104 else
4105 {
4106 RTLogPrintf("disas error %Rrc\n", rc);
4107 cbInstr = 1;
4108 }
4109 off += cbInstr;
4110 }
4111 }
4112}
4113
4114
4115/**
4116 * Disassemble guest code.
4117 *
4118 * @param phFileIgnored Ignored, logfile usually.
4119 * @param uCode The guest address of the code to disassemble. (flat?)
4120 * @param cb Number of bytes to disassemble.
4121 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4122 */
4123void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4124{
4125 if (LogIs2Enabled())
4126 {
4127 PVM pVM = cpu_single_env->pVM;
4128 PVMCPU pVCpu = cpu_single_env->pVCpu;
4129 RTSEL cs;
4130 RTGCUINTPTR eip;
4131
4132 Assert(pVCpu);
4133
4134 /*
4135 * Update the state so DBGF reads the correct register values (flags).
4136 */
4137 remR3StateUpdate(pVM, pVCpu);
4138
4139 /*
4140 * Do the disassembling.
4141 */
4142 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4143 cs = cpu_single_env->segs[R_CS].selector;
4144 eip = uCode - cpu_single_env->segs[R_CS].base;
4145 for (;;)
4146 {
4147 char szBuf[256];
4148 uint32_t cbInstr;
4149 int rc = DBGFR3DisasInstrEx(pVM->pUVM,
4150 pVCpu->idCpu,
4151 cs,
4152 eip,
4153 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4154 szBuf, sizeof(szBuf),
4155 &cbInstr);
4156 if (RT_SUCCESS(rc))
4157 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4158 else
4159 {
4160 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4161 cbInstr = 1;
4162 }
4163
4164 /* next */
4165 if (cb <= cbInstr)
4166 break;
4167 cb -= cbInstr;
4168 uCode += cbInstr;
4169 eip += cbInstr;
4170 }
4171 }
4172}
4173
4174
4175/**
4176 * Looks up a guest symbol.
4177 *
4178 * @returns Pointer to symbol name. This is a static buffer.
4179 * @param orig_addr The address in question.
4180 */
4181const char *lookup_symbol(target_ulong orig_addr)
4182{
4183 PVM pVM = cpu_single_env->pVM;
4184 RTGCINTPTR off = 0;
4185 RTDBGSYMBOL Sym;
4186 DBGFADDRESS Addr;
4187
4188 int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, orig_addr),
4189 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL, &off, &Sym, NULL /*phMod*/);
4190 if (RT_SUCCESS(rc))
4191 {
4192 static char szSym[sizeof(Sym.szName) + 48];
4193 if (!off)
4194 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4195 else if (off > 0)
4196 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4197 else
4198 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4199 return szSym;
4200 }
4201 return "<N/A>";
4202}
4203
4204
4205#undef LOG_GROUP
4206#define LOG_GROUP LOG_GROUP_REM
4207
4208
4209/* -+- FF notifications -+- */
4210
4211
4212/**
4213 * Notification about a pending interrupt.
4214 *
4215 * @param pVM VM Handle.
4216 * @param pVCpu VMCPU Handle.
4217 * @param u8Interrupt Interrupt
4218 * @thread The emulation thread.
4219 */
4220REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4221{
4222 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4223 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4224}
4225
4226/**
4227 * Notification about a pending interrupt.
4228 *
4229 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4230 * @param pVM VM Handle.
4231 * @param pVCpu VMCPU Handle.
4232 * @thread The emulation thread.
4233 */
4234REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4235{
4236 return pVM->rem.s.u32PendingInterrupt;
4237}
4238
4239/**
4240 * Notification about the interrupt FF being set.
4241 *
4242 * @param pVM VM Handle.
4243 * @param pVCpu VMCPU Handle.
4244 * @thread The emulation thread.
4245 */
4246REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4247{
4248#ifndef IEM_VERIFICATION_MODE
4249 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4250 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4251 if (pVM->rem.s.fInREM)
4252 {
4253 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4254 CPU_INTERRUPT_EXTERNAL_HARD);
4255 }
4256#endif
4257}
4258
4259
4260/**
4261 * Notification about the interrupt FF being set.
4262 *
4263 * @param pVM VM Handle.
4264 * @param pVCpu VMCPU Handle.
4265 * @thread Any.
4266 */
4267REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4268{
4269 LogFlow(("REMR3NotifyInterruptClear:\n"));
4270 if (pVM->rem.s.fInREM)
4271 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4272}
4273
4274
4275/**
4276 * Notification about pending timer(s).
4277 *
4278 * @param pVM VM Handle.
4279 * @param pVCpuDst The target cpu for this notification.
4280 * TM will not broadcast pending timer events, but use
4281 * a dedicated EMT for them. So, only interrupt REM
4282 * execution if the given CPU is executing in REM.
4283 * @thread Any.
4284 */
4285REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4286{
4287#ifndef IEM_VERIFICATION_MODE
4288#ifndef DEBUG_bird
4289 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4290#endif
4291 if (pVM->rem.s.fInREM)
4292 {
4293 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4294 {
4295 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4296 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4297 CPU_INTERRUPT_EXTERNAL_TIMER);
4298 }
4299 else
4300 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4301 }
4302 else
4303 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4304#endif
4305}
4306
4307
4308/**
4309 * Notification about pending DMA transfers.
4310 *
4311 * @param pVM VM Handle.
4312 * @thread Any.
4313 */
4314REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4315{
4316#ifndef IEM_VERIFICATION_MODE
4317 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4318 if (pVM->rem.s.fInREM)
4319 {
4320 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4321 CPU_INTERRUPT_EXTERNAL_DMA);
4322 }
4323#endif
4324}
4325
4326
4327/**
4328 * Notification about pending timer(s).
4329 *
4330 * @param pVM VM Handle.
4331 * @thread Any.
4332 */
4333REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4334{
4335#ifndef IEM_VERIFICATION_MODE
4336 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4337 if (pVM->rem.s.fInREM)
4338 {
4339 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4340 CPU_INTERRUPT_EXTERNAL_EXIT);
4341 }
4342#endif
4343}
4344
4345
4346/**
4347 * Notification about pending FF set by an external thread.
4348 *
4349 * @param pVM VM handle.
4350 * @thread Any.
4351 */
4352REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4353{
4354#ifndef IEM_VERIFICATION_MODE
4355 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4356 if (pVM->rem.s.fInREM)
4357 {
4358 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4359 CPU_INTERRUPT_EXTERNAL_EXIT);
4360 }
4361#endif
4362}
4363
4364
4365#ifdef VBOX_WITH_STATISTICS
4366void remR3ProfileStart(int statcode)
4367{
4368 STAMPROFILEADV *pStat;
4369 switch(statcode)
4370 {
4371 case STATS_EMULATE_SINGLE_INSTR:
4372 pStat = &gStatExecuteSingleInstr;
4373 break;
4374 case STATS_QEMU_COMPILATION:
4375 pStat = &gStatCompilationQEmu;
4376 break;
4377 case STATS_QEMU_RUN_EMULATED_CODE:
4378 pStat = &gStatRunCodeQEmu;
4379 break;
4380 case STATS_QEMU_TOTAL:
4381 pStat = &gStatTotalTimeQEmu;
4382 break;
4383 case STATS_QEMU_RUN_TIMERS:
4384 pStat = &gStatTimers;
4385 break;
4386 case STATS_TLB_LOOKUP:
4387 pStat= &gStatTBLookup;
4388 break;
4389 case STATS_IRQ_HANDLING:
4390 pStat= &gStatIRQ;
4391 break;
4392 case STATS_RAW_CHECK:
4393 pStat = &gStatRawCheck;
4394 break;
4395
4396 default:
4397 AssertMsgFailed(("unknown stat %d\n", statcode));
4398 return;
4399 }
4400 STAM_PROFILE_ADV_START(pStat, a);
4401}
4402
4403
4404void remR3ProfileStop(int statcode)
4405{
4406 STAMPROFILEADV *pStat;
4407 switch(statcode)
4408 {
4409 case STATS_EMULATE_SINGLE_INSTR:
4410 pStat = &gStatExecuteSingleInstr;
4411 break;
4412 case STATS_QEMU_COMPILATION:
4413 pStat = &gStatCompilationQEmu;
4414 break;
4415 case STATS_QEMU_RUN_EMULATED_CODE:
4416 pStat = &gStatRunCodeQEmu;
4417 break;
4418 case STATS_QEMU_TOTAL:
4419 pStat = &gStatTotalTimeQEmu;
4420 break;
4421 case STATS_QEMU_RUN_TIMERS:
4422 pStat = &gStatTimers;
4423 break;
4424 case STATS_TLB_LOOKUP:
4425 pStat= &gStatTBLookup;
4426 break;
4427 case STATS_IRQ_HANDLING:
4428 pStat= &gStatIRQ;
4429 break;
4430 case STATS_RAW_CHECK:
4431 pStat = &gStatRawCheck;
4432 break;
4433 default:
4434 AssertMsgFailed(("unknown stat %d\n", statcode));
4435 return;
4436 }
4437 STAM_PROFILE_ADV_STOP(pStat, a);
4438}
4439#endif
4440
4441/**
4442 * Raise an RC, force rem exit.
4443 *
4444 * @param pVM VM handle.
4445 * @param rc The rc.
4446 */
4447void remR3RaiseRC(PVM pVM, int rc)
4448{
4449 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4450 Assert(pVM->rem.s.fInREM);
4451 VM_ASSERT_EMT(pVM);
4452 pVM->rem.s.rc = rc;
4453 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4454}
4455
4456
4457/* -+- timers -+- */
4458
/** Returns the current guest TSC value for the recompiler (via TM). */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4464
4465
4466/* -+- interrupts -+- */
4467
/** Raises the FPU error line (legacy IRQ 13) for the recompiler. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4473
/**
 * Gets the next interrupt vector for the recompiler.
 *
 * @returns The interrupt vector, or -1 when none is pending/acknowledgeable.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int     rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector previously stashed by REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard-interrupt flag up while more (A)PIC interrupts are pending. */
        if (VMCPU_FF_IS_PENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4507
4508
4509/* -+- local apic -+- */
4510
4511#if 0 /* CPUMSetGuestMsr does this now. */
4512void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4513{
4514 int rc = PDMApicSetBase(env->pVM, val);
4515 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4516}
4517#endif
4518
4519uint64_t cpu_get_apic_base(CPUX86State *env)
4520{
4521 uint64_t u64;
4522 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(env->pVCpu, MSR_IA32_APICBASE, &u64);
4523 if (RT_SUCCESS(rcStrict))
4524 {
4525 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4526 return u64;
4527 }
4528 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
4529 return 0;
4530}
4531
4532void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4533{
4534 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4535 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4536}
4537
4538uint8_t cpu_get_apic_tpr(CPUX86State *env)
4539{
4540 uint8_t u8;
4541 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL, NULL);
4542 if (RT_SUCCESS(rc))
4543 {
4544 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4545 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4546 }
4547 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4548 return 0;
4549}
4550
4551/**
4552 * Read an MSR.
4553 *
4554 * @retval 0 success.
4555 * @retval -1 failure, raise \#GP(0).
4556 * @param env The cpu state.
4557 * @param idMsr The MSR to read.
4558 * @param puValue Where to return the value.
4559 */
4560int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4561{
4562 Assert(env->pVCpu);
4563 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4564}
4565
4566/**
4567 * Write to an MSR.
4568 *
4569 * @retval 0 success.
4570 * @retval -1 failure, raise \#GP(0).
4571 * @param env The cpu state.
4572 * @param idMsr The MSR to read.
4573 * @param puValue Where to return the value.
4574 */
4575int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4576{
4577 Assert(env->pVCpu);
4578 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4579}
4580
4581/* -+- I/O Ports -+- */
4582
4583#undef LOG_GROUP
4584#define LOG_GROUP LOG_GROUP_REM_IOPORT
4585
4586void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4587{
4588 int rc;
4589
4590 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4591 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4592
4593 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 1);
4594 if (RT_LIKELY(rc == VINF_SUCCESS))
4595 return;
4596 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4597 {
4598 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4599 remR3RaiseRC(env->pVM, rc);
4600 return;
4601 }
4602 remAbort(rc, __FUNCTION__);
4603}
4604
4605void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4606{
4607 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4608 int rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 2);
4609 if (RT_LIKELY(rc == VINF_SUCCESS))
4610 return;
4611 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4612 {
4613 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4614 remR3RaiseRC(env->pVM, rc);
4615 return;
4616 }
4617 remAbort(rc, __FUNCTION__);
4618}
4619
4620void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4621{
4622 int rc;
4623 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4624 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 4);
4625 if (RT_LIKELY(rc == VINF_SUCCESS))
4626 return;
4627 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4628 {
4629 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4630 remR3RaiseRC(env->pVM, rc);
4631 return;
4632 }
4633 remAbort(rc, __FUNCTION__);
4634}
4635
4636uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4637{
4638 uint32_t u32 = 0;
4639 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 1);
4640 if (RT_LIKELY(rc == VINF_SUCCESS))
4641 {
4642 if (/*addr != 0x61 && */addr != 0x71)
4643 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4644 return (uint8_t)u32;
4645 }
4646 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4647 {
4648 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4649 remR3RaiseRC(env->pVM, rc);
4650 return (uint8_t)u32;
4651 }
4652 remAbort(rc, __FUNCTION__);
4653 return UINT8_C(0xff);
4654}
4655
4656uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4657{
4658 uint32_t u32 = 0;
4659 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 2);
4660 if (RT_LIKELY(rc == VINF_SUCCESS))
4661 {
4662 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4663 return (uint16_t)u32;
4664 }
4665 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4666 {
4667 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4668 remR3RaiseRC(env->pVM, rc);
4669 return (uint16_t)u32;
4670 }
4671 remAbort(rc, __FUNCTION__);
4672 return UINT16_C(0xffff);
4673}
4674
4675uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4676{
4677 uint32_t u32 = 0;
4678 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 4);
4679 if (RT_LIKELY(rc == VINF_SUCCESS))
4680 {
4681 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4682 return u32;
4683 }
4684 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4685 {
4686 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4687 remR3RaiseRC(env->pVM, rc);
4688 return u32;
4689 }
4690 remAbort(rc, __FUNCTION__);
4691 return UINT32_C(0xffffffff);
4692}
4693
4694#undef LOG_GROUP
4695#define LOG_GROUP LOG_GROUP_REM
4696
4697
4698/* -+- helpers and misc other interfaces -+- */
4699
4700/**
4701 * Perform the CPUID instruction.
4702 *
4703 * @param env Pointer to the recompiler CPU structure.
4704 * @param idx The CPUID leaf (eax).
4705 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4706 * @param pvEAX Where to store eax.
4707 * @param pvEBX Where to store ebx.
4708 * @param pvECX Where to store ecx.
4709 * @param pvEDX Where to store edx.
4710 */
4711void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4712 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4713{
4714 NOREF(idxSub);
4715 CPUMGetGuestCpuId(env->pVCpu, idx, idxSub, pEAX, pEBX, pECX, pEDX);
4716}
4717
4718
4719#if 0 /* not used */
4720/**
4721 * Interface for qemu hardware to report back fatal errors.
4722 */
4723void hw_error(const char *pszFormat, ...)
4724{
4725 /*
4726 * Bitch about it.
4727 */
4728 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4729 * this in my Odin32 tree at home! */
4730 va_list args;
4731 va_start(args, pszFormat);
4732 RTLogPrintf("fatal error in virtual hardware:");
4733 RTLogPrintfV(pszFormat, args);
4734 va_end(args);
4735 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4736
4737 /*
4738 * If we're in REM context we'll sync back the state before 'jumping' to
4739 * the EMs failure handling.
4740 */
4741 PVM pVM = cpu_single_env->pVM;
4742 if (pVM->rem.s.fInREM)
4743 REMR3StateBack(pVM);
4744 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4745 AssertMsgFailed(("EMR3FatalError returned!\n"));
4746}
4747#endif
4748
4749/**
4750 * Interface for the qemu cpu to report unhandled situation
4751 * raising a fatal VM error.
4752 */
4753void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4754{
4755 va_list va;
4756 PVM pVM;
4757 PVMCPU pVCpu;
4758 char szMsg[256];
4759
4760 /*
4761 * Bitch about it.
4762 */
4763 RTLogFlags(NULL, "nodisabled nobuffered");
4764 RTLogFlush(NULL);
4765
4766 va_start(va, pszFormat);
4767#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4768 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4769 unsigned cArgs = 0;
4770 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4771 const char *psz = strchr(pszFormat, '%');
4772 while (psz && cArgs < 6)
4773 {
4774 auArgs[cArgs++] = va_arg(va, uintptr_t);
4775 psz = strchr(psz + 1, '%');
4776 }
4777 switch (cArgs)
4778 {
4779 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4780 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4781 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4782 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4783 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4784 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4785 default:
4786 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4787 }
4788#else
4789 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4790#endif
4791 va_end(va);
4792
4793 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4794 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4795
4796 /*
4797 * If we're in REM context we'll sync back the state before 'jumping' to
4798 * the EMs failure handling.
4799 */
4800 pVM = cpu_single_env->pVM;
4801 pVCpu = cpu_single_env->pVCpu;
4802 Assert(pVCpu);
4803
4804 if (pVM->rem.s.fInREM)
4805 REMR3StateBack(pVM, pVCpu);
4806 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4807 AssertMsgFailed(("EMR3FatalError returned!\n"));
4808}
4809
4810
4811/**
4812 * Aborts the VM.
4813 *
4814 * @param rc VBox error code.
4815 * @param pszTip Hint about why/when this happened.
4816 */
4817void remAbort(int rc, const char *pszTip)
4818{
4819 PVM pVM;
4820 PVMCPU pVCpu;
4821
4822 /*
4823 * Bitch about it.
4824 */
4825 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4826 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4827
4828 /*
4829 * Jump back to where we entered the recompiler.
4830 */
4831 pVM = cpu_single_env->pVM;
4832 pVCpu = cpu_single_env->pVCpu;
4833 Assert(pVCpu);
4834
4835 if (pVM->rem.s.fInREM)
4836 REMR3StateBack(pVM, pVCpu);
4837
4838 EMR3FatalError(pVCpu, rc);
4839 AssertMsgFailed(("EMR3FatalError returned!\n"));
4840}
4841
4842
4843/**
4844 * Dumps a linux system call.
4845 * @param pVCpu VMCPU handle.
4846 */
4847void remR3DumpLnxSyscall(PVMCPU pVCpu)
4848{
4849 static const char *apsz[] =
4850 {
4851 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4852 "sys_exit",
4853 "sys_fork",
4854 "sys_read",
4855 "sys_write",
4856 "sys_open", /* 5 */
4857 "sys_close",
4858 "sys_waitpid",
4859 "sys_creat",
4860 "sys_link",
4861 "sys_unlink", /* 10 */
4862 "sys_execve",
4863 "sys_chdir",
4864 "sys_time",
4865 "sys_mknod",
4866 "sys_chmod", /* 15 */
4867 "sys_lchown16",
4868 "sys_ni_syscall", /* old break syscall holder */
4869 "sys_stat",
4870 "sys_lseek",
4871 "sys_getpid", /* 20 */
4872 "sys_mount",
4873 "sys_oldumount",
4874 "sys_setuid16",
4875 "sys_getuid16",
4876 "sys_stime", /* 25 */
4877 "sys_ptrace",
4878 "sys_alarm",
4879 "sys_fstat",
4880 "sys_pause",
4881 "sys_utime", /* 30 */
4882 "sys_ni_syscall", /* old stty syscall holder */
4883 "sys_ni_syscall", /* old gtty syscall holder */
4884 "sys_access",
4885 "sys_nice",
4886 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4887 "sys_sync",
4888 "sys_kill",
4889 "sys_rename",
4890 "sys_mkdir",
4891 "sys_rmdir", /* 40 */
4892 "sys_dup",
4893 "sys_pipe",
4894 "sys_times",
4895 "sys_ni_syscall", /* old prof syscall holder */
4896 "sys_brk", /* 45 */
4897 "sys_setgid16",
4898 "sys_getgid16",
4899 "sys_signal",
4900 "sys_geteuid16",
4901 "sys_getegid16", /* 50 */
4902 "sys_acct",
4903 "sys_umount", /* recycled never used phys() */
4904 "sys_ni_syscall", /* old lock syscall holder */
4905 "sys_ioctl",
4906 "sys_fcntl", /* 55 */
4907 "sys_ni_syscall", /* old mpx syscall holder */
4908 "sys_setpgid",
4909 "sys_ni_syscall", /* old ulimit syscall holder */
4910 "sys_olduname",
4911 "sys_umask", /* 60 */
4912 "sys_chroot",
4913 "sys_ustat",
4914 "sys_dup2",
4915 "sys_getppid",
4916 "sys_getpgrp", /* 65 */
4917 "sys_setsid",
4918 "sys_sigaction",
4919 "sys_sgetmask",
4920 "sys_ssetmask",
4921 "sys_setreuid16", /* 70 */
4922 "sys_setregid16",
4923 "sys_sigsuspend",
4924 "sys_sigpending",
4925 "sys_sethostname",
4926 "sys_setrlimit", /* 75 */
4927 "sys_old_getrlimit",
4928 "sys_getrusage",
4929 "sys_gettimeofday",
4930 "sys_settimeofday",
4931 "sys_getgroups16", /* 80 */
4932 "sys_setgroups16",
4933 "old_select",
4934 "sys_symlink",
4935 "sys_lstat",
4936 "sys_readlink", /* 85 */
4937 "sys_uselib",
4938 "sys_swapon",
4939 "sys_reboot",
4940 "old_readdir",
4941 "old_mmap", /* 90 */
4942 "sys_munmap",
4943 "sys_truncate",
4944 "sys_ftruncate",
4945 "sys_fchmod",
4946 "sys_fchown16", /* 95 */
4947 "sys_getpriority",
4948 "sys_setpriority",
4949 "sys_ni_syscall", /* old profil syscall holder */
4950 "sys_statfs",
4951 "sys_fstatfs", /* 100 */
4952 "sys_ioperm",
4953 "sys_socketcall",
4954 "sys_syslog",
4955 "sys_setitimer",
4956 "sys_getitimer", /* 105 */
4957 "sys_newstat",
4958 "sys_newlstat",
4959 "sys_newfstat",
4960 "sys_uname",
4961 "sys_iopl", /* 110 */
4962 "sys_vhangup",
4963 "sys_ni_syscall", /* old "idle" system call */
4964 "sys_vm86old",
4965 "sys_wait4",
4966 "sys_swapoff", /* 115 */
4967 "sys_sysinfo",
4968 "sys_ipc",
4969 "sys_fsync",
4970 "sys_sigreturn",
4971 "sys_clone", /* 120 */
4972 "sys_setdomainname",
4973 "sys_newuname",
4974 "sys_modify_ldt",
4975 "sys_adjtimex",
4976 "sys_mprotect", /* 125 */
4977 "sys_sigprocmask",
4978 "sys_ni_syscall", /* old "create_module" */
4979 "sys_init_module",
4980 "sys_delete_module",
4981 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4982 "sys_quotactl",
4983 "sys_getpgid",
4984 "sys_fchdir",
4985 "sys_bdflush",
4986 "sys_sysfs", /* 135 */
4987 "sys_personality",
4988 "sys_ni_syscall", /* reserved for afs_syscall */
4989 "sys_setfsuid16",
4990 "sys_setfsgid16",
4991 "sys_llseek", /* 140 */
4992 "sys_getdents",
4993 "sys_select",
4994 "sys_flock",
4995 "sys_msync",
4996 "sys_readv", /* 145 */
4997 "sys_writev",
4998 "sys_getsid",
4999 "sys_fdatasync",
5000 "sys_sysctl",
5001 "sys_mlock", /* 150 */
5002 "sys_munlock",
5003 "sys_mlockall",
5004 "sys_munlockall",
5005 "sys_sched_setparam",
5006 "sys_sched_getparam", /* 155 */
5007 "sys_sched_setscheduler",
5008 "sys_sched_getscheduler",
5009 "sys_sched_yield",
5010 "sys_sched_get_priority_max",
5011 "sys_sched_get_priority_min", /* 160 */
5012 "sys_sched_rr_get_interval",
5013 "sys_nanosleep",
5014 "sys_mremap",
5015 "sys_setresuid16",
5016 "sys_getresuid16", /* 165 */
5017 "sys_vm86",
5018 "sys_ni_syscall", /* Old sys_query_module */
5019 "sys_poll",
5020 "sys_nfsservctl",
5021 "sys_setresgid16", /* 170 */
5022 "sys_getresgid16",
5023 "sys_prctl",
5024 "sys_rt_sigreturn",
5025 "sys_rt_sigaction",
5026 "sys_rt_sigprocmask", /* 175 */
5027 "sys_rt_sigpending",
5028 "sys_rt_sigtimedwait",
5029 "sys_rt_sigqueueinfo",
5030 "sys_rt_sigsuspend",
5031 "sys_pread64", /* 180 */
5032 "sys_pwrite64",
5033 "sys_chown16",
5034 "sys_getcwd",
5035 "sys_capget",
5036 "sys_capset", /* 185 */
5037 "sys_sigaltstack",
5038 "sys_sendfile",
5039 "sys_ni_syscall", /* reserved for streams1 */
5040 "sys_ni_syscall", /* reserved for streams2 */
5041 "sys_vfork", /* 190 */
5042 "sys_getrlimit",
5043 "sys_mmap2",
5044 "sys_truncate64",
5045 "sys_ftruncate64",
5046 "sys_stat64", /* 195 */
5047 "sys_lstat64",
5048 "sys_fstat64",
5049 "sys_lchown",
5050 "sys_getuid",
5051 "sys_getgid", /* 200 */
5052 "sys_geteuid",
5053 "sys_getegid",
5054 "sys_setreuid",
5055 "sys_setregid",
5056 "sys_getgroups", /* 205 */
5057 "sys_setgroups",
5058 "sys_fchown",
5059 "sys_setresuid",
5060 "sys_getresuid",
5061 "sys_setresgid", /* 210 */
5062 "sys_getresgid",
5063 "sys_chown",
5064 "sys_setuid",
5065 "sys_setgid",
5066 "sys_setfsuid", /* 215 */
5067 "sys_setfsgid",
5068 "sys_pivot_root",
5069 "sys_mincore",
5070 "sys_madvise",
5071 "sys_getdents64", /* 220 */
5072 "sys_fcntl64",
5073 "sys_ni_syscall", /* reserved for TUX */
5074 "sys_ni_syscall",
5075 "sys_gettid",
5076 "sys_readahead", /* 225 */
5077 "sys_setxattr",
5078 "sys_lsetxattr",
5079 "sys_fsetxattr",
5080 "sys_getxattr",
5081 "sys_lgetxattr", /* 230 */
5082 "sys_fgetxattr",
5083 "sys_listxattr",
5084 "sys_llistxattr",
5085 "sys_flistxattr",
5086 "sys_removexattr", /* 235 */
5087 "sys_lremovexattr",
5088 "sys_fremovexattr",
5089 "sys_tkill",
5090 "sys_sendfile64",
5091 "sys_futex", /* 240 */
5092 "sys_sched_setaffinity",
5093 "sys_sched_getaffinity",
5094 "sys_set_thread_area",
5095 "sys_get_thread_area",
5096 "sys_io_setup", /* 245 */
5097 "sys_io_destroy",
5098 "sys_io_getevents",
5099 "sys_io_submit",
5100 "sys_io_cancel",
5101 "sys_fadvise64", /* 250 */
5102 "sys_ni_syscall",
5103 "sys_exit_group",
5104 "sys_lookup_dcookie",
5105 "sys_epoll_create",
5106 "sys_epoll_ctl", /* 255 */
5107 "sys_epoll_wait",
5108 "sys_remap_file_pages",
5109 "sys_set_tid_address",
5110 "sys_timer_create",
5111 "sys_timer_settime", /* 260 */
5112 "sys_timer_gettime",
5113 "sys_timer_getoverrun",
5114 "sys_timer_delete",
5115 "sys_clock_settime",
5116 "sys_clock_gettime", /* 265 */
5117 "sys_clock_getres",
5118 "sys_clock_nanosleep",
5119 "sys_statfs64",
5120 "sys_fstatfs64",
5121 "sys_tgkill", /* 270 */
5122 "sys_utimes",
5123 "sys_fadvise64_64",
5124 "sys_ni_syscall" /* sys_vserver */
5125 };
5126
5127 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5128 switch (uEAX)
5129 {
5130 default:
5131 if (uEAX < RT_ELEMENTS(apsz))
5132 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5133 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5134 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5135 else
5136 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5137 break;
5138
5139 }
5140}
5141
5142
5143/**
5144 * Dumps an OpenBSD system call.
5145 * @param pVCpu VMCPU handle.
5146 */
5147void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5148{
5149 static const char *apsz[] =
5150 {
5151 "SYS_syscall", //0
5152 "SYS_exit", //1
5153 "SYS_fork", //2
5154 "SYS_read", //3
5155 "SYS_write", //4
5156 "SYS_open", //5
5157 "SYS_close", //6
5158 "SYS_wait4", //7
5159 "SYS_8",
5160 "SYS_link", //9
5161 "SYS_unlink", //10
5162 "SYS_11",
5163 "SYS_chdir", //12
5164 "SYS_fchdir", //13
5165 "SYS_mknod", //14
5166 "SYS_chmod", //15
5167 "SYS_chown", //16
5168 "SYS_break", //17
5169 "SYS_18",
5170 "SYS_19",
5171 "SYS_getpid", //20
5172 "SYS_mount", //21
5173 "SYS_unmount", //22
5174 "SYS_setuid", //23
5175 "SYS_getuid", //24
5176 "SYS_geteuid", //25
5177 "SYS_ptrace", //26
5178 "SYS_recvmsg", //27
5179 "SYS_sendmsg", //28
5180 "SYS_recvfrom", //29
5181 "SYS_accept", //30
5182 "SYS_getpeername", //31
5183 "SYS_getsockname", //32
5184 "SYS_access", //33
5185 "SYS_chflags", //34
5186 "SYS_fchflags", //35
5187 "SYS_sync", //36
5188 "SYS_kill", //37
5189 "SYS_38",
5190 "SYS_getppid", //39
5191 "SYS_40",
5192 "SYS_dup", //41
5193 "SYS_opipe", //42
5194 "SYS_getegid", //43
5195 "SYS_profil", //44
5196 "SYS_ktrace", //45
5197 "SYS_sigaction", //46
5198 "SYS_getgid", //47
5199 "SYS_sigprocmask", //48
5200 "SYS_getlogin", //49
5201 "SYS_setlogin", //50
5202 "SYS_acct", //51
5203 "SYS_sigpending", //52
5204 "SYS_osigaltstack", //53
5205 "SYS_ioctl", //54
5206 "SYS_reboot", //55
5207 "SYS_revoke", //56
5208 "SYS_symlink", //57
5209 "SYS_readlink", //58
5210 "SYS_execve", //59
5211 "SYS_umask", //60
5212 "SYS_chroot", //61
5213 "SYS_62",
5214 "SYS_63",
5215 "SYS_64",
5216 "SYS_65",
5217 "SYS_vfork", //66
5218 "SYS_67",
5219 "SYS_68",
5220 "SYS_sbrk", //69
5221 "SYS_sstk", //70
5222 "SYS_61",
5223 "SYS_vadvise", //72
5224 "SYS_munmap", //73
5225 "SYS_mprotect", //74
5226 "SYS_madvise", //75
5227 "SYS_76",
5228 "SYS_77",
5229 "SYS_mincore", //78
5230 "SYS_getgroups", //79
5231 "SYS_setgroups", //80
5232 "SYS_getpgrp", //81
5233 "SYS_setpgid", //82
5234 "SYS_setitimer", //83
5235 "SYS_84",
5236 "SYS_85",
5237 "SYS_getitimer", //86
5238 "SYS_87",
5239 "SYS_88",
5240 "SYS_89",
5241 "SYS_dup2", //90
5242 "SYS_91",
5243 "SYS_fcntl", //92
5244 "SYS_select", //93
5245 "SYS_94",
5246 "SYS_fsync", //95
5247 "SYS_setpriority", //96
5248 "SYS_socket", //97
5249 "SYS_connect", //98
5250 "SYS_99",
5251 "SYS_getpriority", //100
5252 "SYS_101",
5253 "SYS_102",
5254 "SYS_sigreturn", //103
5255 "SYS_bind", //104
5256 "SYS_setsockopt", //105
5257 "SYS_listen", //106
5258 "SYS_107",
5259 "SYS_108",
5260 "SYS_109",
5261 "SYS_110",
5262 "SYS_sigsuspend", //111
5263 "SYS_112",
5264 "SYS_113",
5265 "SYS_114",
5266 "SYS_115",
5267 "SYS_gettimeofday", //116
5268 "SYS_getrusage", //117
5269 "SYS_getsockopt", //118
5270 "SYS_119",
5271 "SYS_readv", //120
5272 "SYS_writev", //121
5273 "SYS_settimeofday", //122
5274 "SYS_fchown", //123
5275 "SYS_fchmod", //124
5276 "SYS_125",
5277 "SYS_setreuid", //126
5278 "SYS_setregid", //127
5279 "SYS_rename", //128
5280 "SYS_129",
5281 "SYS_130",
5282 "SYS_flock", //131
5283 "SYS_mkfifo", //132
5284 "SYS_sendto", //133
5285 "SYS_shutdown", //134
5286 "SYS_socketpair", //135
5287 "SYS_mkdir", //136
5288 "SYS_rmdir", //137
5289 "SYS_utimes", //138
5290 "SYS_139",
5291 "SYS_adjtime", //140
5292 "SYS_141",
5293 "SYS_142",
5294 "SYS_143",
5295 "SYS_144",
5296 "SYS_145",
5297 "SYS_146",
5298 "SYS_setsid", //147
5299 "SYS_quotactl", //148
5300 "SYS_149",
5301 "SYS_150",
5302 "SYS_151",
5303 "SYS_152",
5304 "SYS_153",
5305 "SYS_154",
5306 "SYS_nfssvc", //155
5307 "SYS_156",
5308 "SYS_157",
5309 "SYS_158",
5310 "SYS_159",
5311 "SYS_160",
5312 "SYS_getfh", //161
5313 "SYS_162",
5314 "SYS_163",
5315 "SYS_164",
5316 "SYS_sysarch", //165
5317 "SYS_166",
5318 "SYS_167",
5319 "SYS_168",
5320 "SYS_169",
5321 "SYS_170",
5322 "SYS_171",
5323 "SYS_172",
5324 "SYS_pread", //173
5325 "SYS_pwrite", //174
5326 "SYS_175",
5327 "SYS_176",
5328 "SYS_177",
5329 "SYS_178",
5330 "SYS_179",
5331 "SYS_180",
5332 "SYS_setgid", //181
5333 "SYS_setegid", //182
5334 "SYS_seteuid", //183
5335 "SYS_lfs_bmapv", //184
5336 "SYS_lfs_markv", //185
5337 "SYS_lfs_segclean", //186
5338 "SYS_lfs_segwait", //187
5339 "SYS_188",
5340 "SYS_189",
5341 "SYS_190",
5342 "SYS_pathconf", //191
5343 "SYS_fpathconf", //192
5344 "SYS_swapctl", //193
5345 "SYS_getrlimit", //194
5346 "SYS_setrlimit", //195
5347 "SYS_getdirentries", //196
5348 "SYS_mmap", //197
5349 "SYS___syscall", //198
5350 "SYS_lseek", //199
5351 "SYS_truncate", //200
5352 "SYS_ftruncate", //201
5353 "SYS___sysctl", //202
5354 "SYS_mlock", //203
5355 "SYS_munlock", //204
5356 "SYS_205",
5357 "SYS_futimes", //206
5358 "SYS_getpgid", //207
5359 "SYS_xfspioctl", //208
5360 "SYS_209",
5361 "SYS_210",
5362 "SYS_211",
5363 "SYS_212",
5364 "SYS_213",
5365 "SYS_214",
5366 "SYS_215",
5367 "SYS_216",
5368 "SYS_217",
5369 "SYS_218",
5370 "SYS_219",
5371 "SYS_220",
5372 "SYS_semget", //221
5373 "SYS_222",
5374 "SYS_223",
5375 "SYS_224",
5376 "SYS_msgget", //225
5377 "SYS_msgsnd", //226
5378 "SYS_msgrcv", //227
5379 "SYS_shmat", //228
5380 "SYS_229",
5381 "SYS_shmdt", //230
5382 "SYS_231",
5383 "SYS_clock_gettime", //232
5384 "SYS_clock_settime", //233
5385 "SYS_clock_getres", //234
5386 "SYS_235",
5387 "SYS_236",
5388 "SYS_237",
5389 "SYS_238",
5390 "SYS_239",
5391 "SYS_nanosleep", //240
5392 "SYS_241",
5393 "SYS_242",
5394 "SYS_243",
5395 "SYS_244",
5396 "SYS_245",
5397 "SYS_246",
5398 "SYS_247",
5399 "SYS_248",
5400 "SYS_249",
5401 "SYS_minherit", //250
5402 "SYS_rfork", //251
5403 "SYS_poll", //252
5404 "SYS_issetugid", //253
5405 "SYS_lchown", //254
5406 "SYS_getsid", //255
5407 "SYS_msync", //256
5408 "SYS_257",
5409 "SYS_258",
5410 "SYS_259",
5411 "SYS_getfsstat", //260
5412 "SYS_statfs", //261
5413 "SYS_fstatfs", //262
5414 "SYS_pipe", //263
5415 "SYS_fhopen", //264
5416 "SYS_265",
5417 "SYS_fhstatfs", //266
5418 "SYS_preadv", //267
5419 "SYS_pwritev", //268
5420 "SYS_kqueue", //269
5421 "SYS_kevent", //270
5422 "SYS_mlockall", //271
5423 "SYS_munlockall", //272
5424 "SYS_getpeereid", //273
5425 "SYS_274",
5426 "SYS_275",
5427 "SYS_276",
5428 "SYS_277",
5429 "SYS_278",
5430 "SYS_279",
5431 "SYS_280",
5432 "SYS_getresuid", //281
5433 "SYS_setresuid", //282
5434 "SYS_getresgid", //283
5435 "SYS_setresgid", //284
5436 "SYS_285",
5437 "SYS_mquery", //286
5438 "SYS_closefrom", //287
5439 "SYS_sigaltstack", //288
5440 "SYS_shmget", //289
5441 "SYS_semop", //290
5442 "SYS_stat", //291
5443 "SYS_fstat", //292
5444 "SYS_lstat", //293
5445 "SYS_fhstat", //294
5446 "SYS___semctl", //295
5447 "SYS_shmctl", //296
5448 "SYS_msgctl", //297
5449 "SYS_MAXSYSCALL", //298
5450 //299
5451 //300
5452 };
5453 uint32_t uEAX;
5454 if (!LogIsEnabled())
5455 return;
5456 uEAX = CPUMGetGuestEAX(pVCpu);
5457 switch (uEAX)
5458 {
5459 default:
5460 if (uEAX < RT_ELEMENTS(apsz))
5461 {
5462 uint32_t au32Args[8] = {0};
5463 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5464 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5465 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5466 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5467 }
5468 else
5469 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5470 break;
5471 }
5472}
5473
5474
5475#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5476/**
5477 * The Dll main entry point (stub).
5478 */
5479bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5480{
5481 return true;
5482}
5483
/**
 * Minimal memcpy replacement for the no-CRT windows build.
 *
 * Plain forward byte copy; per the standard memcpy contract the regions
 * must not overlap.
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     Where to copy from.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t *pbDst = dst;
    const uint8_t *pbSrc = src; /* was assigned to a non-const pointer, discarding the qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5491
5492#endif
5493
5494void cpu_smm_update(CPUX86State *env)
5495{
5496}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette