VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@55362

Last change on this file since 55362 was 55048, checked in by vboxsync, 10 years ago

VMM,REM: Allocate the FPU/SSE/AVX/FUTURE state stuff. We need to use pointers to substates anyway and this will make CPUMCPU much smaller.
/* $Id: VBoxRecompiler.c 55048 2015-03-31 18:49:19Z vboxsync $ */
/** @file
 * VBox Recompiler - QEMU.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_REM
#include <stdio.h>      /* FILE */
#include "osdep.h"
#include "config.h"
#include "cpu.h"
#include "exec-all.h"
#include "ioport.h"

#include <VBox/vmm/rem.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/dbg.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include "REMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/param.h>
#include <VBox/err.h>

#include <VBox/log.h>
#include <iprt/alloca.h>
#include <iprt/semaphore.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/thread.h>
#include <iprt/string.h>

/* Don't wanna include everything. */
extern void cpu_exec_init_all(uintptr_t tb_size);
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUX86State *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
ram_addr_t get_phys_page_offset(target_ulong addr);
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
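
/* Illustrative usage sketch (not part of the original file): copying one
 * 80-bit register between two X86FPUMMX-sized locations. The variable names
 * below are assumptions made purely for the example:
 *
 *     X86FPUMMX RegCopy;
 *     REM_COPY_FPU_REG(&RegCopy, pSomeFpuReg); // pSomeFpuReg: assumed pointer to an 80-bit FPU register
 *
 * The macro only requires that both operands point at 80-bit register
 * storage; it compiles down to a single structure assignment.
 */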

/** How remR3RunLoggingStep operates. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING


/** Selector flag shift between qemu and VBox.
 * VBox shifts the qemu bits to the right. */
#define SEL_FLAGS_SHIFT     (8)
/** Mask applied to the shifted qemu selector flags to get the attributes VBox
 * (VT-x) needs. */
#define SEL_FLAGS_SMASK     UINT32_C(0x1F0FF)
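
/* Example of the conversion these two constants enable, exactly as it is
 * done in remR3CanExecuteRaw() further down in this file: a QEMU segment
 * flags dword is shifted right and masked to produce the VBox/VT-x
 * attribute format:
 *
 *     pCtx->cs.Attr.u = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
 */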


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static void     remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
static int      remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);

static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMCOUNTER    gStatCpuGetTSC;
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
static STAMCOUNTER    gaStatRefuseStale[6];
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;

/*
 * Global stuff.
 */

/** MMIO read callbacks. */
CPUReadMemoryFunc  *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks. */
CPUReadMemoryFunc  *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
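
/* Each of the four tables above has exactly three entries because QEMU
 * dispatches MMIO/handler accesses by the log2 of the access size: index 0
 * handles byte, index 1 word and index 2 dword accesses. The tables are
 * handed to cpu_register_io_memory() in REMR3Init() below. */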


#ifdef VBOX_WITH_DEBUGGER
/*
 * Debugger commands.
 */
static FNDBGCCMD remR3CmdDisasEnableStepping;

/** '.remstep' arguments. */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0U,        DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable single stepping with logged disassembly. "
                          "If no arguments are given, the current state is shown."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/**
 * Initializes the REM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if 0 /* just an annoyance at the moment. */
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",   STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",       STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",       STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",       STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",        STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",      STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",           STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",      STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",       STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",      STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",          STAMUNIT_OCCURENCES,     "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",    STAMUNIT_OCCURENCES,     "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",        STAMUNIT_OCCURENCES,     "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",      STAMUNIT_OCCURENCES,     "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",         STAMUNIT_OCCURENCES,     "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",    STAMUNIT_OCCURENCES,     "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",         STAMUNIT_OCCURENCES,     "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",      STAMUNIT_OCCURENCES,     "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",         STAMUNIT_OCCURENCES,     "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",    STAMUNIT_OCCURENCES,     "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES,   "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES",     STAMUNIT_OCCURENCES,     "Raw mode refused because of stale ES");
    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS",     STAMUNIT_OCCURENCES,     "Raw mode refused because of stale CS");
    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS",     STAMUNIT_OCCURENCES,     "Raw mode refused because of stale SS");
    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS",     STAMUNIT_OCCURENCES,     "Raw mode refused because of stale DS");
    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS",     STAMUNIT_OCCURENCES,     "Raw mode refused because of stale FS");
    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS",     STAMUNIT_OCCURENCES,     "Raw mode refused because of stale GS");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",            STAMUNIT_OCCURENCES,     "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",     STAMUNIT_OCCURENCES,     "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",        STAMUNIT_OCCURENCES,     "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",     STAMUNIT_OCCURENCES,     "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",          STAMUNIT_OCCURENCES,     "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES,  "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES,  "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES,  "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES,  "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES,  "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES,  "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
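
/* The loop at the end of REMR3Init() links all aHandlerNotifications
 * entries into a single free list: idxFreeList points at entry 0, each
 * entry's idxNext points at the following entry, and the last entry is
 * terminated with UINT32_MAX (the same value an empty idxPendingList
 * carries). A minimal sketch, assumed from the init code above, of how a
 * consumer would pop the free-list head:
 *
 *     uint32_t idx = pVM->rem.s.idxFreeList;
 *     if (idx != UINT32_MAX)
 *     {
 *         PREMHANDLERNOTIFICATION pNew = &pVM->rem.s.aHandlerNotifications[idx];
 *         pVM->rem.s.idxFreeList = pNew->idxNext;
 *         // ... fill in the notification and queue it on idxPendingList ...
 *     }
 */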


/**
 * Finalizes the REM initialization.
 *
 * This is called after all components, devices and drivers have
 * been initialized. Its main purpose is to finish the RAM related
 * initialization.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 */
REMR3DECL(int) REMR3InitFinalize(PVM pVM)
{
    int rc;

    /*
     * Ram size & dirty bit map.
     */
    Assert(!pVM->rem.s.fGCPhysLastRamFixed);
    pVM->rem.s.fGCPhysLastRamFixed = true;
#ifdef RT_STRICT
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
#else
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
#endif
    return rc;
}

/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int      rc = VINF_SUCCESS;
    RTGCPHYS cb;

    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes for the dirty page map\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up to the nearest 4GB of RAM and leave at least 64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes for the dirty page map\n", cbBitmapFull), VERR_NO_MEMORY);

        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }
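
    /* Worked example of the guarded sizing above (illustrative numbers):
     * with GCPhysLastRam at the 4GB boundary, phys_dirty_size is
     * 4G >> PAGE_SHIFT = 1MB (one dirty byte per page). Then
     * cbBitmapAligned = RT_ALIGN_32(1MB, 4KB) = 1MB and
     * cbBitmapFull = RT_ALIGN_32(1MB, 1MB) = 1MB; since the two are equal,
     * the first branch adds another 1MB of guard. RTMemProtect() then marks
     * everything from cbBitmapAligned up to cbBitmapFull inaccessible, and
     * the final pointer adjustment biases phys_dirty so the live bitmap
     * ends exactly where the guard region begins. */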

    /* initialize it. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}


/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    /*
     * Statistics.
     */
    STAMR3Deregister(pVM->pUVM, "/PROF/REM/*");
    STAMR3Deregister(pVM->pUVM, "/REM/*");

    return VINF_SUCCESS;
}


/**
 * The VM is being reset.
 *
 * For the REM component this means calling cpu_reset() and
 * reinitializing some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);   /* terminator */
}
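
/* The resulting saved-state unit layout, as written by remR3Save() above:
 *
 *     uint32_t  Env.hflags
 *     uint32_t  ~0                     (separator)
 *     uint32_t  fRawRing0              (CPU_RAW_RING0 indicator, 0 or 1)
 *     uint32_t  u32PendingInterrupt
 *     uint32_t  ~0                     (terminator)
 *
 * remR3Load() below consumes exactly this sequence for the current
 * REM_SAVED_STATE_VERSION; the 1.6 version additionally carried a
 * redundant CPU state block and the invalidated-page list. */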


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int      rc;
    PREM     pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}



#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM_RUN

/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int     rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool    fBp;

    /*
     * Lock the REM - we don't want anyone interrupting us
     * while stepping - and enable single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, it has to be disabled before we start stepping.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves.
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HM:
                /** @todo is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}


/**
 * Set a breakpoint using the REM facilities.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
    {
        LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
    return VERR_REM_NO_MORE_BP_SLOTS;
}


/**
 * Clears a breakpoint set by REMR3BreakpointSet().
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
    {
        LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
    return VERR_REM_BP_NOT_FOUND;
}


/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. It is intended to be called while in raw mode and
 * thus takes care of all the state syncing between REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HM;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint  *pBP;
                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HM\n"));
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}


/**
 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The Virtual CPU handle.
 */
static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    Assert(pVM->rem.s.fInREM);
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    cpu_single_step(&pVM->rem.s.Env, 1);
#else
    Assert(!pVM->rem.s.Env.singlestep_enabled);
#endif

    /*
     * Now we set the execute single instruction flag and enter the cpu_exec loop.
     */
    for (;;)
    {
        char szBuf[256];

        /*
         * Log the current registers state and instruction.
         */
        remR3StateUpdate(pVM, pVCpu);
        DBGFR3Info(pVM->pUVM, "cpumguest", NULL, NULL);
        szBuf[0] = '\0';
        rc = DBGFR3DisasInstrEx(pVM->pUVM,
                                pVCpu->idCpu,
                                0, /* Sel */  0, /* GCPtr */
                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                szBuf,
                                sizeof(szBuf),
                                NULL);
        if (RT_FAILURE(rc))
            RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
        RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);

        /*
         * Execute the instruction.
         */
        TMNotifyStartOfExecution(pVCpu);

        if (   pVM->rem.s.Env.exception_index < 0
            || pVM->rem.s.Env.exception_index > 256)
            pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        pVM->rem.s.Env.interrupt_request = 0;
#else
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
#endif
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
            || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
            pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
        RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n",
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        rc = cpu_exec(&pVM->rem.s.Env);

        RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        TMNotifyEndOfExecution(pVCpu);

        switch (rc)
        {
#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
            /*
             * The normal exit.
             */
            case EXCP_SINGLE_INSTR:
                if (   !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
                    && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                    continue;
                RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                            pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                rc = VINF_SUCCESS;
                break;

#else
            /*
             * The normal exit, check for breakpoints at PC just to be sure.
             */
#endif
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint  *pBP;
                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
                if (rc == VINF_EM_DBG_STEPPED)
                {
                    if (   !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
                        && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                        continue;

                    RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                                pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                    rc = VINF_SUCCESS;
                }
#endif
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
                rc = VINF_SUCCESS;
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HM rc=VINF_EM_RESCHEDULE_HM\n");
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
                rc = VINF_EM_RESCHEDULE;
                break;
        }
        break;
    }

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
//    cpu_single_step(&pVM->rem.s.Env, 0);
#else
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
#endif
    return rc;
}


/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                CPUBreakpoint  *pBP;
                RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HM:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HM\n"));
            rc = VINF_EM_RESCHEDULE_HM;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}


/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env             The CPU env struct.
 * @param   eip             The EIP to check this for (might differ from env->eip).
 * @param   fFlags          hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException     Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    return false;
#endif

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HMIsEnabled(env->pVM))
    {
#ifdef RT_OS_WINDOWS
        PCPUMCTX pCtx = alloca(sizeof(*pCtx));
#else
        CPUMCTX Ctx;
        PCPUMCTX pCtx = &Ctx;
#endif

        env->state |= CPU_RAW_HM;

        /*
         * The simple check first...
         */
        if (!EMIsHwVirtExecutionEnabled(env->pVM))
            return false;

        /*
         * Create partial context for HMR3CanExecuteGuest
         */
        pCtx->cr0 = env->cr[0];
        pCtx->cr3 = env->cr[3];
        pCtx->cr4 = env->cr[4];

        pCtx->tr.Sel      = env->tr.selector;
        pCtx->tr.ValidSel = env->tr.selector;
        pCtx->tr.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->tr.u64Base  = env->tr.base;
        pCtx->tr.u32Limit = env->tr.limit;
        pCtx->tr.Attr.u   = (env->tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->ldtr.Sel      = env->ldt.selector;
        pCtx->ldtr.ValidSel = env->ldt.selector;
        pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->ldtr.u64Base  = env->ldt.base;
        pCtx->ldtr.u32Limit = env->ldt.limit;
        pCtx->ldtr.Attr.u   = (env->ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->idtr.cbIdt = env->idt.limit;
        pCtx->idtr.pIdt  = env->idt.base;

        pCtx->gdtr.cbGdt = env->gdt.limit;
        pCtx->gdtr.pGdt  = env->gdt.base;

        pCtx->rsp = env->regs[R_ESP];
        pCtx->rip = env->eip;

        pCtx->eflags.u32 = env->eflags;

        pCtx->cs.Sel      = env->segs[R_CS].selector;
        pCtx->cs.ValidSel = env->segs[R_CS].selector;
        pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->cs.u64Base  = env->segs[R_CS].base;
        pCtx->cs.u32Limit = env->segs[R_CS].limit;
        pCtx->cs.Attr.u   = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->ds.Sel      = env->segs[R_DS].selector;
        pCtx->ds.ValidSel = env->segs[R_DS].selector;
        pCtx->ds.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->ds.u64Base  = env->segs[R_DS].base;
        pCtx->ds.u32Limit = env->segs[R_DS].limit;
        pCtx->ds.Attr.u   = (env->segs[R_DS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->es.Sel      = env->segs[R_ES].selector;
        pCtx->es.ValidSel = env->segs[R_ES].selector;
        pCtx->es.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->es.u64Base  = env->segs[R_ES].base;
        pCtx->es.u32Limit = env->segs[R_ES].limit;
        pCtx->es.Attr.u   = (env->segs[R_ES].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->fs.Sel      = env->segs[R_FS].selector;
        pCtx->fs.ValidSel = env->segs[R_FS].selector;
        pCtx->fs.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->fs.u64Base  = env->segs[R_FS].base;
        pCtx->fs.u32Limit = env->segs[R_FS].limit;
        pCtx->fs.Attr.u   = (env->segs[R_FS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->gs.Sel      = env->segs[R_GS].selector;
        pCtx->gs.ValidSel = env->segs[R_GS].selector;
        pCtx->gs.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->gs.u64Base  = env->segs[R_GS].base;
        pCtx->gs.u32Limit = env->segs[R_GS].limit;
        pCtx->gs.Attr.u   = (env->segs[R_GS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->ss.Sel      = env->segs[R_SS].selector;
        pCtx->ss.ValidSel = env->segs[R_SS].selector;
        pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->ss.u64Base  = env->segs[R_SS].base;
        pCtx->ss.u32Limit = env->segs[R_SS].limit;
        pCtx->ss.Attr.u   = (env->segs[R_SS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
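
        /* The six selector copies above all follow the same pattern. A
         * hypothetical helper (illustrative only; no such macro is defined
         * in this file) could express one copy as:
         *
         *     #define REM_COPY_SEL_TO_CTX(a_pCtx, a_Field, a_pEnv, a_iSeg) \
         *         do { \
         *             (a_pCtx)->a_Field.Sel      = (a_pEnv)->segs[a_iSeg].selector; \
         *             (a_pCtx)->a_Field.ValidSel = (a_pEnv)->segs[a_iSeg].selector; \
         *             (a_pCtx)->a_Field.fFlags   = CPUMSELREG_FLAGS_VALID; \
         *             (a_pCtx)->a_Field.u64Base  = (a_pEnv)->segs[a_iSeg].base; \
         *             (a_pCtx)->a_Field.u32Limit = (a_pEnv)->segs[a_iSeg].limit; \
         *             (a_pCtx)->a_Field.Attr.u   = ((a_pEnv)->segs[a_iSeg].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK; \
         *         } while (0)
         *
         * e.g. REM_COPY_SEL_TO_CTX(pCtx, cs, env, R_CS);
         */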
1480
1481 pCtx->msrEFER = env->efer;
1482
1483 /* Hardware accelerated raw-mode:
1484 *
1485 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1486 */
1487 if (HMR3CanExecuteGuest(env->pVM, pCtx) == true)
1488 {
1489 *piException = EXCP_EXECUTE_HM;
1490 return true;
1491 }
1492 return false;
1493 }
1494
1495 /*
1496 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1497 * or 32 bits protected mode ring 0 code
1498 *
1499 * The tests are ordered by the likelihood of being true during normal execution.
1500 */
1501 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1502 {
1503 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1504 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1505 return false;
1506 }
1507
1508#ifndef VBOX_RAW_V86
1509 if (fFlags & VM_MASK) {
1510 STAM_COUNTER_INC(&gStatRefuseVM86);
1511 Log2(("raw mode refused: VM_MASK\n"));
1512 return false;
1513 }
1514#endif
1515
1516 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1517 {
1518#ifndef DEBUG_bird
1519 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1520#endif
1521 return false;
1522 }
1523
1524 if (env->singlestep_enabled)
1525 {
1526 //Log2(("raw mode refused: Single step\n"));
1527 return false;
1528 }
1529
1530 if (!QTAILQ_EMPTY(&env->breakpoints))
1531 {
1532 //Log2(("raw mode refused: Breakpoints\n"));
1533 return false;
1534 }
1535
1536 if (!QTAILQ_EMPTY(&env->watchpoints))
1537 {
1538 //Log2(("raw mode refused: Watchpoints\n"));
1539 return false;
1540 }
1541
1542 u32CR0 = env->cr[0];
1543 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1544 {
1545 STAM_COUNTER_INC(&gStatRefusePaging);
1546 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1547 return false;
1548 }
1549
1550 if (env->cr[4] & CR4_PAE_MASK)
1551 {
1552 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1553 {
1554 STAM_COUNTER_INC(&gStatRefusePAE);
1555 return false;
1556 }
1557 }
1558
1559 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1560 {
1561 if (!EMIsRawRing3Enabled(env->pVM))
1562 return false;
1563
1564 if (!(env->eflags & IF_MASK))
1565 {
1566 STAM_COUNTER_INC(&gStatRefuseIF0);
1567 Log2(("raw mode refused: IF (RawR3)\n"));
1568 return false;
1569 }
1570
1571 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1572 {
1573 STAM_COUNTER_INC(&gStatRefuseWP0);
1574 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1575 return false;
1576 }
1577 }
1578 else
1579 {
1580 if (!EMIsRawRing0Enabled(env->pVM))
1581 return false;
1582
1583 // Let's start with pure 32 bits ring 0 code first
1584 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1585 {
1586 STAM_COUNTER_INC(&gStatRefuseCode16);
1587 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1588 return false;
1589 }
1590
1591 if (EMIsRawRing1Enabled(env->pVM))
1592 {
1593 /* Only ring 0 and 1 supervisor code. */
1594 if (((fFlags >> HF_CPL_SHIFT) & 3) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1595 {
1596 Log2(("raw r0 mode refused: CPL %d\n", (fFlags >> HF_CPL_SHIFT) & 3));
1597 return false;
1598 }
1599 }
1600 /* Only R0. */
1601 else if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1602 {
1603 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1604 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1605 return false;
1606 }
1607
1608 if (!(u32CR0 & CR0_WP_MASK))
1609 {
1610 STAM_COUNTER_INC(&gStatRefuseWP0);
1611 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1612 return false;
1613 }
1614
1615#ifdef VBOX_WITH_RAW_MODE
1616 if (PATMIsPatchGCAddr(env->pVM, eip))
1617 {
1618 Log2(("raw r0 mode forced: patch code\n"));
1619 *piException = EXCP_EXECUTE_RAW;
1620 return true;
1621 }
1622#endif
1623
1624#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1625 if (!(env->eflags & IF_MASK))
1626 {
1627 STAM_COUNTER_INC(&gStatRefuseIF0);
1628 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1629 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1630 return false;
1631 }
1632#endif
1633
1634#ifndef VBOX_WITH_RAW_RING1
1635 if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
1636 {
1637 Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
1638 return false;
1639 }
1640#endif
1641 env->state |= CPU_RAW_RING0;
1642 }
1643
1644 /*
1645 * Don't reschedule the first time we're called, because there might be
1646 * special reasons why we're here that are not covered by the above checks.
1647 */
1648 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1649 {
1650 Log2(("raw mode refused: first scheduling\n"));
1651 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1652 return false;
1653 }
1654
1655 /*
1656 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1657 */
1658 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1659 {
1660 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1661 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1662 return false;
1663 }
1664 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1665 {
1666 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1667 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1668 return false;
1669 }
1670 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1671 {
1672 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1673 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1674 return false;
1675 }
1676 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1677 {
1678 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1679 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1680 return false;
1681 }
1682 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1683 {
1684 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1685 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1686 return false;
1687 }
1688 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1689 {
1690 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1691 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1692 return false;
1693 }
1694
1695/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1696 *piException = EXCP_EXECUTE_RAW;
1697 return true;
1698}
1699
1700
1701#ifdef VBOX_WITH_RAW_MODE
1702/**
1703 * Fetches a code byte.
1704 *
1705 * @returns Success indicator (bool) for ease of use.
1706 * @param env The CPU environment structure.
1707 * @param GCPtrInstr Where to fetch code.
1708 * @param pu8Byte Where to store the byte on success.
1709 */
1710bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1711{
1712 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1713 if (RT_SUCCESS(rc))
1714 return true;
1715 return false;
1716}
1717#endif /* VBOX_WITH_RAW_MODE */
1718
1719
1720/**
1721 * Flush (or invalidate if you like) page table/dir entry.
1722 *
1723 * (invlpg instruction; tlb_flush_page)
1724 *
1725 * @param env Pointer to the CPU environment.
1726 * @param GCPtr The virtual address whose page table/dir entry should be invalidated.
1727 */
1728void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1729{
1730 PVM pVM = env->pVM;
1731 PCPUMCTX pCtx;
1732 int rc;
1733
1734 Assert(EMRemIsLockOwner(env->pVM));
1735
1736 /*
1737 * When we're replaying invlpg instructions or restoring a saved
1738 * state we disable this path.
1739 */
1740 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1741 return;
1742 LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1743 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1744
1745 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1746
1747 /*
1748 * Update the control registers before calling PGMFlushPage.
1749 */
1750 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1751 Assert(pCtx);
1752 pCtx->cr0 = env->cr[0];
1753 pCtx->cr3 = env->cr[3];
1754#ifdef VBOX_WITH_RAW_MODE
1755 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1756 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1757#endif
1758 pCtx->cr4 = env->cr[4];
1759
1760 /*
1761 * Let PGM do the rest.
1762 */
1763 Assert(env->pVCpu);
1764 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1765 if (RT_FAILURE(rc))
1766 {
1767 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1768 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1769 }
1770 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1771}
1772
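/*
 * Note: the CRx copying above matters because PGMInvalidatePage works against
 * the CPUM guest context rather than the QEMU state; handing PGM a stale
 * CR3/CR4 would presumably have it invalidate the wrong mapping.
 */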
1773
1774#ifndef REM_PHYS_ADDR_IN_TLB
1775/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
1776void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
1777{
1778 void *pv;
1779 int rc;
1780
1781
1782 /* Address must be aligned enough to fiddle with lower bits */
1783 Assert((physAddr & 0x3) == 0);
1784 /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/
1785
1786 STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
1787 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1788 STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
1789 Assert( rc == VINF_SUCCESS
1790 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1791 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1792 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
1793 if (RT_FAILURE(rc))
1794 return (void *)1;
1795 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1796 return (void *)((uintptr_t)pv | 2);
1797 return pv;
1798}
1799#endif /* REM_PHYS_ADDR_IN_TLB */
1800
1801
1802/**
1803 * Called from tlb_protect_code in order to write-monitor a code page.
1804 *
1805 * @param env Pointer to the CPU environment.
1806 * @param GCPtr Code page to monitor.
1807 */
1808void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1809{
1810#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1811 Assert(env->pVM->rem.s.fInREM);
1812 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1813 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1814 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1815 && !(env->eflags & VM_MASK) /* no V86 mode */
1816 && !HMIsEnabled(env->pVM))
1817 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1818#endif
1819}
1820
1821
1822/**
1823 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1824 *
1825 * @param env Pointer to the CPU environment.
1826 * @param GCPtr Code page to monitor.
1827 */
1828void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1829{
1830 Assert(env->pVM->rem.s.fInREM);
1831#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1832 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1833 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1834 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1835 && !(env->eflags & VM_MASK) /* no V86 mode */
1836 && !HMIsEnabled(env->pVM))
1837 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1838#endif
1839}
1840
1841
1842/**
1843 * Called when the CPU is initialized, when any of the CRx registers is
1844 * changed, or when the A20 line is modified.
1845 *
1846 * @param env Pointer to the CPU environment.
1847 * @param fGlobal Set if the flush is global.
1848 */
1849void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1850{
1851 PVM pVM = env->pVM;
1852 PCPUMCTX pCtx;
1853 Assert(EMRemIsLockOwner(pVM));
1854
1855 /*
1856 * When we're replaying invlpg instructions or restoring a saved
1857 * state we disable this path.
1858 */
1859 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1860 return;
1861 Assert(pVM->rem.s.fInREM);
1862
1863 /*
1864 * The caller doesn't check cr4, so we have to do that ourselves.
1865 */
1866 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1867 fGlobal = true;
1868 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1869
1870 /*
1871 * Update the control registers before calling PGMR3FlushTLB.
1872 */
1873 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1874 Assert(pCtx);
1875 pCtx->cr0 = env->cr[0];
1876 pCtx->cr3 = env->cr[3];
1877#ifdef VBOX_WITH_RAW_MODE
1878 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1879 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1880#endif
1881 pCtx->cr4 = env->cr[4];
1882
1883 /*
1884 * Let PGM do the rest.
1885 */
1886 Assert(env->pVCpu);
1887 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1888}
1889
1890
1891/**
1892 * Called when any of the cr0, cr4 or efer registers is updated.
1893 *
1894 * @param env Pointer to the CPU environment.
1895 */
1896void remR3ChangeCpuMode(CPUX86State *env)
1897{
1898 PVM pVM = env->pVM;
1899 uint64_t efer;
1900 PCPUMCTX pCtx;
1901 int rc;
1902
1903 /*
1904 * When we're replaying loads or restoring a saved
1905 * state this path is disabled.
1906 */
1907 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1908 return;
1909 Assert(pVM->rem.s.fInREM);
1910
1911 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1912 Assert(pCtx);
1913
1914 /*
1915 * Notify PGM about WP0 being enabled (like CPUSetGuestCR0 does).
1916 */
1917 if (((env->cr[0] ^ pCtx->cr0) & X86_CR0_WP) && (env->cr[0] & X86_CR0_WP))
1918 PGMCr0WpEnabled(env->pVCpu);
1919
1920 /*
1921 * Update the control registers before calling PGMChangeMode()
1922 * as it may need to map whatever cr3 is pointing to.
1923 */
1924 pCtx->cr0 = env->cr[0];
1925 pCtx->cr3 = env->cr[3];
1926#ifdef VBOX_WITH_RAW_MODE
1927 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1928 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1929#endif
1930 pCtx->cr4 = env->cr[4];
1931#ifdef TARGET_X86_64
1932 efer = env->efer;
1933 pCtx->msrEFER = efer;
1934#else
1935 efer = 0;
1936#endif
1937 Assert(env->pVCpu);
1938 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1939 if (rc != VINF_SUCCESS)
1940 {
1941 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1942 {
1943 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1944 remR3RaiseRC(env->pVM, rc);
1945 }
1946 else
1947 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1948 }
1949}
1950
1951
1952/**
1953 * Called from compiled code to run DMA.
1954 *
1955 * @param env Pointer to the CPU environment.
1956 */
1957void remR3DmaRun(CPUX86State *env)
1958{
1959 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1960 PDMR3DmaRun(env->pVM);
1961 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1962}
1963
1964
1965/**
1966 * Called from compiled code to schedule pending timers in the VMM.
1967 *
1968 * @param env Pointer to the CPU environment.
1969 */
1970void remR3TimersRun(CPUX86State *env)
1971{
1972 LogFlow(("remR3TimersRun:\n"));
1973 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1974 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1975 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1976 TMR3TimerQueuesDo(env->pVM);
1977 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1978 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1979}
1980
1981
1982/**
1983 * Records a trap occurrence.
1984 *
1985 * @returns VBox status code.
1986 * @param env Pointer to the CPU environment.
1987 * @param uTrap Trap number.
1988 * @param uErrorCode Error code.
1989 * @param pvNextEIP Next EIP.
1990 */
1991int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1992{
1993 PVM pVM = env->pVM;
1994#ifdef VBOX_WITH_STATISTICS
1995 static STAMCOUNTER s_aStatTrap[255];
1996 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1997#endif
1998
1999#ifdef VBOX_WITH_STATISTICS
2000 if (uTrap < 255)
2001 {
2002 if (!s_aRegisters[uTrap])
2003 {
2004 char szStatName[64];
2005 s_aRegisters[uTrap] = true;
2006 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
2007 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
2008 }
2009 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
2010 }
2011#endif
2012 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2013 if ( uTrap < 0x20
2014 && (env->cr[0] & X86_CR0_PE)
2015 && !(env->eflags & X86_EFL_VM))
2016 {
2017#ifdef DEBUG
2018 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
2019#endif
2020 if (pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
2021 {
2022 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2023 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
2024 return VERR_REM_TOO_MANY_TRAPS;
2025 }
2026 if (pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
2027 {
2028 Log(("remR3NotifyTrap: uTrap=%#x set as pending\n", uTrap));
2029 pVM->rem.s.cPendingExceptions = 1;
2030 }
2031 pVM->rem.s.uPendingException = uTrap;
2032 pVM->rem.s.uPendingExcptEIP = env->eip;
2033 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2034 }
2035 else
2036 {
2037 pVM->rem.s.cPendingExceptions = 0;
2038 pVM->rem.s.uPendingException = uTrap;
2039 pVM->rem.s.uPendingExcptEIP = env->eip;
2040 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2041 }
2042 return VINF_SUCCESS;
2043}
2044
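/*
 * Note: the 512-repeat threshold above is a heuristic circuit breaker. A
 * guest that keeps raising the same vector at the same EIP/CR2 is almost
 * certainly wedged in a fault loop, and VERR_REM_TOO_MANY_TRAPS gives EM a
 * chance to bail out rather than spin in the recompiler forever.
 */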
2045
2046/**
2047 * Clears the current active trap.
2048 *
2049 * @param pVM VM Handle.
2050 */
2051void remR3TrapClear(PVM pVM)
2052{
2053 pVM->rem.s.cPendingExceptions = 0;
2054 pVM->rem.s.uPendingException = 0;
2055 pVM->rem.s.uPendingExcptEIP = 0;
2056 pVM->rem.s.uPendingExcptCR2 = 0;
2057}
2058
2059
2060/**
2061 * Records previous call instruction addresses.
2062 *
2063 * @param env Pointer to the CPU environment.
2064 */
2065void remR3RecordCall(CPUX86State *env)
2066{
2067#ifdef VBOX_WITH_RAW_MODE
2068 CSAMR3RecordCallAddress(env->pVM, env->eip);
2069#endif
2070}
2071
2072
2073/**
2074 * Syncs the internal REM state with the VM.
2075 *
2076 * This must be called before REMR3Run() is invoked whenever the REM
2077 * state is not up to date. Calling it several times in a row is not
2078 * permitted.
2079 *
2080 * @returns VBox status code.
2081 *
2082 * @param pVM VM Handle.
2083 * @param pVCpu VMCPU Handle.
2084 *
2085 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2086 * not do this since the majority of the callers don't want any unnecessary
2087 * events pending that would immediately interrupt execution.
2088 */
2089REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2090{
2091 register const CPUMCTX *pCtx;
2092 register unsigned fFlags;
2093 unsigned i;
2094 TRPMEVENT enmType;
2095 uint8_t u8TrapNo;
2096 uint32_t uCpl;
2097 int rc;
2098
2099 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2100 Log2(("REMR3State:\n"));
2101
2102 pVM->rem.s.Env.pVCpu = pVCpu;
2103 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2104
2105 Assert(!pVM->rem.s.fInREM);
2106 pVM->rem.s.fInStateSync = true;
2107
2108 /*
2109 * If we have to flush TBs, do that immediately.
2110 */
2111 if (pVM->rem.s.fFlushTBs)
2112 {
2113 STAM_COUNTER_INC(&gStatFlushTBs);
2114 tb_flush(&pVM->rem.s.Env);
2115 pVM->rem.s.fFlushTBs = false;
2116 }
2117
2118 /*
2119 * Copy the registers which require no special handling.
2120 */
2121#ifdef TARGET_X86_64
2122 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2123 Assert(R_EAX == 0);
2124 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2125 Assert(R_ECX == 1);
2126 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2127 Assert(R_EDX == 2);
2128 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2129 Assert(R_EBX == 3);
2130 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2131 Assert(R_ESP == 4);
2132 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2133 Assert(R_EBP == 5);
2134 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2135 Assert(R_ESI == 6);
2136 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2137 Assert(R_EDI == 7);
2138 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2139 pVM->rem.s.Env.regs[8] = pCtx->r8;
2140 pVM->rem.s.Env.regs[9] = pCtx->r9;
2141 pVM->rem.s.Env.regs[10] = pCtx->r10;
2142 pVM->rem.s.Env.regs[11] = pCtx->r11;
2143 pVM->rem.s.Env.regs[12] = pCtx->r12;
2144 pVM->rem.s.Env.regs[13] = pCtx->r13;
2145 pVM->rem.s.Env.regs[14] = pCtx->r14;
2146 pVM->rem.s.Env.regs[15] = pCtx->r15;
2147
2148 pVM->rem.s.Env.eip = pCtx->rip;
2149
2150 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2151#else
2152 Assert(R_EAX == 0);
2153 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2154 Assert(R_ECX == 1);
2155 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2156 Assert(R_EDX == 2);
2157 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2158 Assert(R_EBX == 3);
2159 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2160 Assert(R_ESP == 4);
2161 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2162 Assert(R_EBP == 5);
2163 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2164 Assert(R_ESI == 6);
2165 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2166 Assert(R_EDI == 7);
2167 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2168 pVM->rem.s.Env.eip = pCtx->eip;
2169
2170 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2171#endif
2172
2173 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2174
2175 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2176 for (i = 0; i < 8; i++)
2177 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2178
2179#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2180 /*
2181 * Clear the halted hidden flag (the interrupt waking up the CPU may
2182 * have been dispatched in raw mode).
2183 */
2184 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2185#endif
2186
2187 /*
2188 * Replay invlpg? Only if we're not flushing the TLB.
2189 */
2190 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2191 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2192 if (pVM->rem.s.cInvalidatedPages)
2193 {
2194 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2195 {
2196 RTUINT i;
2197
2198 pVM->rem.s.fIgnoreCR3Load = true;
2199 pVM->rem.s.fIgnoreInvlPg = true;
2200 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2201 {
2202 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2203 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2204 }
2205 pVM->rem.s.fIgnoreInvlPg = false;
2206 pVM->rem.s.fIgnoreCR3Load = false;
2207 }
2208 pVM->rem.s.cInvalidatedPages = 0;
2209 }
2210
2211 /* Replay notification changes. */
2212 REMR3ReplayHandlerNotifications(pVM);
2213
2214 /* Update MSRs; before CRx registers! */
2215 pVM->rem.s.Env.efer = pCtx->msrEFER;
2216 pVM->rem.s.Env.star = pCtx->msrSTAR;
2217 pVM->rem.s.Env.pat = pCtx->msrPAT;
2218#ifdef TARGET_X86_64
2219 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2220 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2221 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2222 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2223
2224 /* Update the internal long mode active flag according to the new EFER value. */
2225 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2226 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2227 else
2228 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2229#endif
2230
2231 /* Update the inhibit IRQ mask. */
2232 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2233 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2234 {
2235 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2236 if (InhibitPC == pCtx->rip)
2237 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2238 else
2239 {
2240 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2241 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2242 }
2243 }
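 /* The HF_INHIBIT_IRQ_MASK flag models the one-instruction interrupt shadow
    that STI and MOV/POP SS cast on x86; it is only valid for the instruction
    immediately following the one that raised it, which is why it is dropped
    above once the recorded PC no longer matches the current rip. */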
2244
2245 /* Update the inhibit NMI mask. */
2246 pVM->rem.s.Env.hflags2 &= ~HF2_NMI_MASK;
2247 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2248 pVM->rem.s.Env.hflags2 |= HF2_NMI_MASK;
2249
2250 /*
2251 * Sync the A20 gate.
2252 */
2253 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2254 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2255 {
2256 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2257 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2258 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2259 }
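 /* Note: cpu_x86_set_a20 flushes the QEMU TLB, which would normally be fed
    back to PGM; bumping cIgnoreAll around the call suppresses that feedback
    since PGM is where this change originated. The current REM-side A20 state
    is inferred from bit 20 of the address mask, i.e. the gate is considered
    enabled iff (a20_mask & RT_BIT(20)) is set. */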
2260
2261 /*
2262 * Registers which are rarely changed and require special handling / order when changed.
2263 */
2264 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2265 | CPUM_CHANGED_CR4
2266 | CPUM_CHANGED_CR0
2267 | CPUM_CHANGED_CR3
2268 | CPUM_CHANGED_GDTR
2269 | CPUM_CHANGED_IDTR
2270 | CPUM_CHANGED_SYSENTER_MSR
2271 | CPUM_CHANGED_LDTR
2272 | CPUM_CHANGED_CPUID
2273 | CPUM_CHANGED_FPU_REM
2274 )
2275 )
2276 {
2277 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2278 {
2279 pVM->rem.s.fIgnoreCR3Load = true;
2280 tlb_flush(&pVM->rem.s.Env, true);
2281 pVM->rem.s.fIgnoreCR3Load = false;
2282 }
2283
2284 /* CR4 before CR0! */
2285 if (fFlags & CPUM_CHANGED_CR4)
2286 {
2287 pVM->rem.s.fIgnoreCR3Load = true;
2288 pVM->rem.s.fIgnoreCpuMode = true;
2289 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2290 pVM->rem.s.fIgnoreCpuMode = false;
2291 pVM->rem.s.fIgnoreCR3Load = false;
2292 }
2293
2294 if (fFlags & CPUM_CHANGED_CR0)
2295 {
2296 pVM->rem.s.fIgnoreCR3Load = true;
2297 pVM->rem.s.fIgnoreCpuMode = true;
2298 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2299 pVM->rem.s.fIgnoreCpuMode = false;
2300 pVM->rem.s.fIgnoreCR3Load = false;
2301 }
2302
2303 if (fFlags & CPUM_CHANGED_CR3)
2304 {
2305 pVM->rem.s.fIgnoreCR3Load = true;
2306 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2307 pVM->rem.s.fIgnoreCR3Load = false;
2308 }
2309
2310 if (fFlags & CPUM_CHANGED_GDTR)
2311 {
2312 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2313 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2314 }
2315
2316 if (fFlags & CPUM_CHANGED_IDTR)
2317 {
2318 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2319 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2320 }
2321
2322 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2323 {
2324 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2325 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2326 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2327 }
2328
2329 if (fFlags & CPUM_CHANGED_LDTR)
2330 {
2331 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2332 {
2333 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2334 pVM->rem.s.Env.ldt.newselector = 0;
2335 pVM->rem.s.Env.ldt.fVBoxFlags = pCtx->ldtr.fFlags;
2336 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2337 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2338 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
2339 }
2340 else
2341 {
2342 AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
2343 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2344 }
2345 }
2346
2347 if (fFlags & CPUM_CHANGED_CPUID)
2348 {
2349 uint32_t u32Dummy;
2350
2351 /*
2352 * Get the CPUID features.
2353 */
2354 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2355 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2356 }
2357
2358 /* Sync FPU state after CR4, CPUID and EFER (!). */
2359 if (fFlags & CPUM_CHANGED_FPU_REM)
2360 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87); /* 'save' is an excellent name. */
2361 }
2362
2363 /*
2364 * Sync TR unconditionally to make life simpler.
2365 */
2366 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2367 pVM->rem.s.Env.tr.newselector = 0;
2368 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags;
2369 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2370 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2371 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
2372 /* Note! do_interrupt will fault if the busy flag is still set... */ /** @todo so fix do_interrupt then! */
2373 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2374
2375 /*
2376 * Update selector registers.
2377 *
2378 * This must be done *after* we've synced the GDT, LDT and CRx registers
2379 * since we're reading the GDT/LDT in sync_seg. This can happen with a
2380 * saved state which took a quick dip into raw mode, for instance.
2381 *
2382 * CPL/Stack; note that we sync this one first, as the CPL might have changed.
2383 * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
2384 */
2385 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2386 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2387#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
2388 do \
2389 { \
2390 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
2391 { \
2392 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
2393 (a_pVBoxSReg)->Sel, \
2394 (a_pVBoxSReg)->u64Base, \
2395 (a_pVBoxSReg)->u32Limit, \
2396 ((a_pVBoxSReg)->Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT); \
2397 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
2398 } \
2399 /* This only-reload-if-changed stuff is the old approach; we should ditch it. */ \
2400 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
2401 { \
2402 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
2403 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
2404 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
2405 if ((a_pRemSReg)->newselector) \
2406 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
2407 } \
2408 else \
2409 (a_pRemSReg)->newselector = 0; \
2410 } while (0)
2411
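/*
 * For illustration, SYNC_IN_SREG(&pVM->rem.s.Env, CS, ..., &pCtx->cs) expands
 * to (roughly):
 *
 *     if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
 *     {
 *         cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs.Sel,
 *                                pCtx->cs.u64Base, pCtx->cs.u32Limit,
 *                                (pCtx->cs.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT);
 *         pVM->rem.s.Env.segs[R_CS].fVBoxFlags = pCtx->cs.fFlags;
 *     }
 *     else if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs.Sel)
 *         sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs.Sel); // lazy reload via GDT/LDT
 *     else
 *         pVM->rem.s.Env.segs[R_CS].newselector = 0;
 */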
2412 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
2413 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
2414 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
2415 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
2416 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
2417 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
2418 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2419 * be the same but not the base/limit. */
2420
2421 /*
2422 * Check for traps.
2423 */
2424 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2425 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2426 if (RT_SUCCESS(rc))
2427 {
2428#ifdef DEBUG
2429 if (u8TrapNo == 0x80)
2430 {
2431 remR3DumpLnxSyscall(pVCpu);
2432 remR3DumpOBsdSyscall(pVCpu);
2433 }
2434#endif
2435
2436 pVM->rem.s.Env.exception_index = u8TrapNo;
2437 if (enmType != TRPM_SOFTWARE_INT)
2438 {
2439 pVM->rem.s.Env.exception_is_int = 0;
2440#ifdef IEM_VERIFICATION_MODE /* Ugly hack, needs proper fixing. */
2441 pVM->rem.s.Env.exception_is_int = enmType == TRPM_HARDWARE_INT ? 0x42 : 0;
2442#endif
2443 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2444 }
2445 else
2446 {
2447 /*
2448 * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2449 * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2450 * for int3 and into.
2451 */
2452 pVM->rem.s.Env.exception_is_int = 1;
2453 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2454 /* int 3 may be generated by one-byte 0xcc */
2455 if (u8TrapNo == 3)
2456 {
2457 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2458 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2459 }
2460 /* int 4 may be generated by one-byte 0xce */
2461 else if (u8TrapNo == 4)
2462 {
2463 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2464 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2465 }
2466 }
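 /* For reference, the relevant encodings are: 0xCC = int3 (1 byte),
    0xCD ib = int N (2 bytes, the default assumed above), 0xCE = into
    (1 byte); only the two single-byte forms need this next_eip fixup. */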
2467
2468 /* get error code and cr2 if needed. */
2469 if (enmType == TRPM_TRAP)
2470 {
2471 switch (u8TrapNo)
2472 {
2473 case X86_XCPT_PF:
2474 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2475 /* fallthru */
2476 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2477 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2478 break;
2479
2480 case X86_XCPT_AC: case X86_XCPT_DF:
2481 default:
2482 pVM->rem.s.Env.error_code = 0;
2483 break;
2484 }
2485 }
2486 else
2487 pVM->rem.s.Env.error_code = 0;
2488
2489 /*
2490 * We can now reset the active trap since the recompiler is gonna have a go at it.
2491 */
2492 rc = TRPMResetTrap(pVCpu);
2493 AssertRC(rc);
2494 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2495 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2496 }
2497
2498 /*
2499 * Clear old interrupt request flags; Check for pending hardware interrupts.
2500 * (See @remark for why we don't check for other FFs.)
2501 */
2502 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2503 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2504 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2505 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2506
2507 /*
2508 * We're now in REM mode.
2509 */
2510 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2511 pVM->rem.s.fInREM = true;
2512 pVM->rem.s.fInStateSync = false;
2513 pVM->rem.s.cCanExecuteRaw = 0;
2514 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2515 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2516 return VINF_SUCCESS;
2517}
2518
2519
2520/**
2521 * Syncs back changes in the REM state to the VM state.
2522 *
2523 * This must be called after invoking REMR3Run().
2524 * Calling it several times in a row is not permitted.
2525 *
2526 * @returns VBox status code.
2527 *
2528 * @param pVM VM Handle.
2529 * @param pVCpu VMCPU Handle.
2530 */
2531REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2532{
2533 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2534 unsigned i;
2535 Assert(pCtx);
2536
2537 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2538 Log2(("REMR3StateBack:\n"));
2539 Assert(pVM->rem.s.fInREM);
2540
2541 /*
2542 * Copy back the registers.
2543 * This is done in the order they are declared in the CPUMCTX structure.
2544 */
2545
2546 /** @todo FOP */
2547 /** @todo FPUIP */
2548 /** @todo CS */
2549 /** @todo FPUDP */
2550 /** @todo DS */
2551
2552 /** @todo check if FPU/XMM was actually used in the recompiler */
2553 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87);
2554//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2555
2556#ifdef TARGET_X86_64
2557 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2558 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2559 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2560 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2561 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2562 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2563 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2564 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2565 pCtx->r8 = pVM->rem.s.Env.regs[8];
2566 pCtx->r9 = pVM->rem.s.Env.regs[9];
2567 pCtx->r10 = pVM->rem.s.Env.regs[10];
2568 pCtx->r11 = pVM->rem.s.Env.regs[11];
2569 pCtx->r12 = pVM->rem.s.Env.regs[12];
2570 pCtx->r13 = pVM->rem.s.Env.regs[13];
2571 pCtx->r14 = pVM->rem.s.Env.regs[14];
2572 pCtx->r15 = pVM->rem.s.Env.regs[15];
2573
2574 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2575
2576#else
2577 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2578 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2579 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2580 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2581 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2582 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2583 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2584
2585 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2586#endif
2587
2588#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2589 do \
2590 { \
2591 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2592 if (!pVM->rem.s.Env.segs[R_##a_SREG].newselector) \
2593 { \
2594 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2595 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2596 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2597 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2598 /* Note! QEmu saves the 2nd dword of the descriptor; we (VT-x/AMD-V) keep only the attributes! */ \
2599 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK; \
2600 } \
2601 else \
2602 { \
2603 pCtx->a_sreg.fFlags = 0; \
2604 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2605 } \
2606 } while (0)
2607
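/*
 * For illustration, SYNC_BACK_SREG(es, ES) copies the QEMU hidden selector
 * parts back into pCtx->es, converting the QEMU descriptor flags back into a
 * VT-x/AMD-V style attribute word, essentially:
 *
 *     pCtx->es.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> SEL_FLAGS_SHIFT)
 *                     & SEL_FLAGS_SMASK;
 *
 * A segment still waiting for a lazy selector load is marked invalid instead
 * (fFlags = 0), so later consumers reload it from the descriptor tables.
 */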
2608 SYNC_BACK_SREG(es, ES);
2609 SYNC_BACK_SREG(cs, CS);
2610 SYNC_BACK_SREG(ss, SS);
2611 SYNC_BACK_SREG(ds, DS);
2612 SYNC_BACK_SREG(fs, FS);
2613 SYNC_BACK_SREG(gs, GS);
2614
2615#ifdef TARGET_X86_64
2616 pCtx->rip = pVM->rem.s.Env.eip;
2617 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2618#else
2619 pCtx->eip = pVM->rem.s.Env.eip;
2620 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2621#endif
2622
2623 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2624 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2625 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2626#ifdef VBOX_WITH_RAW_MODE
2627 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2628 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2629#endif
2630 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2631
2632 for (i = 0; i < 8; i++)
2633 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2634
2635 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2636 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2637 {
2638 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2639 STAM_COUNTER_INC(&gStatREMGDTChange);
2640#ifdef VBOX_WITH_RAW_MODE
2641 if (!HMIsEnabled(pVM))
2642 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2643#endif
2644 }
2645
2646 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2647 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2648 {
2649 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2650 STAM_COUNTER_INC(&gStatREMIDTChange);
2651#ifdef VBOX_WITH_RAW_MODE
2652 if (!HMIsEnabled(pVM))
2653 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2654#endif
2655 }
2656
2657 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2658 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2659 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2660 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2661 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2662 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2663 )
2664 {
2665 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2666 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2667 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2668 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2669 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2670 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2671 STAM_COUNTER_INC(&gStatREMLDTRChange);
2672#ifdef VBOX_WITH_RAW_MODE
2673 if (!HMIsEnabled(pVM))
2674 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2675#endif
2676 }
2677
2678 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2679 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2680 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2681 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2682 /* Qemu and AMD/Intel have different ideas about the busy flag ... */ /** @todo just fix qemu! */
2683 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2684 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT
2685 : 0)
2686 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2687 )
2688 {
2689 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2690 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2691 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2692 (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2693 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT : 0));
2694 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2695 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2696 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2697 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2698 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2699 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2700 if (pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE)
2701 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> SEL_FLAGS_SHIFT;
2702 STAM_COUNTER_INC(&gStatREMTRChange);
2703#ifdef VBOX_WITH_RAW_MODE
2704 if (!HMIsEnabled(pVM))
2705 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2706#endif
2707 }
2708
2709 /* Sysenter MSR */
2710 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2711 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2712 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2713
2714 /* System MSRs. */
2715 pCtx->msrEFER = pVM->rem.s.Env.efer;
2716 pCtx->msrSTAR = pVM->rem.s.Env.star;
2717 pCtx->msrPAT = pVM->rem.s.Env.pat;
2718#ifdef TARGET_X86_64
2719 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2720 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2721 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2722 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2723#endif
2724
2725 /* Inhibit interrupt flag. */
2726 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2727 {
2728 Log(("Settings VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2729 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2730 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2731 }
2732 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2733 {
2734 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2735 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2736 }
2737
2738 /* Inhibit NMI flag. */
2739 if (pVM->rem.s.Env.hflags2 & HF2_NMI_MASK)
2740 {
2741 Log(("Settings VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2742 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2743 }
2744 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2745 {
2746 Log(("Clearing VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2747 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2748 }
2749
2750 remR3TrapClear(pVM);
2751
2752 /*
2753 * Check for traps.
2754 */
2755 if ( pVM->rem.s.Env.exception_index >= 0
2756 && pVM->rem.s.Env.exception_index < 256)
2757 {
2758 /* This cannot be a hardware-interrupt because exception_index < EXCP_INTERRUPT. */
2759 int rc;
2760
2761 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2762 TRPMEVENT enmType = pVM->rem.s.Env.exception_is_int ? TRPM_SOFTWARE_INT : TRPM_TRAP;
2763 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, enmType);
2764 AssertRC(rc);
2765 if (enmType == TRPM_TRAP)
2766 {
2767 switch (pVM->rem.s.Env.exception_index)
2768 {
2769 case X86_XCPT_PF:
2770 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2771 /* fallthru */
2772 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2773 case X86_XCPT_AC: case X86_XCPT_DF: /* 0 */
2774 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2775 break;
2776 }
2777 }
2778 }
2779
2780 /*
2781 * We're no longer in REM mode.
2782 */
2783 CPUMR3RemLeave(pVCpu,
2784 HMIsEnabled(pVM)
2785 || ( pVM->rem.s.Env.segs[R_SS].newselector
2786 | pVM->rem.s.Env.segs[R_GS].newselector
2787 | pVM->rem.s.Env.segs[R_FS].newselector
2788 | pVM->rem.s.Env.segs[R_ES].newselector
2789 | pVM->rem.s.Env.segs[R_DS].newselector
2790 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2791 );
2792 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2793 pVM->rem.s.fInREM = false;
2794 pVM->rem.s.pCtx = NULL;
2795 pVM->rem.s.Env.pVCpu = NULL;
2796 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2797 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2798 return VINF_SUCCESS;
2799}
2800
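/*
 * Taken together, REMR3State / REMR3Run / REMR3StateBack make up the
 * recompiler leg of the EM execution loop. A minimal sketch of the calling
 * pattern (FF checks and error handling omitted; the real loop lives in EM):
 *
 *     int rc = REMR3State(pVM, pVCpu);          // VMM -> REM.
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = REMR3Run(pVM, pVCpu);            // execute recompiled code.
 *         int rc2 = REMR3StateBack(pVM, pVCpu); // REM -> VMM, always paired.
 *         if (RT_SUCCESS(rc))
 *             rc = rc2;
 *     }
 */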
2801
2802/**
2803 * This is called by the disassembler when it wants to update the CPU state
2804 * before, for instance, doing a register dump.
2805 */
2806static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2807{
2808 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2809 unsigned i;
2810
2811 Assert(pVM->rem.s.fInREM);
2812
2813 /*
2814 * Copy back the registers.
2815 * This is done in the order they are declared in the CPUMCTX structure.
2816 */
2817
2818 PX86FXSTATE pFpuCtx = &pCtx->pXStateR3->x87;
2819 /** @todo FOP */
2820 /** @todo FPUIP */
2821 /** @todo CS */
2822 /** @todo FPUDP */
2823 /** @todo DS */
2824 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2825 pFpuCtx->MXCSR = 0;
2826 pFpuCtx->MXCSR_MASK = 0;
2827
2828 /** @todo check if FPU/XMM was actually used in the recompiler */
2829 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)pFpuCtx);
2830//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2831
2832#ifdef TARGET_X86_64
2833 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2834 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2835 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2836 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2837 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2838 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2839 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2840 pCtx->r8 = pVM->rem.s.Env.regs[8];
2841 pCtx->r9 = pVM->rem.s.Env.regs[9];
2842 pCtx->r10 = pVM->rem.s.Env.regs[10];
2843 pCtx->r11 = pVM->rem.s.Env.regs[11];
2844 pCtx->r12 = pVM->rem.s.Env.regs[12];
2845 pCtx->r13 = pVM->rem.s.Env.regs[13];
2846 pCtx->r14 = pVM->rem.s.Env.regs[14];
2847 pCtx->r15 = pVM->rem.s.Env.regs[15];
2848
2849 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2850#else
2851 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2852 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2853 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2854 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2855 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2856 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2857 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2858
2859 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2860#endif
2861
2862 SYNC_BACK_SREG(es, ES);
2863 SYNC_BACK_SREG(cs, CS);
2864 SYNC_BACK_SREG(ss, SS);
2865 SYNC_BACK_SREG(ds, DS);
2866 SYNC_BACK_SREG(fs, FS);
2867 SYNC_BACK_SREG(gs, GS);
2868
2869#ifdef TARGET_X86_64
2870 pCtx->rip = pVM->rem.s.Env.eip;
2871 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2872#else
2873 pCtx->eip = pVM->rem.s.Env.eip;
2874 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2875#endif
2876
2877 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2878 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2879 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2880#ifdef VBOX_WITH_RAW_MODE
2881 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2882 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2883#endif
2884 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2885
2886 for (i = 0; i < 8; i++)
2887 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2888
2889 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2890 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2891 {
2892 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2893 STAM_COUNTER_INC(&gStatREMGDTChange);
2894#ifdef VBOX_WITH_RAW_MODE
2895 if (!HMIsEnabled(pVM))
2896 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2897#endif
2898 }
2899
2900 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2901 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2902 {
2903 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2904 STAM_COUNTER_INC(&gStatREMIDTChange);
2905#ifdef VBOX_WITH_RAW_MODE
2906 if (!HMIsEnabled(pVM))
2907 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2908#endif
2909 }
2910
2911 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2912 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2913 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2914 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2915 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2916 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2917 )
2918 {
2919 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2920 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2921 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2922 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2923 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2924 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2925 STAM_COUNTER_INC(&gStatREMLDTRChange);
2926#ifdef VBOX_WITH_RAW_MODE
2927 if (!HMIsEnabled(pVM))
2928 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2929#endif
2930 }
2931
2932 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2933 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2934 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2935 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2936 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2937 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2938 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT
2939 : 0)
2940 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2941 )
2942 {
2943 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2944 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2945 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2946 (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2947 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT : 0));
2948 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2949 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2950 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2951 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2952 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2953 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2954 if (pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE)
2955 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> SEL_FLAGS_SHIFT;
2956 STAM_COUNTER_INC(&gStatREMTRChange);
2957#ifdef VBOX_WITH_RAW_MODE
2958 if (!HMIsEnabled(pVM))
2959 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2960#endif
2961 }
2962
2963 /* Sysenter MSR */
2964 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2965 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2966 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2967
2968 /* System MSRs. */
2969 pCtx->msrEFER = pVM->rem.s.Env.efer;
2970 pCtx->msrSTAR = pVM->rem.s.Env.star;
2971 pCtx->msrPAT = pVM->rem.s.Env.pat;
2972#ifdef TARGET_X86_64
2973 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2974 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2975 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2976 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2977#endif
2978
2979}
2980
2981
2982/**
2983 * Update the VMM state information if we're currently in REM.
2984 *
2985 * This method is used by DBGF and PDM devices when there is any uncertainty about whether
2986 * we're currently executing in REM and the VMM state is invalid. This method will of
2987 * course check that we're executing in REM before syncing any data over to the VMM.
2988 *
2989 * @param pVM The VM handle.
2990 * @param pVCpu The VMCPU handle.
2991 */
2992REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2993{
2994 if (pVM->rem.s.fInREM)
2995 remR3StateUpdate(pVM, pVCpu);
2996}
2997
2998
2999#undef LOG_GROUP
3000#define LOG_GROUP LOG_GROUP_REM
3001
3002
3003/**
3004 * Notify the recompiler about Address Gate 20 state change.
3005 *
3006 * This notification is required since A20 gate changes are
3007 * initiated from a device driver and the VM might just as
3008 * well be in REM mode as in RAW mode.
3009 *
3010 * @param pVM VM handle.
3011 * @param pVCpu VMCPU handle.
3012 * @param fEnable True if the gate should be enabled.
3013 * False if the gate should be disabled.
3014 */
3015REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
3016{
3017 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
3018 VM_ASSERT_EMT(pVM);
3019
3020 /** @todo SMP and the A20 gate... */
3021 if (pVM->rem.s.Env.pVCpu == pVCpu)
3022 {
3023 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3024 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
3025 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3026 }
3027}
3028
3029
3030/**
3031 * Replays the handler notification changes.
3032 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
3033 *
3034 * @param pVM VM handle.
3035 */
3036REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
3037{
3038 /*
3039 * Replay the flushes.
3040 */
3041 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3042 VM_ASSERT_EMT(pVM);
3043
3044 /** @todo this isn't ensuring correct replay order. */
3045 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3046 {
3047 uint32_t idxNext;
3048 uint32_t idxRevHead;
3049 uint32_t idxHead;
3050#ifdef VBOX_STRICT
3051 int32_t c = 0;
3052#endif
3053
3054 /* Lockless purging of pending notifications. */
3055 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3056 if (idxHead == UINT32_MAX)
3057 return;
3058 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3059
3060 /*
3061 * Reverse the list to process it in FIFO order.
3062 */
3063 idxRevHead = UINT32_MAX;
3064 do
3065 {
3066 /* Save the index of the next rec. */
3067 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3068 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3069 /* Push the record onto the reversed list. */
3070 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3071 idxRevHead = idxHead;
3072 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3073 /* Advance. */
3074 idxHead = idxNext;
3075 } while (idxHead != UINT32_MAX);
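 /* Example: records pushed in the order A, B, C ended up linked C -> B -> A
    by the lockless producer; after the loop above idxRevHead heads the list
    A -> B -> C, restoring FIFO processing order. */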
3076
3077 /*
3078 * Loop thru the list, reinserting the records into the free list as they are
3079 * processed, to avoid having other EMTs run out of entries while we're flushing.
3080 */
3081 idxHead = idxRevHead;
3082 do
3083 {
3084 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3085 uint32_t idxCur;
3086 Assert(--c >= 0);
3087
3088 switch (pCur->enmKind)
3089 {
3090 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3091 remR3NotifyHandlerPhysicalRegister(pVM,
3092 pCur->u.PhysicalRegister.enmType,
3093 pCur->u.PhysicalRegister.GCPhys,
3094 pCur->u.PhysicalRegister.cb,
3095 pCur->u.PhysicalRegister.fHasHCHandler);
3096 break;
3097
3098 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3099 remR3NotifyHandlerPhysicalDeregister(pVM,
3100 pCur->u.PhysicalDeregister.enmType,
3101 pCur->u.PhysicalDeregister.GCPhys,
3102 pCur->u.PhysicalDeregister.cb,
3103 pCur->u.PhysicalDeregister.fHasHCHandler,
3104 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3105 break;
3106
3107 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3108 remR3NotifyHandlerPhysicalModify(pVM,
3109 pCur->u.PhysicalModify.enmType,
3110 pCur->u.PhysicalModify.GCPhysOld,
3111 pCur->u.PhysicalModify.GCPhysNew,
3112 pCur->u.PhysicalModify.cb,
3113 pCur->u.PhysicalModify.fHasHCHandler,
3114 pCur->u.PhysicalModify.fRestoreAsRAM);
3115 break;
3116
3117 default:
3118 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3119 break;
3120 }
3121
3122 /*
3123 * Advance idxHead.
3124 */
3125 idxCur = idxHead;
3126 idxHead = pCur->idxNext;
3127 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3128
3129 /*
3130 * Put the record back into the free list.
3131 */
3132 do
3133 {
3134 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3135 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3136 ASMCompilerBarrier();
3137 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
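 /* This is the classic lock-free stack push: read the free list head, point
    our record at it, and publish with a compare-and-swap that only succeeds
    if the head is still the one we read; on contention we simply retry. */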
3138 } while (idxHead != UINT32_MAX);
3139
3140#ifdef VBOX_STRICT
3141 if (pVM->cCpus == 1)
3142 {
3143 unsigned c;
3144 /* Check that all records are now on the free list. */
3145 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3146 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3147 c++;
3148 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3149 }
3150#endif
3151 }
3152}
3153
3154
3155/**
3156 * Notify REM about changed code page.
3157 *
3158 * @returns VBox status code.
3159 * @param pVM VM handle.
3160 * @param pVCpu VMCPU handle.
3161 * @param pvCodePage Code page address.
3162 */
3163REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3164{
3165#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3166 int rc;
3167 RTGCPHYS PhysGC;
3168 uint64_t flags;
3169
3170 VM_ASSERT_EMT(pVM);
3171
3172 /*
3173 * Get the physical page address.
3174 */
3175 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3176 if (rc == VINF_SUCCESS)
3177 {
3178 /*
3179 * Sync the required registers and flush the whole page.
3180 * (Easier to do the whole page than to notify it about each physical
3181 * byte that was changed.)
3182 */
3183 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3184 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3185 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3186 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3187
3188 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3189 }
3190#endif
3191 return VINF_SUCCESS;
3192}
3193
3194
3195/**
3196 * Notification about a successful MMR3PhysRegister() call.
3197 *
3198 * @param pVM VM handle.
3199 * @param GCPhys The physical address of the RAM.
3200 * @param cb Size of the memory.
3201 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3202 */
3203REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3204{
3205 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3206 VM_ASSERT_EMT(pVM);
3207
3208 /*
3209 * Validate input - we trust the caller.
3210 */
3211 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3212 Assert(cb);
3213 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3214 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3215
3216 /*
3217 * Base ram? Update GCPhysLastRam.
3218 */
3219 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3220 {
3221 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3222 {
3223 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3224 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3225 }
3226 }
3227
3228 /*
3229 * Register the ram.
3230 */
3231 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3232
3233 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3234 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3235 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3236
3237 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3238}
3239
3240
3241/**
3242 * Notification about a successful MMR3PhysRomRegister() call.
3243 *
3244 * @param pVM VM handle.
3245 * @param GCPhys The physical address of the ROM.
3246 * @param cb The size of the ROM.
3247 * @param pvCopy Pointer to the ROM copy.
3248 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3249 * This function will be called whenever the protection of the
3250 * shadow ROM changes (at reset and end of POST).
3251 */
3252REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3253{
3254 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3255 VM_ASSERT_EMT(pVM);
3256
3257 /*
3258 * Validate input - we trust the caller.
3259 */
3260 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3261 Assert(cb);
3262 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3263
3264 /*
3265 * Register the rom.
3266 */
3267 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3268
3269 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3270 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3271 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3272
3273 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3274}
3275
3276
3277/**
3278 * Notification about a successful memory deregistration or reservation.
3279 *
3280 * @param pVM VM Handle.
3281 * @param GCPhys Start physical address.
3282 * @param cb The size of the range.
3283 */
3284REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3285{
3286 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3287 VM_ASSERT_EMT(pVM);
3288
3289 /*
3290 * Validate input - we trust the caller.
3291 */
3292 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3293 Assert(cb);
3294 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3295
3296 /*
3297 * Unassign the memory.
3298 */
3299 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3300
3301 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3302 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3303 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3304
3305 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3306}
3307
3308
3309/**
3310 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3311 *
3312 * @param pVM VM Handle.
3313 * @param enmType Handler type.
3314 * @param GCPhys Handler range address.
3315 * @param cb Size of the handler range.
3316 * @param fHasHCHandler Set if the handler has a HC callback function.
3317 *
3318 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3319 * Handler memory type to memory which has no HC handler.
3320 */
3321static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3322{
3323 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3324 enmType, GCPhys, cb, fHasHCHandler));
3325
3326 VM_ASSERT_EMT(pVM);
3327 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3328 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3329
3330
3331 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3332
3333 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3334 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3335 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3336 else if (fHasHCHandler)
3337 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3338 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3339
3340 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3341}
3342
3343/**
3344 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3345 *
3346 * @param pVM VM Handle.
3347 * @param enmType Handler type.
3348 * @param GCPhys Handler range address.
3349 * @param cb Size of the handler range.
3350 * @param fHasHCHandler Set if the handler has a HC callback function.
3351 *
3352 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3353 * Handler memory type to memory which has no HC handler.
3354 */
3355REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3356{
3357 REMR3ReplayHandlerNotifications(pVM);
3358
3359 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3360}
3361
3362/**
3363 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3364 *
3365 * @param pVM VM Handle.
3366 * @param enmType Handler type.
3367 * @param GCPhys Handler range address.
3368 * @param cb Size of the handler range.
3369 * @param fHasHCHandler Set if the handler has a HC callback function.
3370 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3371 */
3372static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3373{
3374 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3375 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3376 VM_ASSERT_EMT(pVM);
3377
3378
3379 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3380
3381 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3382 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3383 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3384 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3385 else if (fHasHCHandler)
3386 {
3387 if (!fRestoreAsRAM)
3388 {
3389 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3390 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3391 }
3392 else
3393 {
3394 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3395 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3396 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3397 }
3398 }
3399 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3400
3401 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3402}
3403
3404/**
3405 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3406 *
3407 * @param pVM VM Handle.
3408 * @param enmType Handler type.
3409 * @param GCPhys Handler range address.
3410 * @param cb Size of the handler range.
3411 * @param fHasHCHandler Set if the handler has a HC callback function.
3412 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3413 */
3414REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3415{
3416 REMR3ReplayHandlerNotifications(pVM);
3417 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3418}
3419
3420
3421/**
3422 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3423 *
3424 * @param pVM VM Handle.
3425 * @param enmType Handler type.
3426 * @param GCPhysOld Old handler range address.
3427 * @param GCPhysNew New handler range address.
3428 * @param cb Size of the handler range.
3429 * @param fHasHCHandler Set if the handler has a HC callback function.
3430 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3431 */
3432static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3433{
3434 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3435 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3436 VM_ASSERT_EMT(pVM);
3437 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3438
3439 if (fHasHCHandler)
3440 {
3441 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3442
3443 /*
3444 * Reset the old page.
3445 */
3446 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3447 if (!fRestoreAsRAM)
3448 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3449 else
3450 {
3451 /* This is not perfect, but it'll do for PD monitoring... */
3452 Assert(cb == PAGE_SIZE);
3453 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3454 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3455 }
3456
3457 /*
3458 * Update the new page.
3459 */
3460 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3461 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3462 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3463 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3464
3465 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3466 }
3467}
3468
3469/**
3470 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3471 *
3472 * @param pVM VM Handle.
3473 * @param enmType Handler type.
3474 * @param GCPhysOld Old handler range address.
3475 * @param GCPhysNew New handler range address.
3476 * @param cb Size of the handler range.
3477 * @param fHasHCHandler Set if the handler has a HC callback function.
3478 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3479 */
3480REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3481{
3482 REMR3ReplayHandlerNotifications(pVM);
3483
3484 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3485}
3486
3487/**
3488 * Checks if we're handling access to this page or not.
3489 *
3490 * @returns true if we're trapping access.
3491 * @returns false if we aren't.
3492 * @param pVM The VM handle.
3493 * @param GCPhys The physical address.
3494 *
3495 * @remark This function will only work correctly in VBOX_STRICT builds!
3496 */
3497REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3498{
3499#ifdef VBOX_STRICT
3500 ram_addr_t off;
3501 REMR3ReplayHandlerNotifications(pVM);
3502
3503 off = get_phys_page_offset(GCPhys);
3504 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3505 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3506 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3507#else
3508 return false;
3509#endif
3510}
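
/* Hedged usage sketch (example only): callers would typically use this query
 * in strict-build assertions, since it unconditionally returns false in
 * non-strict builds. GCPhysPage is an assumed parameter. */
#if 0 /* example only */
static void remR3ExampleCheckHandled(PVM pVM, RTGCPHYS GCPhysPage)
{
# ifdef VBOX_STRICT
    Assert(REMR3IsPageAccessHandled(pVM, GCPhysPage));
# endif
}
#endif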
3511
3512
3513/**
3514 * Deals with a rare case in get_phys_addr_code where the code
3515 * is being monitored.
3516 *
3517 * It could also be an MMIO page, in which case we will raise a fatal error.
3518 *
3519 * @returns The physical address corresponding to addr.
3520 * @param env The cpu environment.
3521 * @param addr The virtual address.
3522 * @param pTLBEntry The TLB entry.
3523 */
3524target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3525 target_ulong addr,
3526 CPUTLBEntry *pTLBEntry,
3527 target_phys_addr_t ioTLBEntry)
3528{
3529 PVM pVM = env->pVM;
3530
3531 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3532 {
3533 /* If code memory is being monitored, the appropriate IOTLB entry will have
3534 the handler IO type, and the addend will provide the real physical address
3535 whether or not we store the VA in the TLB, as handlers are always passed the PA. */
3536 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3537 return ret;
3538 }
3539 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3540 "*** handlers\n",
3541 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3542 DBGFR3Info(pVM->pUVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3543 LogRel(("*** mmio\n"));
3544 DBGFR3Info(pVM->pUVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3545 LogRel(("*** phys\n"));
3546 DBGFR3Info(pVM->pUVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3547 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3548 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3549 AssertFatalFailed();
3550}
3551
3552/**
3553 * Read guest RAM and ROM.
3554 *
3555 * @param SrcGCPhys The source address (guest physical).
3556 * @param pvDst The destination address.
3557 * @param cb Number of bytes to read.
3558 */
3559void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3560{
3561 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3562 VBOX_CHECK_ADDR(SrcGCPhys);
3563 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3564#ifdef VBOX_DEBUG_PHYS
3565 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3566#endif
3567 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3568}
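
/* Hedged usage sketch (example only): remR3PhysRead and the typed helpers
 * below go through PGM, so they work for both RAM and ROM. Reading a
 * descriptor table entry might look like this; GCPhysGdt and uSel are
 * assumed inputs. */
#if 0 /* example only */
static void remR3ExamplePhysRead(RTGCPHYS GCPhysGdt, RTSEL uSel)
{
    X86DESC Desc;
    remR3PhysRead(GCPhysGdt + (uSel & X86_SEL_MASK), &Desc, sizeof(Desc));
}
#endif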
3569
3570
3571/**
3572 * Read guest RAM and ROM, unsigned 8-bit.
3573 *
3574 * @param SrcGCPhys The source address (guest physical).
3575 */
3576RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3577{
3578 uint8_t val;
3579 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3580 VBOX_CHECK_ADDR(SrcGCPhys);
3581 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3582 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3583#ifdef VBOX_DEBUG_PHYS
3584 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3585#endif
3586 return val;
3587}
3588
3589
3590/**
3591 * Read guest RAM and ROM, signed 8-bit.
3592 *
3593 * @param SrcGCPhys The source address (guest physical).
3594 */
3595RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3596{
3597 int8_t val;
3598 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3599 VBOX_CHECK_ADDR(SrcGCPhys);
3600 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3601 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3602#ifdef VBOX_DEBUG_PHYS
3603 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3604#endif
3605 return val;
3606}
3607
3608
3609/**
3610 * Read guest RAM and ROM, unsigned 16-bit.
3611 *
3612 * @param SrcGCPhys The source address (guest physical).
3613 */
3614RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3615{
3616 uint16_t val;
3617 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3618 VBOX_CHECK_ADDR(SrcGCPhys);
3619 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3620 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3621#ifdef VBOX_DEBUG_PHYS
3622 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3623#endif
3624 return val;
3625}
3626
3627
3628/**
3629 * Read guest RAM and ROM, signed 16-bit.
3630 *
3631 * @param SrcGCPhys The source address (guest physical).
3632 */
3633RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3634{
3635 int16_t val;
3636 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3637 VBOX_CHECK_ADDR(SrcGCPhys);
3638 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3639 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3640#ifdef VBOX_DEBUG_PHYS
3641 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3642#endif
3643 return val;
3644}
3645
3646
3647/**
3648 * Read guest RAM and ROM, unsigned 32-bit.
3649 *
3650 * @param SrcGCPhys The source address (guest physical).
3651 */
3652RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3653{
3654 uint32_t val;
3655 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3656 VBOX_CHECK_ADDR(SrcGCPhys);
3657 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3658 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3659#ifdef VBOX_DEBUG_PHYS
3660 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3661#endif
3662 return val;
3663}
3664
3665
3666/**
3667 * Read guest RAM and ROM, signed 32-bit.
3668 *
3669 * @param SrcGCPhys The source address (guest physical).
3670 */
3671RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3672{
3673 int32_t val;
3674 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3675 VBOX_CHECK_ADDR(SrcGCPhys);
3676 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3677 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3678#ifdef VBOX_DEBUG_PHYS
3679 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3680#endif
3681 return val;
3682}
3683
3684
3685/**
3686 * Read guest RAM and ROM, unsigned 64-bit.
3687 *
3688 * @param SrcGCPhys The source address (guest physical).
3689 */
3690uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3691{
3692 uint64_t val;
3693 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3694 VBOX_CHECK_ADDR(SrcGCPhys);
3695 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3696 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3697#ifdef VBOX_DEBUG_PHYS
3698 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3699#endif
3700 return val;
3701}
3702
3703
3704/**
3705 * Read guest RAM and ROM, signed 64-bit.
3706 *
3707 * @param SrcGCPhys The source address (guest physical).
3708 */
3709int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3710{
3711 int64_t val;
3712 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3713 VBOX_CHECK_ADDR(SrcGCPhys);
3714 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3715 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3716#ifdef VBOX_DEBUG_PHYS
3717 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3718#endif
3719 return val;
3720}
3721
3722
3723/**
3724 * Write guest RAM.
3725 *
3726 * @param DstGCPhys The destination address (guest physical).
3727 * @param pvSrc The source address.
3728 * @param cb Number of bytes to write
3729 */
3730void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3731{
3732 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3733 VBOX_CHECK_ADDR(DstGCPhys);
3734 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3735 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3736#ifdef VBOX_DEBUG_PHYS
3737 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3738#endif
3739}
3740
3741
3742/**
3743 * Write guest RAM, unsigned 8-bit.
3744 *
3745 * @param DstGCPhys The destination address (guest physical).
3746 * @param val Value
3747 */
3748void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3749{
3750 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3751 VBOX_CHECK_ADDR(DstGCPhys);
3752 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3753 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3754#ifdef VBOX_DEBUG_PHYS
3755 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3756#endif
3757}
3758
3759
3760/**
3761 * Write guest RAM, unsigned 16-bit.
3762 *
3763 * @param DstGCPhys The destination address (guest physical).
3764 * @param val Value
3765 */
3766void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3767{
3768 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3769 VBOX_CHECK_ADDR(DstGCPhys);
3770 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3771 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3772#ifdef VBOX_DEBUG_PHYS
3773 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3774#endif
3775}
3776
3777
3778/**
3779 * Write guest RAM, unsigned 32-bit.
3780 *
3781 * @param DstGCPhys The destination address (guest physical).
3782 * @param val Value
3783 */
3784void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3785{
3786 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3787 VBOX_CHECK_ADDR(DstGCPhys);
3788 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3789 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3790#ifdef VBOX_DEBUG_PHYS
3791 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3792#endif
3793}
3794
3795
3796/**
3797 * Write guest RAM, unsigned 64-bit.
3798 *
3799 * @param DstGCPhys The destination address (guest physical).
3800 * @param val Value
3801 */
3802void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3803{
3804 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3805 VBOX_CHECK_ADDR(DstGCPhys);
3806 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3807 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3808#ifdef VBOX_DEBUG_PHYS
3809 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3810#endif
3811}
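
/* Hedged sketch (example only) of a read-modify-write on guest physical
 * memory using the typed helpers above; GCPhys is an assumed input. */
#if 0 /* example only */
static void remR3ExampleSetBit0(RTGCPHYS GCPhys)
{
    uint32_t u32 = remR3PhysReadU32(GCPhys);
    remR3PhysWriteU32(GCPhys, u32 | RT_BIT_32(0));
}
#endif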
3812
3813#undef LOG_GROUP
3814#define LOG_GROUP LOG_GROUP_REM_MMIO
3815
3816/** Read MMIO memory. */
3817static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys)
3818{
3819 CPUX86State *env = (CPUX86State *)pvEnv;
3820 uint32_t u32 = 0;
3821 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 1);
3822 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3823 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3824 return u32;
3825}
3826
3827/** Read MMIO memory. */
3828static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys)
3829{
3830 CPUX86State *env = (CPUX86State *)pvEnv;
3831 uint32_t u32 = 0;
3832 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 2);
3833 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3834 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3835 return u32;
3836}
3837
3838/** Read MMIO memory. */
3839static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys)
3840{
3841 CPUX86State *env = (CPUX86State *)pvEnv;
3842 uint32_t u32 = 0;
3843 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 4);
3844 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3845 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3846 return u32;
3847}
3848
3849/** Write to MMIO memory. */
3850static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3851{
3852 CPUX86State *env = (CPUX86State *)pvEnv;
3853 int rc;
3854 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3855 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 1);
3856 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3857}
3858
3859/** Write to MMIO memory. */
3860static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3861{
3862 CPUX86State *env = (CPUX86State *)pvEnv;
3863 int rc;
3864 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3865 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 2);
3866 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3867}
3868
3869/** Write to MMIO memory. */
3870static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3871{
3872 CPUX86State *env = (CPUX86State *)pvEnv;
3873 int rc;
3874 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3875 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 4);
3876 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3877}
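
/* Hedged sketch (example only) of how the six MMIO callbacks above are
 * presumably wired into qemu. The g_apfnMMIO* array names and the
 * cpu_register_io_memory() call are assumptions based on the qemu fork this
 * recompiler derives from; see the REM init code for the real registration. */
#if 0 /* example only */
static CPUReadMemoryFunc  *g_apfnMMIORead[3]  = { remR3MMIOReadU8,  remR3MMIOReadU16,  remR3MMIOReadU32 };
static CPUWriteMemoryFunc *g_apfnMMIOWrite[3] = { remR3MMIOWriteU8, remR3MMIOWriteU16, remR3MMIOWriteU32 };
/* ... at init time, something like:
   pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env); */
#endif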
3878
3879
3880#undef LOG_GROUP
3881#define LOG_GROUP LOG_GROUP_REM_HANDLER
3882
3883/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3884
3885static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3886{
3887 uint8_t u8;
3888 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3889 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3890 return u8;
3891}
3892
3893static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3894{
3895 uint16_t u16;
3896 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3897 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3898 return u16;
3899}
3900
3901static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3902{
3903 uint32_t u32;
3904 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3905 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3906 return u32;
3907}
3908
3909static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3910{
3911 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3912 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3913}
3914
3915static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3916{
3917 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3918 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3919}
3920
3921static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3922{
3923 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3924 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3925}
3926
3927/* -+- disassembly -+- */
3928
3929#undef LOG_GROUP
3930#define LOG_GROUP LOG_GROUP_REM_DISAS
3931
3932
3933/**
3934 * Enables or disables singled stepped disassembly.
3935 *
3936 * @returns VBox status code.
3937 * @param pVM VM handle.
3938 * @param fEnable To enable set this flag, to disable clear it.
3939 */
3940static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3941{
3942 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3943 VM_ASSERT_EMT(pVM);
3944
3945 if (fEnable)
3946 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3947 else
3948 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3949#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3950 cpu_single_step(&pVM->rem.s.Env, fEnable);
3951#endif
3952 return VINF_SUCCESS;
3953}
3954
3955
3956/**
3957 * Enables or disables singled stepped disassembly.
3958 *
3959 * @returns VBox status code.
3960 * @param pVM VM handle.
3961 * @param fEnable To enable set this flag, to disable clear it.
3962 */
3963REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3964{
3965 int rc;
3966
3967 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3968 if (VM_IS_EMT(pVM))
3969 return remR3DisasEnableStepping(pVM, fEnable);
3970
3971 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3972 AssertRC(rc);
3973 return rc;
3974}
3975
3976
3977#ifdef VBOX_WITH_DEBUGGER
3978/**
3979 * External Debugger Command: .remstep [on|off|1|0]
3980 */
3981static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM,
3982 PCDBGCVAR paArgs, unsigned cArgs)
3983{
3984 int rc;
3985 PVM pVM = pUVM->pVM;
3986
3987 if (cArgs == 0)
3988 /*
3989 * Print the current status.
3990 */
3991 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3992 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3993 else
3994 {
3995 /*
3996 * Convert the argument and change the mode.
3997 */
3998 bool fEnable;
3999 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
4000 if (RT_SUCCESS(rc))
4001 {
4002 rc = REMR3DisasEnableStepping(pVM, fEnable);
4003 if (RT_SUCCESS(rc))
4004 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
4005 else
4006 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
4007 }
4008 else
4009 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
4010 }
4011 return rc;
4012}
4013#endif /* VBOX_WITH_DEBUGGER */
4014
4015
4016/**
4017 * Disassembles one instruction and prints it to the log.
4018 *
4019 * @returns Success indicator.
4020 * @param env Pointer to the recompiler CPU structure.
4021 * @param f32BitCode Whether the code should be disassembled as 16 or
4022 * 32 bit code. If -1, the CS selector will be
4023 * inspected.
4024 * @param pszPrefix Optional string to prefix each log line with.
4025 */
4026bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
4027{
4028 PVM pVM = env->pVM;
4029 const bool fLog = LogIsEnabled();
4030 const bool fLog2 = LogIs2Enabled();
4031 int rc = VINF_SUCCESS;
4032
4033 /*
4034 * Don't bother if there ain't any log output to do.
4035 */
4036 if (!fLog && !fLog2)
4037 return true;
4038
4039 /*
4040 * Update the state so DBGF reads the correct register values.
4041 */
4042 remR3StateUpdate(pVM, env->pVCpu);
4043
4044 /*
4045 * Log registers if requested.
4046 */
4047 if (fLog2)
4048 DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
4049
4050 /*
4051 * Disassemble to log.
4052 */
4053 if (fLog)
4054 {
4055 PVMCPU pVCpu = VMMGetCpu(pVM);
4056 char szBuf[256];
4057 szBuf[0] = '\0';
4058 rc = DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM,
4059 pVCpu->idCpu,
4060 0, /* Sel */ 0, /* GCPtr */
4061 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4062 szBuf,
4063 sizeof(szBuf),
4064 NULL);
4065 if (RT_FAILURE(rc))
4066 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4067 if (pszPrefix && *pszPrefix)
4068 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4069 else
4070 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4071 }
4072
4073 return RT_SUCCESS(rc);
4074}
4075
4076
4077/**
4078 * Disassemble recompiled code.
4079 *
4080 * @param phFile Ignored; usually the logfile.
4081 * @param pvCode Pointer to the code block.
4082 * @param cb Size of the code block.
4083 */
4084void disas(FILE *phFile, void *pvCode, unsigned long cb)
4085{
4086 if (LogIs2Enabled())
4087 {
4088 unsigned off = 0;
4089 char szOutput[256];
4090 DISCPUSTATE Cpu;
4091#ifdef RT_ARCH_X86
4092 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4093#else
4094 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4095#endif
4096
4097 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4098 while (off < cb)
4099 {
4100 uint32_t cbInstr;
4101 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4102 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4103 if (RT_SUCCESS(rc))
4104 RTLogPrintf("%s", szOutput);
4105 else
4106 {
4107 RTLogPrintf("disas error %Rrc\n", rc);
4108 cbInstr = 1;
4109 }
4110 off += cbInstr;
4111 }
4112 }
4113}
4114
4115
4116/**
4117 * Disassemble guest code.
4118 *
4119 * @param phFile Ignored; usually the logfile.
4120 * @param uCode The guest address of the code to disassemble. (flat?)
4121 * @param cb Number of bytes to disassemble.
4122 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4123 */
4124void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4125{
4126 if (LogIs2Enabled())
4127 {
4128 PVM pVM = cpu_single_env->pVM;
4129 PVMCPU pVCpu = cpu_single_env->pVCpu;
4130 RTSEL cs;
4131 RTGCUINTPTR eip;
4132
4133 Assert(pVCpu);
4134
4135 /*
4136 * Update the state so DBGF reads the correct register values (flags).
4137 */
4138 remR3StateUpdate(pVM, pVCpu);
4139
4140 /*
4141 * Do the disassembling.
4142 */
4143 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4144 cs = cpu_single_env->segs[R_CS].selector;
4145 eip = uCode - cpu_single_env->segs[R_CS].base;
4146 for (;;)
4147 {
4148 char szBuf[256] = {0}; /* printed even when disassembly fails below */
4149 uint32_t cbInstr;
4150 int rc = DBGFR3DisasInstrEx(pVM->pUVM,
4151 pVCpu->idCpu,
4152 cs,
4153 eip,
4154 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4155 szBuf, sizeof(szBuf),
4156 &cbInstr);
4157 if (RT_SUCCESS(rc))
4158 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4159 else
4160 {
4161 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4162 cbInstr = 1;
4163 }
4164
4165 /* next */
4166 if (cb <= cbInstr)
4167 break;
4168 cb -= cbInstr;
4169 uCode += cbInstr;
4170 eip += cbInstr;
4171 }
4172 }
4173}
4174
4175
4176/**
4177 * Looks up a guest symbol.
4178 *
4179 * @returns Pointer to symbol name. This is a static buffer.
4180 * @param orig_addr The address in question.
4181 */
4182const char *lookup_symbol(target_ulong orig_addr)
4183{
4184 PVM pVM = cpu_single_env->pVM;
4185 RTGCINTPTR off = 0;
4186 RTDBGSYMBOL Sym;
4187 DBGFADDRESS Addr;
4188
4189 int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, orig_addr),
4190 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL, &off, &Sym, NULL /*phMod*/);
4191 if (RT_SUCCESS(rc))
4192 {
4193 static char szSym[sizeof(Sym.szName) + 48];
4194 if (!off)
4195 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4196 else if (off > 0)
4197 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4198 else
4199 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4200 return szSym;
4201 }
4202 return "<N/A>";
4203}
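
/* Hedged usage note (example only): the returned string lives in a static
 * buffer, so it must be consumed before the next lookup_symbol() call and
 * the function is not thread safe. A hypothetical use: */
#if 0 /* example only */
static void remR3ExampleLogSymbol(CPUX86State *env)
{
    RTLogPrintf("pc=%RGv %s", (RTGCPTR)env->eip, lookup_symbol(env->eip));
}
#endif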
4204
4205
4206#undef LOG_GROUP
4207#define LOG_GROUP LOG_GROUP_REM
4208
4209
4210/* -+- FF notifications -+- */
4211
4212
4213/**
4214 * Notification about a pending interrupt.
4215 *
4216 * @param pVM VM Handle.
4217 * @param pVCpu VMCPU Handle.
4218 * @param u8Interrupt Interrupt
4219 * @thread The emulation thread.
4220 */
4221REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4222{
4223 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4224 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4225}
4226
4227/**
4228 * Queries the pending interrupt.
4229 *
4230 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4231 * @param pVM VM Handle.
4232 * @param pVCpu VMCPU Handle.
4233 * @thread The emulation thread.
4234 */
4235REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4236{
4237 return pVM->rem.s.u32PendingInterrupt;
4238}
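
/* Hedged sketch (example only) of the notify/query pairing above: EM stashes
 * an interrupt it could not deliver in raw mode, and the recompiler picks it
 * up later in cpu_get_pic_interrupt(). Vector 0x20 is hypothetical. */
#if 0 /* example only */
static void remR3ExamplePendingIrq(PVM pVM, PVMCPU pVCpu)
{
    REMR3NotifyPendingInterrupt(pVM, pVCpu, 0x20);
    Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == 0x20);
}
#endif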
4239
4240/**
4241 * Notification about the interrupt FF being set.
4242 *
4243 * @param pVM VM Handle.
4244 * @param pVCpu VMCPU Handle.
4245 * @thread The emulation thread.
4246 */
4247REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4248{
4249#ifndef IEM_VERIFICATION_MODE
4250 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4251 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4252 if (pVM->rem.s.fInREM)
4253 {
4254 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4255 CPU_INTERRUPT_EXTERNAL_HARD);
4256 }
4257#endif
4258}
4259
4260
4261/**
4262 * Notification about the interrupt FF being cleared.
4263 *
4264 * @param pVM VM Handle.
4265 * @param pVCpu VMCPU Handle.
4266 * @thread Any.
4267 */
4268REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4269{
4270 LogFlow(("REMR3NotifyInterruptClear:\n"));
4271 if (pVM->rem.s.fInREM)
4272 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4273}
4274
4275
4276/**
4277 * Notification about pending timer(s).
4278 *
4279 * @param pVM VM Handle.
4280 * @param pVCpuDst The target cpu for this notification.
4281 * TM will not broadcast pending timer events, but use
4282 * a dedicated EMT for them. So, only interrupt REM
4283 * execution if the given CPU is executing in REM.
4284 * @thread Any.
4285 */
4286REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4287{
4288#ifndef IEM_VERIFICATION_MODE
4289#ifndef DEBUG_bird
4290 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4291#endif
4292 if (pVM->rem.s.fInREM)
4293 {
4294 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4295 {
4296 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4297 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4298 CPU_INTERRUPT_EXTERNAL_TIMER);
4299 }
4300 else
4301 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4302 }
4303 else
4304 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4305#endif
4306}
4307
4308
4309/**
4310 * Notification about pending DMA transfers.
4311 *
4312 * @param pVM VM Handle.
4313 * @thread Any.
4314 */
4315REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4316{
4317#ifndef IEM_VERIFICATION_MODE
4318 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4319 if (pVM->rem.s.fInREM)
4320 {
4321 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4322 CPU_INTERRUPT_EXTERNAL_DMA);
4323 }
4324#endif
4325}
4326
4327
4328/**
4329 * Notification about pending queue items.
4330 *
4331 * @param pVM VM Handle.
4332 * @thread Any.
4333 */
4334REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4335{
4336#ifndef IEM_VERIFICATION_MODE
4337 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4338 if (pVM->rem.s.fInREM)
4339 {
4340 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4341 CPU_INTERRUPT_EXTERNAL_EXIT);
4342 }
4343#endif
4344}
4345
4346
4347/**
4348 * Notification about pending FF set by an external thread.
4349 *
4350 * @param pVM VM handle.
4351 * @thread Any.
4352 */
4353REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4354{
4355#ifndef IEM_VERIFICATION_MODE
4356 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4357 if (pVM->rem.s.fInREM)
4358 {
4359 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4360 CPU_INTERRUPT_EXTERNAL_EXIT);
4361 }
4362#endif
4363}
4364
4365
4366#ifdef VBOX_WITH_STATISTICS
4367void remR3ProfileStart(int statcode)
4368{
4369 STAMPROFILEADV *pStat;
4370 switch(statcode)
4371 {
4372 case STATS_EMULATE_SINGLE_INSTR:
4373 pStat = &gStatExecuteSingleInstr;
4374 break;
4375 case STATS_QEMU_COMPILATION:
4376 pStat = &gStatCompilationQEmu;
4377 break;
4378 case STATS_QEMU_RUN_EMULATED_CODE:
4379 pStat = &gStatRunCodeQEmu;
4380 break;
4381 case STATS_QEMU_TOTAL:
4382 pStat = &gStatTotalTimeQEmu;
4383 break;
4384 case STATS_QEMU_RUN_TIMERS:
4385 pStat = &gStatTimers;
4386 break;
4387 case STATS_TLB_LOOKUP:
4388 pStat= &gStatTBLookup;
4389 break;
4390 case STATS_IRQ_HANDLING:
4391 pStat= &gStatIRQ;
4392 break;
4393 case STATS_RAW_CHECK:
4394 pStat = &gStatRawCheck;
4395 break;
4396
4397 default:
4398 AssertMsgFailed(("unknown stat %d\n", statcode));
4399 return;
4400 }
4401 STAM_PROFILE_ADV_START(pStat, a);
4402}
4403
4404
4405void remR3ProfileStop(int statcode)
4406{
4407 STAMPROFILEADV *pStat;
4408 switch(statcode)
4409 {
4410 case STATS_EMULATE_SINGLE_INSTR:
4411 pStat = &gStatExecuteSingleInstr;
4412 break;
4413 case STATS_QEMU_COMPILATION:
4414 pStat = &gStatCompilationQEmu;
4415 break;
4416 case STATS_QEMU_RUN_EMULATED_CODE:
4417 pStat = &gStatRunCodeQEmu;
4418 break;
4419 case STATS_QEMU_TOTAL:
4420 pStat = &gStatTotalTimeQEmu;
4421 break;
4422 case STATS_QEMU_RUN_TIMERS:
4423 pStat = &gStatTimers;
4424 break;
4425 case STATS_TLB_LOOKUP:
4426 pStat= &gStatTBLookup;
4427 break;
4428 case STATS_IRQ_HANDLING:
4429 pStat= &gStatIRQ;
4430 break;
4431 case STATS_RAW_CHECK:
4432 pStat = &gStatRawCheck;
4433 break;
4434 default:
4435 AssertMsgFailed(("unknown stat %d\n", statcode));
4436 return;
4437 }
4438 STAM_PROFILE_ADV_STOP(pStat, a);
4439}
4440#endif
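
/* Hedged usage sketch (example only): the profiling helpers above are meant
 * to bracket the code being measured with matching start/stop calls. */
#ifdef VBOX_WITH_STATISTICS
# if 0 /* example only */
static void remR3ExampleProfile(void)
{
    remR3ProfileStart(STATS_TLB_LOOKUP);
    /* ... the TLB lookup being timed ... */
    remR3ProfileStop(STATS_TLB_LOOKUP);
}
# endif
#endif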
4441
4442/**
4443 * Raise an RC, force rem exit.
4444 *
4445 * @param pVM VM handle.
4446 * @param rc The rc.
4447 */
4448void remR3RaiseRC(PVM pVM, int rc)
4449{
4450 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4451 Assert(pVM->rem.s.fInREM);
4452 VM_ASSERT_EMT(pVM);
4453 pVM->rem.s.rc = rc;
4454 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4455}
4456
4457
4458/* -+- timers -+- */
4459
4460uint64_t cpu_get_tsc(CPUX86State *env)
4461{
4462 STAM_COUNTER_INC(&gStatCpuGetTSC);
4463 return TMCpuTickGet(env->pVCpu);
4464}
4465
4466
4467/* -+- interrupts -+- */
4468
4469void cpu_set_ferr(CPUX86State *env)
4470{
4471 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4472 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4473}
4474
4475int cpu_get_pic_interrupt(CPUX86State *env)
4476{
4477 uint8_t u8Interrupt;
4478 int rc;
4479
4480 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4481 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4482 * with the (a)pic.
4483 */
4484 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4485 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4486 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4487 * remove this kludge. */
4488 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4489 {
4490 rc = VINF_SUCCESS;
4491 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4492 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4493 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4494 }
4495 else
4496 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4497
4498 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4499 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4500 if (RT_SUCCESS(rc))
4501 {
4502 if (VMCPU_FF_IS_PENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4503 env->interrupt_request |= CPU_INTERRUPT_HARD;
4504 return u8Interrupt;
4505 }
4506 return -1;
4507}
4508
4509
4510/* -+- local apic -+- */
4511
4512#if 0 /* CPUMSetGuestMsr does this now. */
4513void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4514{
4515 int rc = PDMApicSetBase(env->pVM, val);
4516 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4517}
4518#endif
4519
4520uint64_t cpu_get_apic_base(CPUX86State *env)
4521{
4522 uint64_t u64;
4523 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(env->pVCpu, MSR_IA32_APICBASE, &u64);
4524 if (RT_SUCCESS(rcStrict))
4525 {
4526 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4527 return u64;
4528 }
4529 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
4530 return 0;
4531}
4532
4533void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4534{
4535 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4536 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4537}
4538
4539uint8_t cpu_get_apic_tpr(CPUX86State *env)
4540{
4541 uint8_t u8;
4542 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL, NULL);
4543 if (RT_SUCCESS(rc))
4544 {
4545 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4546 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4547 }
4548 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4549 return 0;
4550}
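
/* Worked example (hedged, example only) of the CR8 <-> TPR bit mapping used
 * above: guest CR8 bits 3-0 map to bits 7-4 of the task priority register,
 * assuming the APIC returns the stored value unchanged. */
#if 0 /* example only */
static void remR3ExampleTpr(CPUX86State *env)
{
    cpu_set_apic_tpr(env, 0x9);           /* stores 0x90 in the TPR register */
    Assert(cpu_get_apic_tpr(env) == 0x9); /* 0x90 >> 4 on the way back */
}
#endif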
4551
4552/**
4553 * Read an MSR.
4554 *
4555 * @retval 0 success.
4556 * @retval -1 failure, raise \#GP(0).
4557 * @param env The cpu state.
4558 * @param idMsr The MSR to read.
4559 * @param puValue Where to return the value.
4560 */
4561int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4562{
4563 Assert(env->pVCpu);
4564 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4565}
4566
4567/**
4568 * Write to an MSR.
4569 *
4570 * @retval 0 success.
4571 * @retval -1 failure, raise \#GP(0).
4572 * @param env The cpu state.
4573 * @param idMsr The MSR to write.
4574 * @param uValue The value to write.
4575 */
4576int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4577{
4578 Assert(env->pVCpu);
4579 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4580}
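
/* Hedged usage sketch (example only): the 0/-1 return maps the VBox status
 * code onto qemu's convention, where -1 makes the caller raise #GP(0). */
#if 0 /* example only */
static void remR3ExampleRdmsr(CPUX86State *env)
{
    uint64_t uValue;
    if (cpu_rdmsr(env, MSR_IA32_APICBASE, &uValue) != 0)
    {
        /* the qemu caller would raise #GP(0) here */
    }
}
#endif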
4581
4582/* -+- I/O Ports -+- */
4583
4584#undef LOG_GROUP
4585#define LOG_GROUP LOG_GROUP_REM_IOPORT
4586
4587void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4588{
4589 int rc;
4590
4591 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4592 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4593
4594 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 1);
4595 if (RT_LIKELY(rc == VINF_SUCCESS))
4596 return;
4597 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4598 {
4599 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4600 remR3RaiseRC(env->pVM, rc);
4601 return;
4602 }
4603 remAbort(rc, __FUNCTION__);
4604}
4605
4606void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4607{
4608 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4609 int rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 2);
4610 if (RT_LIKELY(rc == VINF_SUCCESS))
4611 return;
4612 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4613 {
4614 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4615 remR3RaiseRC(env->pVM, rc);
4616 return;
4617 }
4618 remAbort(rc, __FUNCTION__);
4619}
4620
4621void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4622{
4623 int rc;
4624 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4625 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 4);
4626 if (RT_LIKELY(rc == VINF_SUCCESS))
4627 return;
4628 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4629 {
4630 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4631 remR3RaiseRC(env->pVM, rc);
4632 return;
4633 }
4634 remAbort(rc, __FUNCTION__);
4635}
4636
4637uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4638{
4639 uint32_t u32 = 0;
4640 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 1);
4641 if (RT_LIKELY(rc == VINF_SUCCESS))
4642 {
4643 if (/*addr != 0x61 && */addr != 0x71)
4644 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4645 return (uint8_t)u32;
4646 }
4647 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4648 {
4649 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4650 remR3RaiseRC(env->pVM, rc);
4651 return (uint8_t)u32;
4652 }
4653 remAbort(rc, __FUNCTION__);
4654 return UINT8_C(0xff);
4655}
4656
4657uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4658{
4659 uint32_t u32 = 0;
4660 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 2);
4661 if (RT_LIKELY(rc == VINF_SUCCESS))
4662 {
4663 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4664 return (uint16_t)u32;
4665 }
4666 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4667 {
4668 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4669 remR3RaiseRC(env->pVM, rc);
4670 return (uint16_t)u32;
4671 }
4672 remAbort(rc, __FUNCTION__);
4673 return UINT16_C(0xffff);
4674}
4675
4676uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4677{
4678 uint32_t u32 = 0;
4679 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 4);
4680 if (RT_LIKELY(rc == VINF_SUCCESS))
4681 {
4682 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4683 return u32;
4684 }
4685 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4686 {
4687 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4688 remR3RaiseRC(env->pVM, rc);
4689 return u32;
4690 }
4691 remAbort(rc, __FUNCTION__);
4692 return UINT32_C(0xffffffff);
4693}
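
/* Hedged usage sketch (example only) of the port I/O helpers above, using the
 * standard CMOS index/data ports 0x70/0x71 that the logging filters mention. */
#if 0 /* example only */
static uint8_t remR3ExampleReadCmos(CPUX86State *env, uint8_t idxReg)
{
    cpu_outb(env, 0x70, idxReg);  /* select the CMOS register */
    return cpu_inb(env, 0x71);    /* read its value */
}
#endif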
4694
4695#undef LOG_GROUP
4696#define LOG_GROUP LOG_GROUP_REM
4697
4698
4699/* -+- helpers and misc other interfaces -+- */
4700
4701/**
4702 * Perform the CPUID instruction.
4703 *
4704 * @param env Pointer to the recompiler CPU structure.
4705 * @param idx The CPUID leaf (eax).
4706 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4707 * @param pvEAX Where to store eax.
4708 * @param pvEBX Where to store ebx.
4709 * @param pvECX Where to store ecx.
4710 * @param pvEDX Where to store edx.
4711 */
4712void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4713 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4714{
4715 NOREF(idxSub);
4716 CPUMGetGuestCpuId(env->pVCpu, idx, idxSub, pEAX, pEBX, pECX, pEDX);
4717}
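
/* Hedged usage sketch (example only): querying standard leaf 0, where
 * EBX:EDX:ECX carry the vendor string (e.g. "GenuineIntel"). */
#if 0 /* example only */
static void remR3ExampleCpuid(CPUX86State *env)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    cpu_x86_cpuid(env, 0 /*idx*/, 0 /*idxSub*/, &uEAX, &uEBX, &uECX, &uEDX);
}
#endif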
4718
4719
4720#if 0 /* not used */
4721/**
4722 * Interface for qemu hardware to report back fatal errors.
4723 */
4724void hw_error(const char *pszFormat, ...)
4725{
4726 /*
4727 * Bitch about it.
4728 */
4729 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4730 * this in my Odin32 tree at home! */
4731 va_list args;
4732 va_start(args, pszFormat);
4733 RTLogPrintf("fatal error in virtual hardware:");
4734 RTLogPrintfV(pszFormat, args);
4735 va_end(args);
4736 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4737
4738 /*
4739 * If we're in REM context we'll sync back the state before 'jumping' to
4740 * the EMs failure handling.
4741 */
4742 PVM pVM = cpu_single_env->pVM;
4743 if (pVM->rem.s.fInREM)
4744 REMR3StateBack(pVM);
4745 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4746 AssertMsgFailed(("EMR3FatalError returned!\n"));
4747}
4748#endif
4749
4750/**
4751 * Interface for the qemu cpu to report unhandled situation
4752 * raising a fatal VM error.
4753 */
4754void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4755{
4756 va_list va;
4757 PVM pVM;
4758 PVMCPU pVCpu;
4759 char szMsg[256];
4760
4761 /*
4762 * Bitch about it.
4763 */
4764 RTLogFlags(NULL, "nodisabled nobuffered");
4765 RTLogFlush(NULL);
4766
4767 va_start(va, pszFormat);
4768#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4769 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4770 unsigned cArgs = 0;
4771 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4772 const char *psz = strchr(pszFormat, '%');
4773 while (psz && cArgs < 6)
4774 {
4775 auArgs[cArgs++] = va_arg(va, uintptr_t);
4776 psz = strchr(psz + 1, '%');
4777 }
4778 switch (cArgs)
4779 {
4780 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4781 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4782 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4783 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4784 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4785 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4786 default:
4787 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4788 }
4789#else
4790 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4791#endif
4792 va_end(va);
4793
4794 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4795 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4796
4797 /*
4798 * If we're in REM context we'll sync back the state before 'jumping' to
4799 * the EMs failure handling.
4800 */
4801 pVM = cpu_single_env->pVM;
4802 pVCpu = cpu_single_env->pVCpu;
4803 Assert(pVCpu);
4804
4805 if (pVM->rem.s.fInREM)
4806 REMR3StateBack(pVM, pVCpu);
4807 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4808 AssertMsgFailed(("EMR3FatalError returned!\n"));
4809}
4810
4811
4812/**
4813 * Aborts the VM.
4814 *
4815 * @param rc VBox error code.
4816 * @param pszTip Hint about why/when this happened.
4817 */
4818void remAbort(int rc, const char *pszTip)
4819{
4820 PVM pVM;
4821 PVMCPU pVCpu;
4822
4823 /*
4824 * Bitch about it.
4825 */
4826 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4827 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4828
4829 /*
4830 * Jump back to where we entered the recompiler.
4831 */
4832 pVM = cpu_single_env->pVM;
4833 pVCpu = cpu_single_env->pVCpu;
4834 Assert(pVCpu);
4835
4836 if (pVM->rem.s.fInREM)
4837 REMR3StateBack(pVM, pVCpu);
4838
4839 EMR3FatalError(pVCpu, rc);
4840 AssertMsgFailed(("EMR3FatalError returned!\n"));
4841}
4842
4843
4844/**
4845 * Dumps a linux system call.
4846 * @param pVCpu VMCPU handle.
4847 */
4848void remR3DumpLnxSyscall(PVMCPU pVCpu)
4849{
4850 static const char *apsz[] =
4851 {
4852 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4853 "sys_exit",
4854 "sys_fork",
4855 "sys_read",
4856 "sys_write",
4857 "sys_open", /* 5 */
4858 "sys_close",
4859 "sys_waitpid",
4860 "sys_creat",
4861 "sys_link",
4862 "sys_unlink", /* 10 */
4863 "sys_execve",
4864 "sys_chdir",
4865 "sys_time",
4866 "sys_mknod",
4867 "sys_chmod", /* 15 */
4868 "sys_lchown16",
4869 "sys_ni_syscall", /* old break syscall holder */
4870 "sys_stat",
4871 "sys_lseek",
4872 "sys_getpid", /* 20 */
4873 "sys_mount",
4874 "sys_oldumount",
4875 "sys_setuid16",
4876 "sys_getuid16",
4877 "sys_stime", /* 25 */
4878 "sys_ptrace",
4879 "sys_alarm",
4880 "sys_fstat",
4881 "sys_pause",
4882 "sys_utime", /* 30 */
4883 "sys_ni_syscall", /* old stty syscall holder */
4884 "sys_ni_syscall", /* old gtty syscall holder */
4885 "sys_access",
4886 "sys_nice",
4887 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4888 "sys_sync",
4889 "sys_kill",
4890 "sys_rename",
4891 "sys_mkdir",
4892 "sys_rmdir", /* 40 */
4893 "sys_dup",
4894 "sys_pipe",
4895 "sys_times",
4896 "sys_ni_syscall", /* old prof syscall holder */
4897 "sys_brk", /* 45 */
4898 "sys_setgid16",
4899 "sys_getgid16",
4900 "sys_signal",
4901 "sys_geteuid16",
4902 "sys_getegid16", /* 50 */
4903 "sys_acct",
4904 "sys_umount", /* recycled never used phys() */
4905 "sys_ni_syscall", /* old lock syscall holder */
4906 "sys_ioctl",
4907 "sys_fcntl", /* 55 */
4908 "sys_ni_syscall", /* old mpx syscall holder */
4909 "sys_setpgid",
4910 "sys_ni_syscall", /* old ulimit syscall holder */
4911 "sys_olduname",
4912 "sys_umask", /* 60 */
4913 "sys_chroot",
4914 "sys_ustat",
4915 "sys_dup2",
4916 "sys_getppid",
4917 "sys_getpgrp", /* 65 */
4918 "sys_setsid",
4919 "sys_sigaction",
4920 "sys_sgetmask",
4921 "sys_ssetmask",
4922 "sys_setreuid16", /* 70 */
4923 "sys_setregid16",
4924 "sys_sigsuspend",
4925 "sys_sigpending",
4926 "sys_sethostname",
4927 "sys_setrlimit", /* 75 */
4928 "sys_old_getrlimit",
4929 "sys_getrusage",
4930 "sys_gettimeofday",
4931 "sys_settimeofday",
4932 "sys_getgroups16", /* 80 */
4933 "sys_setgroups16",
4934 "old_select",
4935 "sys_symlink",
4936 "sys_lstat",
4937 "sys_readlink", /* 85 */
4938 "sys_uselib",
4939 "sys_swapon",
4940 "sys_reboot",
4941 "old_readdir",
4942 "old_mmap", /* 90 */
4943 "sys_munmap",
4944 "sys_truncate",
4945 "sys_ftruncate",
4946 "sys_fchmod",
4947 "sys_fchown16", /* 95 */
4948 "sys_getpriority",
4949 "sys_setpriority",
4950 "sys_ni_syscall", /* old profil syscall holder */
4951 "sys_statfs",
4952 "sys_fstatfs", /* 100 */
4953 "sys_ioperm",
4954 "sys_socketcall",
4955 "sys_syslog",
4956 "sys_setitimer",
4957 "sys_getitimer", /* 105 */
4958 "sys_newstat",
4959 "sys_newlstat",
4960 "sys_newfstat",
4961 "sys_uname",
4962 "sys_iopl", /* 110 */
4963 "sys_vhangup",
4964 "sys_ni_syscall", /* old "idle" system call */
4965 "sys_vm86old",
4966 "sys_wait4",
4967 "sys_swapoff", /* 115 */
4968 "sys_sysinfo",
4969 "sys_ipc",
4970 "sys_fsync",
4971 "sys_sigreturn",
4972 "sys_clone", /* 120 */
4973 "sys_setdomainname",
4974 "sys_newuname",
4975 "sys_modify_ldt",
4976 "sys_adjtimex",
4977 "sys_mprotect", /* 125 */
4978 "sys_sigprocmask",
4979 "sys_ni_syscall", /* old "create_module" */
4980 "sys_init_module",
4981 "sys_delete_module",
4982 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4983 "sys_quotactl",
4984 "sys_getpgid",
4985 "sys_fchdir",
4986 "sys_bdflush",
4987 "sys_sysfs", /* 135 */
4988 "sys_personality",
4989 "sys_ni_syscall", /* reserved for afs_syscall */
4990 "sys_setfsuid16",
4991 "sys_setfsgid16",
4992 "sys_llseek", /* 140 */
4993 "sys_getdents",
4994 "sys_select",
4995 "sys_flock",
4996 "sys_msync",
4997 "sys_readv", /* 145 */
4998 "sys_writev",
4999 "sys_getsid",
5000 "sys_fdatasync",
5001 "sys_sysctl",
5002 "sys_mlock", /* 150 */
5003 "sys_munlock",
5004 "sys_mlockall",
5005 "sys_munlockall",
5006 "sys_sched_setparam",
5007 "sys_sched_getparam", /* 155 */
5008 "sys_sched_setscheduler",
5009 "sys_sched_getscheduler",
5010 "sys_sched_yield",
5011 "sys_sched_get_priority_max",
5012 "sys_sched_get_priority_min", /* 160 */
5013 "sys_sched_rr_get_interval",
5014 "sys_nanosleep",
5015 "sys_mremap",
5016 "sys_setresuid16",
5017 "sys_getresuid16", /* 165 */
5018 "sys_vm86",
5019 "sys_ni_syscall", /* Old sys_query_module */
5020 "sys_poll",
5021 "sys_nfsservctl",
5022 "sys_setresgid16", /* 170 */
5023 "sys_getresgid16",
5024 "sys_prctl",
5025 "sys_rt_sigreturn",
5026 "sys_rt_sigaction",
5027 "sys_rt_sigprocmask", /* 175 */
5028 "sys_rt_sigpending",
5029 "sys_rt_sigtimedwait",
5030 "sys_rt_sigqueueinfo",
5031 "sys_rt_sigsuspend",
5032 "sys_pread64", /* 180 */
5033 "sys_pwrite64",
5034 "sys_chown16",
5035 "sys_getcwd",
5036 "sys_capget",
5037 "sys_capset", /* 185 */
5038 "sys_sigaltstack",
5039 "sys_sendfile",
5040 "sys_ni_syscall", /* reserved for streams1 */
5041 "sys_ni_syscall", /* reserved for streams2 */
5042 "sys_vfork", /* 190 */
5043 "sys_getrlimit",
5044 "sys_mmap2",
5045 "sys_truncate64",
5046 "sys_ftruncate64",
5047 "sys_stat64", /* 195 */
5048 "sys_lstat64",
5049 "sys_fstat64",
5050 "sys_lchown",
5051 "sys_getuid",
5052 "sys_getgid", /* 200 */
5053 "sys_geteuid",
5054 "sys_getegid",
5055 "sys_setreuid",
5056 "sys_setregid",
5057 "sys_getgroups", /* 205 */
5058 "sys_setgroups",
5059 "sys_fchown",
5060 "sys_setresuid",
5061 "sys_getresuid",
5062 "sys_setresgid", /* 210 */
5063 "sys_getresgid",
5064 "sys_chown",
5065 "sys_setuid",
5066 "sys_setgid",
5067 "sys_setfsuid", /* 215 */
5068 "sys_setfsgid",
5069 "sys_pivot_root",
5070 "sys_mincore",
5071 "sys_madvise",
5072 "sys_getdents64", /* 220 */
5073 "sys_fcntl64",
5074 "sys_ni_syscall", /* reserved for TUX */
5075 "sys_ni_syscall",
5076 "sys_gettid",
5077 "sys_readahead", /* 225 */
5078 "sys_setxattr",
5079 "sys_lsetxattr",
5080 "sys_fsetxattr",
5081 "sys_getxattr",
5082 "sys_lgetxattr", /* 230 */
5083 "sys_fgetxattr",
5084 "sys_listxattr",
5085 "sys_llistxattr",
5086 "sys_flistxattr",
5087 "sys_removexattr", /* 235 */
5088 "sys_lremovexattr",
5089 "sys_fremovexattr",
5090 "sys_tkill",
5091 "sys_sendfile64",
5092 "sys_futex", /* 240 */
5093 "sys_sched_setaffinity",
5094 "sys_sched_getaffinity",
5095 "sys_set_thread_area",
5096 "sys_get_thread_area",
5097 "sys_io_setup", /* 245 */
5098 "sys_io_destroy",
5099 "sys_io_getevents",
5100 "sys_io_submit",
5101 "sys_io_cancel",
5102 "sys_fadvise64", /* 250 */
5103 "sys_ni_syscall",
5104 "sys_exit_group",
5105 "sys_lookup_dcookie",
5106 "sys_epoll_create",
5107 "sys_epoll_ctl", /* 255 */
5108 "sys_epoll_wait",
5109 "sys_remap_file_pages",
5110 "sys_set_tid_address",
5111 "sys_timer_create",
5112 "sys_timer_settime", /* 260 */
5113 "sys_timer_gettime",
5114 "sys_timer_getoverrun",
5115 "sys_timer_delete",
5116 "sys_clock_settime",
5117 "sys_clock_gettime", /* 265 */
5118 "sys_clock_getres",
5119 "sys_clock_nanosleep",
5120 "sys_statfs64",
5121 "sys_fstatfs64",
5122 "sys_tgkill", /* 270 */
5123 "sys_utimes",
5124 "sys_fadvise64_64",
5125 "sys_ni_syscall" /* sys_vserver */
5126 };
5127
5128 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5129 switch (uEAX)
5130 {
5131 default:
5132 if (uEAX < RT_ELEMENTS(apsz))
5133 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5134 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5135 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5136 else
5137 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5138 break;
5139
5140 }
5141}


/**
 * Dumps an OpenBSD system call.
 * @param pVCpu VMCPU handle.
 */
void remR3DumpOBsdSyscall(PVMCPU pVCpu)
{
    static const char *apsz[] =
    {
        "SYS_syscall", //0
        "SYS_exit", //1
        "SYS_fork", //2
        "SYS_read", //3
        "SYS_write", //4
        "SYS_open", //5
        "SYS_close", //6
        "SYS_wait4", //7
        "SYS_8",
        "SYS_link", //9
        "SYS_unlink", //10
        "SYS_11",
        "SYS_chdir", //12
        "SYS_fchdir", //13
        "SYS_mknod", //14
        "SYS_chmod", //15
        "SYS_chown", //16
        "SYS_break", //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid", //20
        "SYS_mount", //21
        "SYS_unmount", //22
        "SYS_setuid", //23
        "SYS_getuid", //24
        "SYS_geteuid", //25
        "SYS_ptrace", //26
        "SYS_recvmsg", //27
        "SYS_sendmsg", //28
        "SYS_recvfrom", //29
        "SYS_accept", //30
        "SYS_getpeername", //31
        "SYS_getsockname", //32
        "SYS_access", //33
        "SYS_chflags", //34
        "SYS_fchflags", //35
        "SYS_sync", //36
        "SYS_kill", //37
        "SYS_38",
        "SYS_getppid", //39
        "SYS_40",
        "SYS_dup", //41
        "SYS_opipe", //42
        "SYS_getegid", //43
        "SYS_profil", //44
        "SYS_ktrace", //45
        "SYS_sigaction", //46
        "SYS_getgid", //47
        "SYS_sigprocmask", //48
        "SYS_getlogin", //49
        "SYS_setlogin", //50
        "SYS_acct", //51
        "SYS_sigpending", //52
        "SYS_osigaltstack", //53
        "SYS_ioctl", //54
        "SYS_reboot", //55
        "SYS_revoke", //56
        "SYS_symlink", //57
        "SYS_readlink", //58
        "SYS_execve", //59
        "SYS_umask", //60
        "SYS_chroot", //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork", //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk", //69
        "SYS_sstk", //70
5223 "SYS_61",
5224 "SYS_vadvise", //72
5225 "SYS_munmap", //73
5226 "SYS_mprotect", //74
5227 "SYS_madvise", //75
5228 "SYS_76",
5229 "SYS_77",
5230 "SYS_mincore", //78
5231 "SYS_getgroups", //79
5232 "SYS_setgroups", //80
5233 "SYS_getpgrp", //81
5234 "SYS_setpgid", //82
5235 "SYS_setitimer", //83
5236 "SYS_84",
5237 "SYS_85",
5238 "SYS_getitimer", //86
5239 "SYS_87",
5240 "SYS_88",
5241 "SYS_89",
5242 "SYS_dup2", //90
5243 "SYS_91",
5244 "SYS_fcntl", //92
5245 "SYS_select", //93
5246 "SYS_94",
5247 "SYS_fsync", //95
5248 "SYS_setpriority", //96
5249 "SYS_socket", //97
5250 "SYS_connect", //98
5251 "SYS_99",
5252 "SYS_getpriority", //100
5253 "SYS_101",
5254 "SYS_102",
5255 "SYS_sigreturn", //103
5256 "SYS_bind", //104
5257 "SYS_setsockopt", //105
5258 "SYS_listen", //106
5259 "SYS_107",
5260 "SYS_108",
5261 "SYS_109",
5262 "SYS_110",
5263 "SYS_sigsuspend", //111
5264 "SYS_112",
5265 "SYS_113",
5266 "SYS_114",
5267 "SYS_115",
5268 "SYS_gettimeofday", //116
5269 "SYS_getrusage", //117
5270 "SYS_getsockopt", //118
5271 "SYS_119",
5272 "SYS_readv", //120
5273 "SYS_writev", //121
5274 "SYS_settimeofday", //122
5275 "SYS_fchown", //123
5276 "SYS_fchmod", //124
5277 "SYS_125",
5278 "SYS_setreuid", //126
5279 "SYS_setregid", //127
5280 "SYS_rename", //128
5281 "SYS_129",
5282 "SYS_130",
5283 "SYS_flock", //131
5284 "SYS_mkfifo", //132
5285 "SYS_sendto", //133
5286 "SYS_shutdown", //134
5287 "SYS_socketpair", //135
5288 "SYS_mkdir", //136
5289 "SYS_rmdir", //137
5290 "SYS_utimes", //138
5291 "SYS_139",
5292 "SYS_adjtime", //140
5293 "SYS_141",
5294 "SYS_142",
5295 "SYS_143",
5296 "SYS_144",
5297 "SYS_145",
5298 "SYS_146",
5299 "SYS_setsid", //147
5300 "SYS_quotactl", //148
5301 "SYS_149",
5302 "SYS_150",
5303 "SYS_151",
5304 "SYS_152",
5305 "SYS_153",
5306 "SYS_154",
5307 "SYS_nfssvc", //155
5308 "SYS_156",
5309 "SYS_157",
5310 "SYS_158",
5311 "SYS_159",
5312 "SYS_160",
5313 "SYS_getfh", //161
5314 "SYS_162",
5315 "SYS_163",
5316 "SYS_164",
5317 "SYS_sysarch", //165
5318 "SYS_166",
5319 "SYS_167",
5320 "SYS_168",
5321 "SYS_169",
5322 "SYS_170",
5323 "SYS_171",
5324 "SYS_172",
5325 "SYS_pread", //173
5326 "SYS_pwrite", //174
5327 "SYS_175",
5328 "SYS_176",
5329 "SYS_177",
5330 "SYS_178",
5331 "SYS_179",
5332 "SYS_180",
5333 "SYS_setgid", //181
5334 "SYS_setegid", //182
5335 "SYS_seteuid", //183
5336 "SYS_lfs_bmapv", //184
5337 "SYS_lfs_markv", //185
5338 "SYS_lfs_segclean", //186
5339 "SYS_lfs_segwait", //187
5340 "SYS_188",
5341 "SYS_189",
5342 "SYS_190",
5343 "SYS_pathconf", //191
5344 "SYS_fpathconf", //192
5345 "SYS_swapctl", //193
5346 "SYS_getrlimit", //194
5347 "SYS_setrlimit", //195
5348 "SYS_getdirentries", //196
5349 "SYS_mmap", //197
5350 "SYS___syscall", //198
5351 "SYS_lseek", //199
5352 "SYS_truncate", //200
5353 "SYS_ftruncate", //201
5354 "SYS___sysctl", //202
5355 "SYS_mlock", //203
5356 "SYS_munlock", //204
5357 "SYS_205",
5358 "SYS_futimes", //206
5359 "SYS_getpgid", //207
5360 "SYS_xfspioctl", //208
5361 "SYS_209",
5362 "SYS_210",
5363 "SYS_211",
5364 "SYS_212",
5365 "SYS_213",
5366 "SYS_214",
5367 "SYS_215",
5368 "SYS_216",
5369 "SYS_217",
5370 "SYS_218",
5371 "SYS_219",
5372 "SYS_220",
5373 "SYS_semget", //221
5374 "SYS_222",
5375 "SYS_223",
5376 "SYS_224",
5377 "SYS_msgget", //225
5378 "SYS_msgsnd", //226
5379 "SYS_msgrcv", //227
5380 "SYS_shmat", //228
5381 "SYS_229",
5382 "SYS_shmdt", //230
5383 "SYS_231",
5384 "SYS_clock_gettime", //232
5385 "SYS_clock_settime", //233
5386 "SYS_clock_getres", //234
5387 "SYS_235",
5388 "SYS_236",
5389 "SYS_237",
5390 "SYS_238",
5391 "SYS_239",
5392 "SYS_nanosleep", //240
5393 "SYS_241",
5394 "SYS_242",
5395 "SYS_243",
5396 "SYS_244",
5397 "SYS_245",
5398 "SYS_246",
5399 "SYS_247",
5400 "SYS_248",
5401 "SYS_249",
5402 "SYS_minherit", //250
5403 "SYS_rfork", //251
5404 "SYS_poll", //252
5405 "SYS_issetugid", //253
5406 "SYS_lchown", //254
5407 "SYS_getsid", //255
5408 "SYS_msync", //256
5409 "SYS_257",
5410 "SYS_258",
5411 "SYS_259",
5412 "SYS_getfsstat", //260
5413 "SYS_statfs", //261
5414 "SYS_fstatfs", //262
5415 "SYS_pipe", //263
5416 "SYS_fhopen", //264
5417 "SYS_265",
5418 "SYS_fhstatfs", //266
5419 "SYS_preadv", //267
5420 "SYS_pwritev", //268
5421 "SYS_kqueue", //269
5422 "SYS_kevent", //270
5423 "SYS_mlockall", //271
5424 "SYS_munlockall", //272
5425 "SYS_getpeereid", //273
5426 "SYS_274",
5427 "SYS_275",
5428 "SYS_276",
5429 "SYS_277",
5430 "SYS_278",
5431 "SYS_279",
5432 "SYS_280",
5433 "SYS_getresuid", //281
5434 "SYS_setresuid", //282
5435 "SYS_getresgid", //283
5436 "SYS_setresgid", //284
5437 "SYS_285",
5438 "SYS_mquery", //286
5439 "SYS_closefrom", //287
5440 "SYS_sigaltstack", //288
5441 "SYS_shmget", //289
5442 "SYS_semop", //290
5443 "SYS_stat", //291
5444 "SYS_fstat", //292
5445 "SYS_lstat", //293
5446 "SYS_fhstat", //294
5447 "SYS___semctl", //295
5448 "SYS_shmctl", //296
5449 "SYS_msgctl", //297
5450 "SYS_MAXSYSCALL", //298
5451 //299
5452 //300
5453 };
    uint32_t uEAX;
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
            break;
    }
}
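
/*
 * Unlike Linux, OpenBSD's i386 ABI passes syscall arguments on the user
 * stack rather than in registers, which is why the dumper above copies eight
 * raw dwords from guest ESP with PGMPhysSimpleReadGCPtr and logs them
 * uninterpreted. A sketch of that best-effort stack peek (the status code is
 * deliberately ignored; on failure the zero-initialized buffer is simply
 * logged as-is):
 *
 *     uint32_t au32Args[8] = {0};
 *     PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu),
 *                            sizeof(au32Args));
 *
 * The first dword is typically the return address pushed by the libc
 * syscall stub, with the actual arguments following it.
 */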


#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}

void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t        *pbDst = (uint8_t *)dst;
    const uint8_t  *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
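
/*
 * The byte-wise loop above makes no alignment assumptions, which keeps this
 * no-CRT fallback trivially correct. If the buffers were known to be 4-byte
 * aligned, a word-wise variant along these lines (a sketch only, not used
 * here) would cut the iteration count by roughly a factor of four:
 *
 *     uint32_t       *pu32Dst = (uint32_t *)dst;
 *     const uint32_t *pu32Src = (const uint32_t *)src;
 *     size_t          cWords  = size / 4;
 *     while (cWords-- > 0)
 *         *pu32Dst++ = *pu32Src++;
 *     // ...followed by a byte loop for the remaining size % 4 bytes.
 */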

#endif

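/**
 * QEMU callback invoked when the CPU's system management mode state changes;
 * SMM is not emulated by the recompiler here, so this is deliberately a
 * no-op.
 */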
void cpu_smm_update(CPUX86State *env)
{
}