VirtualBox
source: vbox/trunk/src/recompiler/VBoxRecompiler.c @ 45528

Last change on this file since 45528 was 45528, checked in by vboxsync, 12 years ago:
VBOX_WITH_RAW_MODE changes.

/* $Id: VBoxRecompiler.c 45528 2013-04-12 17:32:57Z vboxsync $ */
/** @file
 * VBox Recompiler - QEMU.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_REM
#include <stdio.h>      /* FILE */
#include "osdep.h"
#include "config.h"
#include "cpu.h"
#include "exec-all.h"
#include "ioport.h"

#include <VBox/vmm/rem.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/dbg.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include "REMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/param.h>
#include <VBox/err.h>

#include <VBox/log.h>
#include <iprt/semaphore.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/thread.h>
#include <iprt/string.h>

/* Don't wanna include everything. */
extern void cpu_exec_init_all(uintptr_t tb_size);
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUX86State *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
ram_addr_t get_phys_page_offset(target_ulong addr);
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

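/* Illustrative usage sketch (not from the original file): copying one guest
 * FPU/MMX register during state sync.  The 'fpregs' field name is an
 * assumption for illustration only.
 *
 *     REM_COPY_FPU_REG(&pDst->fpregs[i], &pSrc->fpregs[i]);
 */
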
/** How remR3RunLoggingStep operates. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static void     remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
static int      remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);

static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMCOUNTER    gStatCpuGetTSC;
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
static STAMCOUNTER    gaStatRefuseStale[6];
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;

/*
 * Global stuff.
 */

/** MMIO read callbacks. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};


#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static FNDBGCCMD remR3CmdDisasEnableStepping;

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0U, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable single stepping with logged disassembly. "
                          "If no argument is given, the current state is shown."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/**
 * Initializes the REM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if 0 /* just an annoyance at the moment. */
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default. */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",   STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",       STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",       STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",       STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",        STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",      STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",           STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",      STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",       STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",      STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",          STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",    STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",        STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",      STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",         STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",    STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",         STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",      STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",         STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",    STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",            STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",     STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",        STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",     STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",          STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",      STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",  STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",     STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
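
/* Illustrative lifecycle sketch (not part of the original file): the VMM
 * init code is assumed to drive REM roughly like this; the exact call sites
 * live outside this file and are not reproduced verbatim.
 *
 *     rc = REMR3Init(pVM);              // during VM creation
 *     ...
 *     rc = REMR3InitFinalize(pVM);      // after all devices registered RAM
 *     ...
 *     REMR3Reset(pVM);                  // on VM reset (rendezvous)
 *     ...
 *     rc = REMR3Term(pVM);              // during VM destruction
 */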


/**
 * Finalizes the REM initialization.
 *
 * This is called after all components, devices and drivers have
 * been initialized. Its main purpose is to finish the RAM-related
 * initialization.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 */
REMR3DECL(int) REMR3InitFinalize(PVM pVM)
{
    int rc;

    /*
     * Ram size & dirty bit map.
     */
    Assert(!pVM->rem.s.fGCPhysLastRamFixed);
    pVM->rem.s.fGCPhysLastRamFixed = true;
#ifdef RT_STRICT
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
#else
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
#endif
    return rc;
}

/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int      rc = VINF_SUCCESS;
    RTGCPHYS cb;

    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Pad the bitmap up to the nearest 4GB-of-RAM boundary and leave at least 64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
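
/* Worked example of the guarded layout above (illustrative, assuming 4K pages,
 * so _4G >> PAGE_SHIFT is 1MB of bitmap per 4GB of RAM):
 *
 *     GCPhysLastRam = 0x1FFFEFFF  =>  cb = 0x1FFFF000, phys_dirty_size = 0x1FFFF
 *     cbBitmapAligned = RT_ALIGN_32(0x1FFFF, 0x1000)   = 0x20000
 *     cbBitmapFull    = RT_ALIGN_32(0x1FFFF, 0x100000) = 0x100000
 *
 * The tail [0x20000, 0x100000) is made inaccessible, and phys_dirty is then
 * advanced by 0x20000 - 0x1FFFF = 1 byte so the bitmap ends exactly at the
 * guard, making any overrun fault immediately.
 */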


/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources; the VM
 * itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gaStatRefuseStale[0]);
    STAM_DEREG(pVM, &gaStatRefuseStale[1]);
    STAM_DEREG(pVM, &gaStatRefuseStale[2]);
    STAM_DEREG(pVM, &gaStatRefuseStale[3]);
    STAM_DEREG(pVM, &gaStatRefuseStale[4]);
    STAM_DEREG(pVM, &gaStatRefuseStale[5]);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}


/**
 * The VM is being reset.
 *
 * For the REM component this means calling cpu_reset() and
 * reinitializing some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);          /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);   /* terminator */
}
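
/* For reference, the current (REM_SAVED_STATE_VERSION) unit layout produced
 * above is simply:
 *
 *     u32  Env.hflags
 *     u32  ~0                  separator
 *     u32  fRawRing0           (Env.state & CPU_RAW_RING0 as 0/1)
 *     u32  u32PendingInterrupt
 *     u32  ~0                  terminator
 *
 * remR3Load() below accepts this plus the older 1.6 layout with the extra
 * CPUX86State_Ver16 blob and invalidated-page list.
 */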


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int      rc;
    PREM     pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);    /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}



#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM_RUN

/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int     rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool    fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enable single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, it has to be disabled before we start stepping.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves.
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HM:
                /** @todo is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
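
/* Illustrative usage sketch (not from the original file), following the sync
 * protocol described in the function comment above:
 *
 *     rc = REMR3State(pVM, pVCpu);                  // sync VM -> recompiler
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = REMR3Step(pVM, pVCpu);               // execute exactly one instruction
 *         int rc2 = REMR3StateBack(pVM, pVCpu);     // sync recompiler -> VM
 *         AssertRC(rc2);
 *     }
 */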


/**
 * Set a breakpoint using the REM facilities.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
    {
        LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
    return VERR_REM_NO_MORE_BP_SLOTS;
}


/**
 * Clears a breakpoint set by REMR3BreakpointSet().
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
    {
        LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
    return VERR_REM_BP_NOT_FOUND;
}
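
/* Illustrative usage sketch (not from the original file): arming a guest
 * breakpoint from the EMT around a debug session.  The address value is a
 * made-up example.
 *
 *     rc = REMR3BreakpointSet(pVM, 0xc0101234);    // VERR_REM_NO_MORE_BP_SLOTS if slots are exhausted
 *     ...
 *     rc = REMR3BreakpointClear(pVM, 0xc0101234);  // VERR_REM_BP_NOT_FOUND if it wasn't set
 */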


/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. It is intended to be called while in raw mode and
 * thus takes care of all the state syncing between REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HM;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint *pBP;
                    RTGCPTR        GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HM\n"));
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
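
/* Summary of the cpu_exec() exit codes handled by the REM entry points in
 * this file (derived from the switch statements above and below):
 *
 *     EXCP_SINGLE_INSTR  -> VINF_EM_RESCHEDULE      (single-instruction mode only)
 *     EXCP_INTERRUPT     -> VINF_SUCCESS / VINF_EM_RESCHEDULE
 *     EXCP_HLT/HALTED    -> VINF_EM_HALT
 *     EXCP_DEBUG         -> VINF_EM_DBG_BREAKPOINT or VINF_EM_DBG_STEPPED
 *     EXCP_EXECUTE_RAW   -> VINF_EM_RESCHEDULE_RAW
 *     EXCP_EXECUTE_HM    -> VINF_EM_RESCHEDULE_HM
 *     EXCP_RC            -> pVM->rem.s.rc (an EM status raised from a helper)
 */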


/**
 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The Virtual CPU handle.
 */
static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    Assert(pVM->rem.s.fInREM);
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    cpu_single_step(&pVM->rem.s.Env, 1);
#else
    Assert(!pVM->rem.s.Env.singlestep_enabled);
#endif

    /*
     * Now we set the execute single instruction flag and enter the cpu_exec loop.
     */
    for (;;)
    {
        char szBuf[256];

        /*
         * Log the current registers state and instruction.
         */
        remR3StateUpdate(pVM, pVCpu);
        DBGFR3Info(pVM->pUVM, "cpumguest", NULL, NULL);
        szBuf[0] = '\0';
        rc = DBGFR3DisasInstrEx(pVM->pUVM,
                                pVCpu->idCpu,
                                0, /* Sel */
                                0, /* GCPtr */
                                DBGF_DISAS_FLAGS_CURRENT_GUEST
                                | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                szBuf,
                                sizeof(szBuf),
                                NULL);
        if (RT_FAILURE(rc))
            RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
        RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);

        /*
         * Execute the instruction.
         */
        TMNotifyStartOfExecution(pVCpu);

        if (   pVM->rem.s.Env.exception_index < 0
            || pVM->rem.s.Env.exception_index > 256)
            pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        pVM->rem.s.Env.interrupt_request = 0;
#else
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
#endif
        if (   VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
            || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
            pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
        RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n",
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        rc = cpu_exec(&pVM->rem.s.Env);

        RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        TMNotifyEndOfExecution(pVCpu);

        switch (rc)
        {
#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
            /*
             * The normal exit.
             */
            case EXCP_SINGLE_INSTR:
                if (   !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
                    && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                    continue;
                RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                            pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                rc = VINF_SUCCESS;
                break;

#else
            /*
             * The normal exit, check for breakpoints at PC just to be sure.
             */
#endif
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint *pBP;
                    RTGCPTR        GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
                if (rc == VINF_EM_DBG_STEPPED)
                {
                    if (   !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
                        && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                        continue;

                    RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                                pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                    rc = VINF_SUCCESS;
                }
#endif
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
                rc = VINF_SUCCESS;
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HM rc=VINF_EM_RESCHEDULE_HM\n");
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
                rc = VINF_EM_RESCHEDULE;
                break;
        }
        break;
    }

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
//    cpu_single_step(&pVM->rem.s.Env, 0);
#else
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
#endif
    return rc;
}


/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                CPUBreakpoint *pBP;
                RTGCPTR        GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HM:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HM\n"));
            rc = VINF_EM_RESCHEDULE_HM;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
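
/* Illustrative EMT-side sketch (not from the original file; the real loop
 * lives in EM and handles many more cases):
 *
 *     rc = REMR3State(pVM, pVCpu);            // sync once before the run
 *     while (RT_SUCCESS(rc))
 *     {
 *         rc = REMR3Run(pVM, pVCpu);          // may return VINF_EM_* reschedule codes
 *         if (rc != VINF_SUCCESS)
 *             break;                          // let EM act on halts/reschedules/FFs
 *     }
 *     rc2 = REMR3StateBack(pVM, pVCpu);       // write the REM context back
 */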
1407
1408
1409/**
1410 * Check if the cpu state is suitable for Raw execution.
1411 *
1412 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1413 *
1414 * @param env The CPU env struct.
1415 * @param eip The EIP to check this for (might differ from env->eip).
1416 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1417 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1418 *
1419 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1420 */
1421bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1422{
1423 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1424 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1425 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1426 uint32_t u32CR0;
1427
1428#ifdef IEM_VERIFICATION_MODE
1429 return false;
1430#endif
1431
1432 /* Update counter. */
1433 env->pVM->rem.s.cCanExecuteRaw++;
1434
1435 /* Never when single stepping+logging guest code. */
1436 if (env->state & CPU_EMULATE_SINGLE_STEP)
1437 return false;
1438
1439 if (HMIsEnabled(env->pVM))
1440 {
1441 CPUMCTX Ctx;
1442
1443 env->state |= CPU_RAW_HM;
1444
1445 /*
1446 * The simple check first...
1447 */
1448 if (!EMIsHwVirtExecutionEnabled(env->pVM))
1449 return false;
1450
1451 /*
1452 * Create partial context for HMR3CanExecuteGuest
1453 */
1454 Ctx.cr0 = env->cr[0];
1455 Ctx.cr3 = env->cr[3];
1456 Ctx.cr4 = env->cr[4];
1457
1458 Ctx.tr.Sel = env->tr.selector;
1459 Ctx.tr.ValidSel = env->tr.selector;
1460 Ctx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1461 Ctx.tr.u64Base = env->tr.base;
1462 Ctx.tr.u32Limit = env->tr.limit;
1463 Ctx.tr.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1464
1465 Ctx.ldtr.Sel = env->ldt.selector;
1466 Ctx.ldtr.ValidSel = env->ldt.selector;
1467 Ctx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1468 Ctx.ldtr.u64Base = env->ldt.base;
1469 Ctx.ldtr.u32Limit = env->ldt.limit;
1470 Ctx.ldtr.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1471
1472 Ctx.idtr.cbIdt = env->idt.limit;
1473 Ctx.idtr.pIdt = env->idt.base;
1474
1475 Ctx.gdtr.cbGdt = env->gdt.limit;
1476 Ctx.gdtr.pGdt = env->gdt.base;
1477
1478 Ctx.rsp = env->regs[R_ESP];
1479 Ctx.rip = env->eip;
1480
1481 Ctx.eflags.u32 = env->eflags;
1482
1483 Ctx.cs.Sel = env->segs[R_CS].selector;
1484 Ctx.cs.ValidSel = env->segs[R_CS].selector;
1485 Ctx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1486 Ctx.cs.u64Base = env->segs[R_CS].base;
1487 Ctx.cs.u32Limit = env->segs[R_CS].limit;
1488 Ctx.cs.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1489
1490 Ctx.ds.Sel = env->segs[R_DS].selector;
1491 Ctx.ds.ValidSel = env->segs[R_DS].selector;
1492 Ctx.ds.fFlags = CPUMSELREG_FLAGS_VALID;
1493 Ctx.ds.u64Base = env->segs[R_DS].base;
1494 Ctx.ds.u32Limit = env->segs[R_DS].limit;
1495 Ctx.ds.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1496
1497 Ctx.es.Sel = env->segs[R_ES].selector;
1498 Ctx.es.ValidSel = env->segs[R_ES].selector;
1499 Ctx.es.fFlags = CPUMSELREG_FLAGS_VALID;
1500 Ctx.es.u64Base = env->segs[R_ES].base;
1501 Ctx.es.u32Limit = env->segs[R_ES].limit;
1502 Ctx.es.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1503
1504 Ctx.fs.Sel = env->segs[R_FS].selector;
1505 Ctx.fs.ValidSel = env->segs[R_FS].selector;
1506 Ctx.fs.fFlags = CPUMSELREG_FLAGS_VALID;
1507 Ctx.fs.u64Base = env->segs[R_FS].base;
1508 Ctx.fs.u32Limit = env->segs[R_FS].limit;
1509 Ctx.fs.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1510
1511 Ctx.gs.Sel = env->segs[R_GS].selector;
1512 Ctx.gs.ValidSel = env->segs[R_GS].selector;
1513 Ctx.gs.fFlags = CPUMSELREG_FLAGS_VALID;
1514 Ctx.gs.u64Base = env->segs[R_GS].base;
1515 Ctx.gs.u32Limit = env->segs[R_GS].limit;
1516 Ctx.gs.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1517
1518 Ctx.ss.Sel = env->segs[R_SS].selector;
1519 Ctx.ss.ValidSel = env->segs[R_SS].selector;
1520 Ctx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1521 Ctx.ss.u64Base = env->segs[R_SS].base;
1522 Ctx.ss.u32Limit = env->segs[R_SS].limit;
1523 Ctx.ss.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1524
1525 Ctx.msrEFER = env->efer;
1526
1527 /* Hardware accelerated raw-mode:
1528 *
1529 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1530 */
1531 if (HMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1532 {
1533 *piException = EXCP_EXECUTE_HM;
1534 return true;
1535 }
1536 return false;
1537 }
1538
1539 /*
1540 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1541 * or 32 bits protected mode ring 0 code
1542 *
1543 * The tests are ordered by the likelihood of being true during normal execution.
1544 */
1545 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1546 {
1547 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1548 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1549 return false;
1550 }
1551
1552#ifndef VBOX_RAW_V86
1553 if (fFlags & VM_MASK) {
1554 STAM_COUNTER_INC(&gStatRefuseVM86);
1555 Log2(("raw mode refused: VM_MASK\n"));
1556 return false;
1557 }
1558#endif
1559
1560 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1561 {
1562#ifndef DEBUG_bird
1563 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1564#endif
1565 return false;
1566 }
1567
1568 if (env->singlestep_enabled)
1569 {
1570 //Log2(("raw mode refused: Single step\n"));
1571 return false;
1572 }
1573
1574 if (!QTAILQ_EMPTY(&env->breakpoints))
1575 {
1576 //Log2(("raw mode refused: Breakpoints\n"));
1577 return false;
1578 }
1579
1580 if (!QTAILQ_EMPTY(&env->watchpoints))
1581 {
1582 //Log2(("raw mode refused: Watchpoints\n"));
1583 return false;
1584 }
1585
1586 u32CR0 = env->cr[0];
1587 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1588 {
1589 STAM_COUNTER_INC(&gStatRefusePaging);
1590 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1591 return false;
1592 }
1593
1594 if (env->cr[4] & CR4_PAE_MASK)
1595 {
1596 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1597 {
1598 STAM_COUNTER_INC(&gStatRefusePAE);
1599 return false;
1600 }
1601 }
1602
1603 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1604 {
1605 if (!EMIsRawRing3Enabled(env->pVM))
1606 return false;
1607
1608 if (!(env->eflags & IF_MASK))
1609 {
1610 STAM_COUNTER_INC(&gStatRefuseIF0);
1611 Log2(("raw mode refused: IF (RawR3)\n"));
1612 return false;
1613 }
1614
1615 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1616 {
1617 STAM_COUNTER_INC(&gStatRefuseWP0);
1618 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1619 return false;
1620 }
1621 }
1622 else
1623 {
1624 if (!EMIsRawRing0Enabled(env->pVM))
1625 return false;
1626
1627 // Let's start with pure 32 bits ring 0 code first
1628 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1629 {
1630 STAM_COUNTER_INC(&gStatRefuseCode16);
1631 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1632 return false;
1633 }
1634
1635 if (EMIsRawRing1Enabled(env->pVM))
1636 {
1637 /* Only ring 0 and 1 supervisor code. */
1638 if (((fFlags >> HF_CPL_SHIFT) & 3) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1639 {
1640 Log2(("raw r0 mode refused: CPL %d\n", (fFlags >> HF_CPL_SHIFT) & 3));
1641 return false;
1642 }
1643 }
1644 /* Only R0. */
1645 else if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1646 {
1647 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1648 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1649 return false;
1650 }
1651
1652 if (!(u32CR0 & CR0_WP_MASK))
1653 {
1654 STAM_COUNTER_INC(&gStatRefuseWP0);
1655 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1656 return false;
1657 }
1658
1659#ifdef VBOX_WITH_RAW_MODE
1660 if (PATMIsPatchGCAddr(env->pVM, eip))
1661 {
1662 Log2(("raw r0 mode forced: patch code\n"));
1663 *piException = EXCP_EXECUTE_RAW;
1664 return true;
1665 }
1666#endif
1667
1668#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1669 if (!(env->eflags & IF_MASK))
1670 {
1671 STAM_COUNTER_INC(&gStatRefuseIF0);
1672 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1673 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1674 return false;
1675 }
1676#endif
1677
1678#ifndef VBOX_WITH_RAW_RING1
1679 if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
1680 {
1681 Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
1682 return false;
1683 }
1684#endif
1685 env->state |= CPU_RAW_RING0;
1686 }
1687
1688 /*
1689 * Don't reschedule the first time we're called, because there might be
1690 * special reasons why we're here that are not covered by the above checks.
1691 */
1692 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1693 {
1694 Log2(("raw mode refused: first scheduling\n"));
1695 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1696 return false;
1697 }
1698
1699 /*
1700 * Stale hidden selectors mean raw-mode is unsafe (we're being very careful here).
1701 */
1702 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1703 {
1704 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1705 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1706 return false;
1707 }
1708 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1709 {
1710 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1711 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1712 return false;
1713 }
1714 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1715 {
1716 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1717 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1718 return false;
1719 }
1720 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1721 {
1722 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1723 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1724 return false;
1725 }
1726 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1727 {
1728 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1729 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1730 return false;
1731 }
1732 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1733 {
1734 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1735 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1736 return false;
1737 }
1738
1739/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1740 *piException = EXCP_EXECUTE_RAW;
1741 return true;
1742}
1743
1744
1745#ifdef VBOX_WITH_RAW_MODE
1746/**
1747 * Fetches a code byte.
1748 *
1749 * @returns Success indicator (bool) for ease of use.
1750 * @param env The CPU environment structure.
1751 * @param GCPtrInstr Where to fetch code.
1752 * @param pu8Byte Where to store the byte on success
1753 */
1754bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1755{
1756 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1757 if (RT_SUCCESS(rc))
1758 return true;
1759 return false;
1760}
1761#endif /* VBOX_WITH_RAW_MODE */
1762
1763
1764/**
1765 * Flush (or invalidate if you like) page table/dir entry.
1766 *
1767 * (invlpg instruction; tlb_flush_page)
1768 *
1769 * @param env Pointer to cpu environment.
1770 * @param GCPtr The virtual address whose page table/dir entry should be invalidated.
1771 */
1772void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1773{
1774 PVM pVM = env->pVM;
1775 PCPUMCTX pCtx;
1776 int rc;
1777
1778 Assert(EMRemIsLockOwner(env->pVM));
1779
1780 /*
1781 * When we're replaying invlpg instructions or restoring a saved
1782 * state we disable this path.
1783 */
1784 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1785 return;
1786 LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1787 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1788
1789 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1790
1791 /*
1792 * Update the control registers before calling PGMFlushPage.
1793 */
1794 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1795 Assert(pCtx);
1796 pCtx->cr0 = env->cr[0];
1797 pCtx->cr3 = env->cr[3];
1798 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1799 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1800 pCtx->cr4 = env->cr[4];
1801
1802 /*
1803 * Let PGM do the rest.
1804 */
1805 Assert(env->pVCpu);
1806 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1807 if (RT_FAILURE(rc))
1808 {
1809 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1810 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1811 }
1812 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1813}
1814
1815
1816#ifndef REM_PHYS_ADDR_IN_TLB
1817/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
1818void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
1819{
1820 void *pv;
1821 int rc;
1822
1823
1824 /* Address must be aligned enough to fiddle with lower bits */
1825 Assert((physAddr & 0x3) == 0);
1826 /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/
1827
1828 STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
1829 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1830 STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
1831 Assert( rc == VINF_SUCCESS
1832 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1833 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1834 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
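     /* Note: the low bits of the returned pointer serve as access tags (consumed by
        the QEMU TLB glue): (void *)1 below marks a page that isn't directly
        accessible, and bit 1 marks a page where writes must be caught. */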
1835 if (RT_FAILURE(rc))
1836 return (void *)1;
1837 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1838 return (void *)((uintptr_t)pv | 2);
1839 return pv;
1840}
1841#endif /* REM_PHYS_ADDR_IN_TLB */
1842
1843
1844/**
1845 * Called from tlb_protect_code in order to write monitor a code page.
1846 *
1847 * @param env Pointer to the CPU environment.
1848 * @param GCPtr Code page to monitor
1849 */
1850void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1851{
1852#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1853 Assert(env->pVM->rem.s.fInREM);
1854 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1855 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1856 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1857 && !(env->eflags & VM_MASK) /* no V86 mode */
1858 && !HMIsEnabled(env->pVM))
1859 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1860#endif
1861}
1862
1863
1864/**
1865 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1866 *
1867 * @param env Pointer to the CPU environment.
1868 * @param GCPtr Code page to stop monitoring.
1869 */
1870void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1871{
1872 Assert(env->pVM->rem.s.fInREM);
1873#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1874 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1875 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1876 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1877 && !(env->eflags & VM_MASK) /* no V86 mode */
1878 && !HMIsEnabled(env->pVM))
1879 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1880#endif
1881}
1882
1883
1884/**
1885 * Called when the CPU is initialized, any of the CRx registers are changed or
1886 * when the A20 line is modified.
1887 *
1888 * @param env Pointer to the CPU environment.
1889 * @param fGlobal Set if the flush is global.
1890 */
1891void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1892{
1893 PVM pVM = env->pVM;
1894 PCPUMCTX pCtx;
1895 Assert(EMRemIsLockOwner(pVM));
1896
1897 /*
1898 * When we're replaying invlpg instructions or restoring a saved
1899 * state we disable this path.
1900 */
1901 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1902 return;
1903 Assert(pVM->rem.s.fInREM);
1904
1905 /*
1906 * The caller doesn't check cr4, so we have to do that for ourselves.
1907 */
1908 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1909 fGlobal = true;
1910 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1911
1912 /*
1913 * Update the control registers before calling PGMR3FlushTLB.
1914 */
1915 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1916 Assert(pCtx);
1917 pCtx->cr0 = env->cr[0];
1918 pCtx->cr3 = env->cr[3];
1919 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1920 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1921 pCtx->cr4 = env->cr[4];
1922
1923 /*
1924 * Let PGM do the rest.
1925 */
1926 Assert(env->pVCpu);
1927 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1928}
1929
1930
1931/**
1932 * Called when any of the cr0, cr4 or efer registers is updated.
1933 *
1934 * @param env Pointer to the CPU environment.
1935 */
1936void remR3ChangeCpuMode(CPUX86State *env)
1937{
1938 PVM pVM = env->pVM;
1939 uint64_t efer;
1940 PCPUMCTX pCtx;
1941 int rc;
1942
1943 /*
1944 * When we're replaying loads or restoring a saved
1945 * state this path is disabled.
1946 */
1947 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1948 return;
1949 Assert(pVM->rem.s.fInREM);
1950
1951 /*
1952 * Update the control registers before calling PGMChangeMode()
1953 * as it may need to map whatever cr3 is pointing to.
1954 */
1955 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1956 Assert(pCtx);
1957 pCtx->cr0 = env->cr[0];
1958 pCtx->cr3 = env->cr[3];
1959 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1960 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1961 pCtx->cr4 = env->cr[4];
1962#ifdef TARGET_X86_64
1963 efer = env->efer;
1964 pCtx->msrEFER = efer;
1965#else
1966 efer = 0;
1967#endif
1968 Assert(env->pVCpu);
1969 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1970 if (rc != VINF_SUCCESS)
1971 {
1972 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1973 {
1974 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1975 remR3RaiseRC(env->pVM, rc);
1976 }
1977 else
1978 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1979 }
1980}
1981
1982
1983/**
1984 * Called from compiled code to run dma.
1985 *
1986 * @param env Pointer to the CPU environment.
1987 */
1988void remR3DmaRun(CPUX86State *env)
1989{
1990 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1991 PDMR3DmaRun(env->pVM);
1992 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1993}
1994
1995
1996/**
1997 * Called from compiled code to schedule pending timers in VMM
1998 *
1999 * @param env Pointer to the CPU environment.
2000 */
2001void remR3TimersRun(CPUX86State *env)
2002{
2003 LogFlow(("remR3TimersRun:\n"));
2004 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
2005 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
2006 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
2007 TMR3TimerQueuesDo(env->pVM);
2008 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
2009 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
2010}
2011
2012
2013/**
2014 * Record trap occurrence
2015 *
2016 * @returns VBox status code
2017 * @param env Pointer to the CPU environment.
2018 * @param uTrap Trap nr
2019 * @param uErrorCode Error code
2020 * @param pvNextEIP Next EIP
2021 */
2022int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
2023{
2024 PVM pVM = env->pVM;
2025#ifdef VBOX_WITH_STATISTICS
2026 static STAMCOUNTER s_aStatTrap[255];
2027 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
2028#endif
2029
2030#ifdef VBOX_WITH_STATISTICS
2031 if (uTrap < 255)
2032 {
2033 if (!s_aRegisters[uTrap])
2034 {
2035 char szStatName[64];
2036 s_aRegisters[uTrap] = true;
2037 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
2038 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
2039 }
2040 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
2041 }
2042#endif
2043 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2044 if ( uTrap < 0x20
2045 && (env->cr[0] & X86_CR0_PE)
2046 && !(env->eflags & X86_EFL_VM))
2047 {
2048#ifdef DEBUG
2049 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
2050#endif
2051 if (pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
2052 {
2053 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2054 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
2055 return VERR_REM_TOO_MANY_TRAPS;
2056 }
2057 if (pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
2058 pVM->rem.s.cPendingExceptions = 1;
2059 pVM->rem.s.uPendingException = uTrap;
2060 pVM->rem.s.uPendingExcptEIP = env->eip;
2061 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2062 }
2063 else
2064 {
2065 pVM->rem.s.cPendingExceptions = 0;
2066 pVM->rem.s.uPendingException = uTrap;
2067 pVM->rem.s.uPendingExcptEIP = env->eip;
2068 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2069 }
2070 return VINF_SUCCESS;
2071}
2072
2073
2074/**
2075 * Clears the current active trap.
2076 *
2077 * @param pVM VM Handle.
2078 */
2079void remR3TrapClear(PVM pVM)
2080{
2081 pVM->rem.s.cPendingExceptions = 0;
2082 pVM->rem.s.uPendingException = 0;
2083 pVM->rem.s.uPendingExcptEIP = 0;
2084 pVM->rem.s.uPendingExcptCR2 = 0;
2085}
2086
2087
2088/**
2089 * Records previous call instruction addresses.
2090 *
2091 * @param env Pointer to the CPU environment.
2092 */
2093void remR3RecordCall(CPUX86State *env)
2094{
2095#ifdef VBOX_WITH_RAW_MODE
2096 CSAMR3RecordCallAddress(env->pVM, env->eip);
2097#endif
2098}
2099
2100
2101/**
2102 * Syncs the internal REM state with the VM.
2103 *
2104 * This must be called before REMR3Run() is invoked whenever the REM
2105 * state is not up to date. Calling it several times in a row is not
2106 * permitted.
2107 *
2108 * @returns VBox status code.
2109 *
2110 * @param pVM VM Handle.
2111 * @param pVCpu VMCPU Handle.
2112 *
2113 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2114 * not do this since the majority of the callers don't want any unnecessary events
2115 * pending that would immediately interrupt execution.
2116 */
2117REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2118{
2119 register const CPUMCTX *pCtx;
2120 register unsigned fFlags;
2121 unsigned i;
2122 TRPMEVENT enmType;
2123 uint8_t u8TrapNo;
2124 uint32_t uCpl;
2125 int rc;
2126
2127 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2128 Log2(("REMR3State:\n"));
2129
2130 pVM->rem.s.Env.pVCpu = pVCpu;
2131 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2132
2133 Assert(!pVM->rem.s.fInREM);
2134 pVM->rem.s.fInStateSync = true;
2135
2136 /*
2137 * If we have to flush TBs, do that immediately.
2138 */
2139 if (pVM->rem.s.fFlushTBs)
2140 {
2141 STAM_COUNTER_INC(&gStatFlushTBs);
2142 tb_flush(&pVM->rem.s.Env);
2143 pVM->rem.s.fFlushTBs = false;
2144 }
2145
2146 /*
2147 * Copy the registers which require no special handling.
2148 */
2149#ifdef TARGET_X86_64
2150 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2151 Assert(R_EAX == 0);
2152 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2153 Assert(R_ECX == 1);
2154 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2155 Assert(R_EDX == 2);
2156 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2157 Assert(R_EBX == 3);
2158 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2159 Assert(R_ESP == 4);
2160 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2161 Assert(R_EBP == 5);
2162 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2163 Assert(R_ESI == 6);
2164 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2165 Assert(R_EDI == 7);
2166 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2167 pVM->rem.s.Env.regs[8] = pCtx->r8;
2168 pVM->rem.s.Env.regs[9] = pCtx->r9;
2169 pVM->rem.s.Env.regs[10] = pCtx->r10;
2170 pVM->rem.s.Env.regs[11] = pCtx->r11;
2171 pVM->rem.s.Env.regs[12] = pCtx->r12;
2172 pVM->rem.s.Env.regs[13] = pCtx->r13;
2173 pVM->rem.s.Env.regs[14] = pCtx->r14;
2174 pVM->rem.s.Env.regs[15] = pCtx->r15;
2175
2176 pVM->rem.s.Env.eip = pCtx->rip;
2177
2178 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2179#else
2180 Assert(R_EAX == 0);
2181 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2182 Assert(R_ECX == 1);
2183 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2184 Assert(R_EDX == 2);
2185 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2186 Assert(R_EBX == 3);
2187 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2188 Assert(R_ESP == 4);
2189 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2190 Assert(R_EBP == 5);
2191 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2192 Assert(R_ESI == 6);
2193 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2194 Assert(R_EDI == 7);
2195 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2196 pVM->rem.s.Env.eip = pCtx->eip;
2197
2198 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2199#endif
2200
2201 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2202
2203 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2204 for (i = 0; i < 8; i++)
2205 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2206
2207#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2208 /*
2209 * Clear the halted hidden flag (the interrupt waking up the CPU may
2210 * have been dispatched in raw mode).
2211 */
2212 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2213#endif
2214
2215 /*
2216 * Replay invlpg? Only if we're not flushing the TLB.
2217 */
2218 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2219 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
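     /* fFlags is the accumulated CPUM_CHANGED_* mask since we last entered REM,
        and uCpl the current guest privilege level; both are consumed below. */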
2220 if (pVM->rem.s.cInvalidatedPages)
2221 {
2222 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2223 {
2224 RTUINT i;
2225
2226 pVM->rem.s.fIgnoreCR3Load = true;
2227 pVM->rem.s.fIgnoreInvlPg = true;
2228 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2229 {
2230 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2231 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2232 }
2233 pVM->rem.s.fIgnoreInvlPg = false;
2234 pVM->rem.s.fIgnoreCR3Load = false;
2235 }
2236 pVM->rem.s.cInvalidatedPages = 0;
2237 }
2238
2239 /* Replay notification changes. */
2240 REMR3ReplayHandlerNotifications(pVM);
2241
2242 /* Update MSRs; before CRx registers! */
2243 pVM->rem.s.Env.efer = pCtx->msrEFER;
2244 pVM->rem.s.Env.star = pCtx->msrSTAR;
2245 pVM->rem.s.Env.pat = pCtx->msrPAT;
2246#ifdef TARGET_X86_64
2247 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2248 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2249 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2250 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2251
2252 /* Update the internal long mode activate flag according to the new EFER value. */
2253 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2254 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2255 else
2256 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2257#endif
2258
2259 /* Update the inhibit IRQ mask. */
2260 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2261 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2262 {
2263 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2264 if (InhibitPC == pCtx->rip)
2265 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2266 else
2267 {
2268 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2269 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2270 }
2271 }
2272
2273 /*
2274 * Sync the A20 gate.
2275 */
2276 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2277 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2278 {
2279 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2280 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2281 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2282 }
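     /* (The cIgnoreAll increment mutes the flush notifications triggered by
        cpu_x86_set_a20 so that they aren't replayed back into PGM.) */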
2283
2284 /*
2285 * Registers which are rarely changed and require special handling / order when changed.
2286 */
2287 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2288 | CPUM_CHANGED_CR4
2289 | CPUM_CHANGED_CR0
2290 | CPUM_CHANGED_CR3
2291 | CPUM_CHANGED_GDTR
2292 | CPUM_CHANGED_IDTR
2293 | CPUM_CHANGED_SYSENTER_MSR
2294 | CPUM_CHANGED_LDTR
2295 | CPUM_CHANGED_CPUID
2296 | CPUM_CHANGED_FPU_REM
2297 )
2298 )
2299 {
2300 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2301 {
2302 pVM->rem.s.fIgnoreCR3Load = true;
2303 tlb_flush(&pVM->rem.s.Env, true);
2304 pVM->rem.s.fIgnoreCR3Load = false;
2305 }
2306
2307 /* CR4 before CR0! */
2308 if (fFlags & CPUM_CHANGED_CR4)
2309 {
2310 pVM->rem.s.fIgnoreCR3Load = true;
2311 pVM->rem.s.fIgnoreCpuMode = true;
2312 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2313 pVM->rem.s.fIgnoreCpuMode = false;
2314 pVM->rem.s.fIgnoreCR3Load = false;
2315 }
2316
2317 if (fFlags & CPUM_CHANGED_CR0)
2318 {
2319 pVM->rem.s.fIgnoreCR3Load = true;
2320 pVM->rem.s.fIgnoreCpuMode = true;
2321 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2322 pVM->rem.s.fIgnoreCpuMode = false;
2323 pVM->rem.s.fIgnoreCR3Load = false;
2324 }
2325
2326 if (fFlags & CPUM_CHANGED_CR3)
2327 {
2328 pVM->rem.s.fIgnoreCR3Load = true;
2329 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2330 pVM->rem.s.fIgnoreCR3Load = false;
2331 }
2332
2333 if (fFlags & CPUM_CHANGED_GDTR)
2334 {
2335 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2336 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2337 }
2338
2339 if (fFlags & CPUM_CHANGED_IDTR)
2340 {
2341 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2342 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2343 }
2344
2345 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2346 {
2347 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2348 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2349 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2350 }
2351
2352 if (fFlags & CPUM_CHANGED_LDTR)
2353 {
2354 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2355 {
2356 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2357 pVM->rem.s.Env.ldt.newselector = 0;
2358 pVM->rem.s.Env.ldt.fVBoxFlags = pCtx->ldtr.fFlags;
2359 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2360 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2361 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u << 8) & 0xFFFFFF;
2362 }
2363 else
2364 {
2365 AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
2366 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2367 }
2368 }
2369
2370 if (fFlags & CPUM_CHANGED_CPUID)
2371 {
2372 uint32_t u32Dummy;
2373
2374 /*
2375 * Get the CPUID features.
2376 */
2377 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2378 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2379 }
2380
2381 /* Sync FPU state after CR4, CPUID and EFER (!). */
2382 if (fFlags & CPUM_CHANGED_FPU_REM)
2383 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2384 }
2385
2386 /*
2387 * Sync TR unconditionally to make life simpler.
2388 */
2389 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2390 pVM->rem.s.Env.tr.newselector = 0;
2391 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags;
2392 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2393 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2394 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u << 8) & 0xFFFFFF;
2395 /* Note! do_interrupt will fault if the busy flag is still set... */
2396 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2397
2398 /*
2399 * Update selector registers.
2400 *
2401 * This must be done *after* we've synced gdt, ldt and crX registers
2402 * since we're reading the GDT/LDT in sync_seg. This can happen with a
2403 * saved state which takes a quick dip into raw-mode, for instance.
2404 *
2405 * CPL/Stack: Note! Check this one first as the CPL might have changed.
2406 * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
2407 */
2408 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2409 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2410#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
2411 do \
2412 { \
2413 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
2414 { \
2415 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
2416 (a_pVBoxSReg)->Sel, \
2417 (a_pVBoxSReg)->u64Base, \
2418 (a_pVBoxSReg)->u32Limit, \
2419 ((a_pVBoxSReg)->Attr.u << 8) & 0xFFFFFF); \
2420 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
2421 } \
2422 /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
2423 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
2424 { \
2425 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
2426 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
2427 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
2428 if ((a_pRemSReg)->newselector) \
2429 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
2430 } \
2431 else \
2432 (a_pRemSReg)->newselector = 0; \
2433 } while (0)
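     /* Note on the attribute conversions: QEMU keeps segment attributes in 'flags'
        using the raw descriptor dword layout (attribute bits at positions 8..23),
        so "(Attr.u << 8) & 0xFFFFFF" converts the VBox attribute word going in,
        while "(flags >> 8) & 0xF0FF" converts back, dropping the limit 19:16 nibble. */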
2434
2435 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
2436 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
2437 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
2438 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
2439 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
2440 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
2441 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2442 * be the same but not the base/limit. */
2443
2444 /*
2445 * Check for traps.
2446 */
2447 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2448 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2449 if (RT_SUCCESS(rc))
2450 {
2451#ifdef DEBUG
2452 if (u8TrapNo == 0x80)
2453 {
2454 remR3DumpLnxSyscall(pVCpu);
2455 remR3DumpOBsdSyscall(pVCpu);
2456 }
2457#endif
2458
2459 pVM->rem.s.Env.exception_index = u8TrapNo;
2460 if (enmType != TRPM_SOFTWARE_INT)
2461 {
2462 pVM->rem.s.Env.exception_is_int = 0;
2463 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2464 }
2465 else
2466 {
2467 /*
2468 * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2469 * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2470 * for int3 and into.
2471 */
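            /* (Encodings: CD ib is the two byte INT n, while CC is the one byte
               INT3 and CE the one byte INTO instruction.) */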
2472 pVM->rem.s.Env.exception_is_int = 1;
2473 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2474 /* int 3 may be generated by one-byte 0xcc */
2475 if (u8TrapNo == 3)
2476 {
2477 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2478 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2479 }
2480 /* int 4 may be generated by one-byte 0xce */
2481 else if (u8TrapNo == 4)
2482 {
2483 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2484 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2485 }
2486 }
2487
2488 /* get error code and cr2 if needed. */
2489 if (enmType == TRPM_TRAP)
2490 {
2491 switch (u8TrapNo)
2492 {
2493 case 0x0e:
2494 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2495 /* fallthru */
2496 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2497 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2498 break;
2499
2500 case 0x11: case 0x08:
2501 default:
2502 pVM->rem.s.Env.error_code = 0;
2503 break;
2504 }
2505 }
2506 else
2507 pVM->rem.s.Env.error_code = 0;
2508
2509 /*
2510 * We can now reset the active trap since the recompiler is gonna have a go at it.
2511 */
2512 rc = TRPMResetTrap(pVCpu);
2513 AssertRC(rc);
2514 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2515 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2516 }
2517
2518 /*
2519 * Clear old interrupt request flags; Check for pending hardware interrupts.
2520 * (See @remark for why we don't check for other FFs.)
2521 */
2522 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2523 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2524 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2525 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2526
2527 /*
2528 * We're now in REM mode.
2529 */
2530 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2531 pVM->rem.s.fInREM = true;
2532 pVM->rem.s.fInStateSync = false;
2533 pVM->rem.s.cCanExecuteRaw = 0;
2534 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2535 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2536 return VINF_SUCCESS;
2537}
2538
2539
2540/**
2541 * Syncs back changes in the REM state to the VM state.
2542 *
2543 * This must be called after invoking REMR3Run().
2544 * Calling it several times in a row is not permitted.
2545 *
2546 * @returns VBox status code.
2547 *
2548 * @param pVM VM Handle.
2549 * @param pVCpu VMCPU Handle.
2550 */
2551REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2552{
2553 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2554 Assert(pCtx);
2555 unsigned i;
2556
2557 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2558 Log2(("REMR3StateBack:\n"));
2559 Assert(pVM->rem.s.fInREM);
2560
2561 /*
2562 * Copy back the registers.
2563 * This is done in the order they are declared in the CPUMCTX structure.
2564 */
2565
2566 /** @todo FOP */
2567 /** @todo FPUIP */
2568 /** @todo CS */
2569 /** @todo FPUDP */
2570 /** @todo DS */
2571
2572 /** @todo check if FPU/XMM was actually used in the recompiler */
2573 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2574//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2575
2576#ifdef TARGET_X86_64
2577 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2578 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2579 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2580 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2581 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2582 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2583 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2584 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2585 pCtx->r8 = pVM->rem.s.Env.regs[8];
2586 pCtx->r9 = pVM->rem.s.Env.regs[9];
2587 pCtx->r10 = pVM->rem.s.Env.regs[10];
2588 pCtx->r11 = pVM->rem.s.Env.regs[11];
2589 pCtx->r12 = pVM->rem.s.Env.regs[12];
2590 pCtx->r13 = pVM->rem.s.Env.regs[13];
2591 pCtx->r14 = pVM->rem.s.Env.regs[14];
2592 pCtx->r15 = pVM->rem.s.Env.regs[15];
2593
2594 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2595
2596#else
2597 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2598 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2599 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2600 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2601 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2602 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2603 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2604
2605 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2606#endif
2607
2608#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2609 do \
2610 { \
2611 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2612 if (!pVM->rem.s.Env.segs[R_##a_SREG].newselector) \
2613 { \
2614 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2615 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2616 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2617 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2618 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */ \
2619 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> 8) & 0xF0FF; \
2620 } \
2621 else \
2622 { \
2623 pCtx->a_sreg.fFlags = 0; \
2624 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2625 } \
2626 } while (0)
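     /* SYNC_BACK_SREG(es, ES) copies the REM state of ES back into pCtx->es; if a
        lazy selector load is still pending, the hidden parts are marked invalid
        instead of being copied back. */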
2627
2628 SYNC_BACK_SREG(es, ES);
2629 SYNC_BACK_SREG(cs, CS);
2630 SYNC_BACK_SREG(ss, SS);
2631 SYNC_BACK_SREG(ds, DS);
2632 SYNC_BACK_SREG(fs, FS);
2633 SYNC_BACK_SREG(gs, GS);
2634
2635#ifdef TARGET_X86_64
2636 pCtx->rip = pVM->rem.s.Env.eip;
2637 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2638#else
2639 pCtx->eip = pVM->rem.s.Env.eip;
2640 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2641#endif
2642
2643 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2644 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2645 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2646 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2647 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2648 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2649
2650 for (i = 0; i < 8; i++)
2651 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2652
2653 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2654 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2655 {
2656 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2657 STAM_COUNTER_INC(&gStatREMGDTChange);
2658 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2659 }
2660
2661 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2662 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2663 {
2664 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2665 STAM_COUNTER_INC(&gStatREMIDTChange);
2666 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2667 }
2668
2669 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2670 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2671 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2672 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2673 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2674 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2675 )
2676 {
2677 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2678 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2679 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2680 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2681 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2682 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2683 STAM_COUNTER_INC(&gStatREMLDTRChange);
2684 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2685 }
2686
2687 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2688 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2689 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2690 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2691 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2692 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2693 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2694 : 0)
2695 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2696 )
2697 {
2698 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2699 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2700 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2701 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2702 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2703 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2704 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2705 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2706 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2707 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2708 if (pCtx->tr.Attr.u)
2709 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2710 STAM_COUNTER_INC(&gStatREMTRChange);
2711 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2712 }
2713
2714 /* Sysenter MSR */
2715 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2716 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2717 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2718
2719 /* System MSRs. */
2720 pCtx->msrEFER = pVM->rem.s.Env.efer;
2721 pCtx->msrSTAR = pVM->rem.s.Env.star;
2722 pCtx->msrPAT = pVM->rem.s.Env.pat;
2723#ifdef TARGET_X86_64
2724 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2725 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2726 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2727 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2728#endif
2729
2730 /* Inhibit interrupt flag. */
2731 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2732 {
2733 Log(("Setting VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2734 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2735 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2736 }
2737 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2738 {
2739 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2740 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2741 }
2742
2743 remR3TrapClear(pVM);
2744
2745 /*
2746 * Check for traps.
2747 */
2748 if ( pVM->rem.s.Env.exception_index >= 0
2749 && pVM->rem.s.Env.exception_index < 256)
2750 {
2751 int rc;
2752
2753 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2754 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2755 AssertRC(rc);
2756 switch (pVM->rem.s.Env.exception_index)
2757 {
2758 case 0x0e:
2759 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2760 /* fallthru */
2761 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2762 case 0x11: case 0x08: /* 0 */
2763 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2764 break;
2765 }
2766
2767 }
2768
2769 /*
2770 * We're no longer in REM mode.
2771 */
2772 CPUMR3RemLeave(pVCpu,
2773 HMIsEnabled(pVM)
2774 || ( pVM->rem.s.Env.segs[R_SS].newselector
2775 | pVM->rem.s.Env.segs[R_GS].newselector
2776 | pVM->rem.s.Env.segs[R_FS].newselector
2777 | pVM->rem.s.Env.segs[R_ES].newselector
2778 | pVM->rem.s.Env.segs[R_DS].newselector
2779 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2780 );
2781 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2782 pVM->rem.s.fInREM = false;
2783 pVM->rem.s.pCtx = NULL;
2784 pVM->rem.s.Env.pVCpu = NULL;
2785 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2786 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2787 return VINF_SUCCESS;
2788}
2789
2790
2791/**
2792 * This is called by the disassembler when it wants to update the cpu state
2793 * before for instance doing a register dump.
2794 */
2795static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2796{
2797 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2798 unsigned i;
2799
2800 Assert(pVM->rem.s.fInREM);
2801
2802 /*
2803 * Copy back the registers.
2804 * This is done in the order they are declared in the CPUMCTX structure.
2805 */
2806
2807 /** @todo FOP */
2808 /** @todo FPUIP */
2809 /** @todo CS */
2810 /** @todo FPUDP */
2811 /** @todo DS */
2812 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2813 pCtx->fpu.MXCSR = 0;
2814 pCtx->fpu.MXCSR_MASK = 0;
2815
2816 /** @todo check if FPU/XMM was actually used in the recompiler */
2817 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2818//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2819
2820#ifdef TARGET_X86_64
2821 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2822 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2823 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2824 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2825 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2826 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2827 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2828 pCtx->r8 = pVM->rem.s.Env.regs[8];
2829 pCtx->r9 = pVM->rem.s.Env.regs[9];
2830 pCtx->r10 = pVM->rem.s.Env.regs[10];
2831 pCtx->r11 = pVM->rem.s.Env.regs[11];
2832 pCtx->r12 = pVM->rem.s.Env.regs[12];
2833 pCtx->r13 = pVM->rem.s.Env.regs[13];
2834 pCtx->r14 = pVM->rem.s.Env.regs[14];
2835 pCtx->r15 = pVM->rem.s.Env.regs[15];
2836
2837 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2838#else
2839 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2840 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2841 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2842 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2843 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2844 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2845 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2846
2847 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2848#endif
2849
2850 SYNC_BACK_SREG(es, ES);
2851 SYNC_BACK_SREG(cs, CS);
2852 SYNC_BACK_SREG(ss, SS);
2853 SYNC_BACK_SREG(ds, DS);
2854 SYNC_BACK_SREG(fs, FS);
2855 SYNC_BACK_SREG(gs, GS);
2856
2857#ifdef TARGET_X86_64
2858 pCtx->rip = pVM->rem.s.Env.eip;
2859 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2860#else
2861 pCtx->eip = pVM->rem.s.Env.eip;
2862 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2863#endif
2864
2865 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2866 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2867 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2868 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2869 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2870 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2871
2872 for (i = 0; i < 8; i++)
2873 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2874
2875 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2876 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2877 {
2878 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2879 STAM_COUNTER_INC(&gStatREMGDTChange);
2880 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2881 }
2882
2883 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2884 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2885 {
2886 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2887 STAM_COUNTER_INC(&gStatREMIDTChange);
2888 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2889 }
2890
2891 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2892 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2893 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2894 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2895 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2896 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2897 )
2898 {
2899 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2900 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2901 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2902 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2903 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2904 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2905 STAM_COUNTER_INC(&gStatREMLDTRChange);
2906 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2907 }
2908
2909 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2910 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2911 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2912 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2913 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2914 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2915 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2916 : 0)
2917 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2918 )
2919 {
2920 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2921 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2922 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2923 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2924 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2925 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2926 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2927 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2928 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2929 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2930 if (pCtx->tr.Attr.u)
2931 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2932 STAM_COUNTER_INC(&gStatREMTRChange);
2933 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2934 }
2935
2936 /* Sysenter MSR */
2937 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2938 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2939 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2940
2941 /* System MSRs. */
2942 pCtx->msrEFER = pVM->rem.s.Env.efer;
2943 pCtx->msrSTAR = pVM->rem.s.Env.star;
2944 pCtx->msrPAT = pVM->rem.s.Env.pat;
2945#ifdef TARGET_X86_64
2946 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2947 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2948 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2949 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2950#endif
2951
2952}
2953
2954
2955/**
2956 * Update the VMM state information if we're currently in REM.
2957 *
2958 * This method is used by the DBGF and PDMDevice when there is any uncertainty about whether
2959 * we're currently executing in REM and the VMM state is invalid. This method will of
2960 * course check that we're executing in REM before syncing any data over to the VMM.
2961 *
2962 * @param pVM The VM handle.
2963 * @param pVCpu The VMCPU handle.
2964 */
2965REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2966{
2967 if (pVM->rem.s.fInREM)
2968 remR3StateUpdate(pVM, pVCpu);
2969}
2970
2971
2972#undef LOG_GROUP
2973#define LOG_GROUP LOG_GROUP_REM
2974
2975
2976/**
2977 * Notify the recompiler about Address Gate 20 state change.
2978 *
2979 * This notification is required since A20 gate changes are
2980 * initiated from a device driver and the VM might just as
2981 * well be in REM mode as in RAW mode.
2982 *
2983 * @param pVM VM handle.
2984 * @param pVCpu VMCPU handle.
2985 * @param fEnable True if the gate should be enabled.
2986 * False if the gate should be disabled.
2987 */
2988REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2989{
2990 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2991 VM_ASSERT_EMT(pVM);
2992
2993 /** @todo SMP and the A20 gate... */
2994 if (pVM->rem.s.Env.pVCpu == pVCpu)
2995 {
2996 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2997 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2998 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2999 }
3000}
3001
3002
3003/**
3004 * Replays the handler notification changes.
3005 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
3006 *
3007 * @param pVM VM handle.
3008 */
3009REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
3010{
3011 /*
3012 * Replay the flushes.
3013 */
3014 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3015 VM_ASSERT_EMT(pVM);
3016
3017 /** @todo this isn't ensuring correct replay order. */
3018 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3019 {
3020 uint32_t idxNext;
3021 uint32_t idxRevHead;
3022 uint32_t idxHead;
3023#ifdef VBOX_STRICT
3024 int32_t c = 0;
3025#endif
3026
3027 /* Lockless purging of pending notifications. */
3028 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3029 if (idxHead == UINT32_MAX)
3030 return;
3031 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
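         /* (The atomic xchg above detaches the entire pending list in one go;
            other EMTs can keep queuing onto the now empty list meanwhile.) */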
3032
3033 /*
3034 * Reverse the list to process it in FIFO order.
3035 */
3036 idxRevHead = UINT32_MAX;
3037 do
3038 {
3039 /* Save the index of the next rec. */
3040 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3041 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3042 /* Push the record onto the reversed list. */
3043 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3044 idxRevHead = idxHead;
3045 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3046 /* Advance. */
3047 idxHead = idxNext;
3048 } while (idxHead != UINT32_MAX);
3049
3050 /*
3051 * Loop through the list, reinserting the records into the free list as they are
3052 * processed, to avoid having other EMTs run out of entries while we're flushing.
3053 */
3054 idxHead = idxRevHead;
3055 do
3056 {
3057 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3058 uint32_t idxCur;
3059 Assert(--c >= 0);
3060
3061 switch (pCur->enmKind)
3062 {
3063 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3064 remR3NotifyHandlerPhysicalRegister(pVM,
3065 pCur->u.PhysicalRegister.enmType,
3066 pCur->u.PhysicalRegister.GCPhys,
3067 pCur->u.PhysicalRegister.cb,
3068 pCur->u.PhysicalRegister.fHasHCHandler);
3069 break;
3070
3071 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3072 remR3NotifyHandlerPhysicalDeregister(pVM,
3073 pCur->u.PhysicalDeregister.enmType,
3074 pCur->u.PhysicalDeregister.GCPhys,
3075 pCur->u.PhysicalDeregister.cb,
3076 pCur->u.PhysicalDeregister.fHasHCHandler,
3077 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3078 break;
3079
3080 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3081 remR3NotifyHandlerPhysicalModify(pVM,
3082 pCur->u.PhysicalModify.enmType,
3083 pCur->u.PhysicalModify.GCPhysOld,
3084 pCur->u.PhysicalModify.GCPhysNew,
3085 pCur->u.PhysicalModify.cb,
3086 pCur->u.PhysicalModify.fHasHCHandler,
3087 pCur->u.PhysicalModify.fRestoreAsRAM);
3088 break;
3089
3090 default:
3091 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3092 break;
3093 }
3094
3095 /*
3096 * Advance idxHead.
3097 */
3098 idxCur = idxHead;
3099 idxHead = pCur->idxNext;
3100 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3101
3102 /*
3103 * Put the record back into the free list.
3104 */
3105 do
3106 {
3107 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3108 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3109 ASMCompilerBarrier();
3110 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
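             /* (Classic lockless LIFO push: re-read the free list head and retry
                the compare-and-swap until no other EMT has modified it.) */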
3111 } while (idxHead != UINT32_MAX);
3112
3113#ifdef VBOX_STRICT
3114 if (pVM->cCpus == 1)
3115 {
3116 unsigned c;
3117 /* Check that all records are now on the free list. */
3118 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3119 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3120 c++;
3121 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3122 }
3123#endif
3124 }
3125}
3126
3127
3128/**
3129 * Notify REM about changed code page.
3130 *
3131 * @returns VBox status code.
3132 * @param pVM VM handle.
3133 * @param pVCpu VMCPU handle.
3134 * @param pvCodePage Code page address
3135 */
3136REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3137{
3138#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3139 int rc;
3140 RTGCPHYS PhysGC;
3141 uint64_t flags;
3142
3143 VM_ASSERT_EMT(pVM);
3144
3145 /*
3146 * Get the physical page address.
3147 */
3148 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3149 if (rc == VINF_SUCCESS)
3150 {
3151 /*
3152 * Sync the required registers and flush the whole page.
3153 * (Easier to do the whole page than notifying it about each physical
3154 * byte that was changed.)
3155 */
3156 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3157 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3158 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3159 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3160
3161 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3162 }
3163#endif
3164 return VINF_SUCCESS;
3165}
3166
3167
3168/**
3169 * Notification about a successful MMR3PhysRegister() call.
3170 *
3171 * @param pVM VM handle.
3172 * @param GCPhys The physical address of the RAM.
3173 * @param cb Size of the memory.
3174 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3175 */
3176REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3177{
3178 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3179 VM_ASSERT_EMT(pVM);
3180
3181 /*
3182 * Validate input - we trust the caller.
3183 */
3184 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3185 Assert(cb);
3186 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3187 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3188
3189 /*
3190 * Base ram? Update GCPhysLastRam.
3191 */
3192 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3193 {
3194 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3195 {
3196 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3197 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3198 }
3199 }
3200
3201 /*
3202 * Register the ram.
3203 */
3204 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3205
3206 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3207 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3208 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3209
3210 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3211}
3212
3213
3214/**
3215 * Notification about a successful MMR3PhysRomRegister() call.
3216 *
3217 * @param pVM VM handle.
3218 * @param GCPhys The physical address of the ROM.
3219 * @param cb The size of the ROM.
3220 * @param pvCopy Pointer to the ROM copy.
3221 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3222 * This function will be called whenever the protection of the
3223 * shadow ROM changes (at reset and end of POST).
3224 */
3225REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3226{
3227 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3228 VM_ASSERT_EMT(pVM);
3229
3230 /*
3231 * Validate input - we trust the caller.
3232 */
3233 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3234 Assert(cb);
3235 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3236
3237 /*
3238 * Register the rom.
3239 */
3240 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3241
3242 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3243 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3244 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3245
3246 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3247}
3248
3249
3250/**
3251 * Notification about a successful memory deregistration or reservation.
3252 *
3253 * @param pVM VM Handle.
3254 * @param GCPhys Start physical address.
3255 * @param cb The size of the range.
3256 */
3257REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3258{
3259 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3260 VM_ASSERT_EMT(pVM);
3261
3262 /*
3263 * Validate input - we trust the caller.
3264 */
3265 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3266 Assert(cb);
3267 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3268
3269 /*
3270 * Unassign the memory.
3271 */
3272 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3273
3274 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3275 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3276 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3277
3278 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3279}
3280
3281
3282/**
3283 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3284 *
3285 * @param pVM VM Handle.
3286 * @param enmType Handler type.
3287 * @param GCPhys Handler range address.
3288 * @param cb Size of the handler range.
3289 * @param fHasHCHandler Set if the handler has a HC callback function.
3290 *
3291 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3292 * Handler memory type to memory which has no HC handler.
3293 */
3294static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3295{
3296 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3297 enmType, GCPhys, cb, fHasHCHandler));
3298
3299 VM_ASSERT_EMT(pVM);
3300 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3301 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3302
3303
3304 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3305
3306 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3307 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3308 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3309 else if (fHasHCHandler)
3310 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3311 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3312
3313 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3314}
3315
3316/**
3317 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3318 *
3319 * @param pVM VM Handle.
3320 * @param enmType Handler type.
3321 * @param GCPhys Handler range address.
3322 * @param cb Size of the handler range.
3323 * @param fHasHCHandler Set if the handler has a HC callback function.
3324 *
3325 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3326 * Handler memory type to memory which has no HC handler.
3327 */
3328REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3329{
3330 REMR3ReplayHandlerNotifications(pVM);
3331
3332 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3333}
3334
3335/**
3336 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3337 *
3338 * @param pVM VM Handle.
3339 * @param enmType Handler type.
3340 * @param GCPhys Handler range address.
3341 * @param cb Size of the handler range.
3342 * @param fHasHCHandler Set if the handler has a HC callback function.
3343 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3344 */
3345static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3346{
3347 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3348 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3349 VM_ASSERT_EMT(pVM);
3350
3351
3352 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3353
3354 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3355 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3356 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3357 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3358 else if (fHasHCHandler)
3359 {
3360 if (!fRestoreAsRAM)
3361 {
3362 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3363 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3364 }
3365 else
3366 {
3367 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3368 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3369 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3370 }
3371 }
3372 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3373
3374 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3375}
3376
3377/**
3378 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3379 *
3380 * @param pVM VM Handle.
3381 * @param enmType Handler type.
3382 * @param GCPhys Handler range address.
3383 * @param cb Size of the handler range.
3384 * @param fHasHCHandler Set if the handler has a HC callback function.
3385 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3386 */
3387REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3388{
3389 REMR3ReplayHandlerNotifications(pVM);
3390 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3391}
3392
3393
3394/**
3395 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3396 *
3397 * @param pVM VM Handle.
3398 * @param enmType Handler type.
3399 * @param GCPhysOld Old handler range address.
3400 * @param GCPhysNew New handler range address.
3401 * @param cb Size of the handler range.
3402 * @param fHasHCHandler Set if the handler has a HC callback function.
3403 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3404 */
3405static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3406{
3407 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3408 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3409 VM_ASSERT_EMT(pVM);
3410 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3411
3412 if (fHasHCHandler)
3413 {
3414 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3415
3416 /*
3417 * Reset the old page.
3418 */
3419 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3420 if (!fRestoreAsRAM)
3421 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3422 else
3423 {
3424 /* This is not perfect, but it'll do for PD monitoring... */
3425 Assert(cb == PAGE_SIZE);
3426 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3427 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3428 }
3429
3430 /*
3431 * Update the new page.
3432 */
3433 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3434 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3435 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3436 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3437
3438 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3439 }
3440}
3441
3442/**
3443 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3444 *
3445 * @param pVM VM Handle.
3446 * @param enmType Handler type.
3447 * @param GCPhysOld Old handler range address.
3448 * @param GCPhysNew New handler range address.
3449 * @param cb Size of the handler range.
3450 * @param fHasHCHandler Set if the handler has a HC callback function.
3451 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3452 */
3453REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3454{
3455 REMR3ReplayHandlerNotifications(pVM);
3456
3457 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3458}
3459
3460/**
3461 * Checks if we're handling access to this page or not.
3462 *
3463 * @returns true if we're trapping access.
3464 * @returns false if we aren't.
3465 * @param pVM The VM handle.
3466 * @param GCPhys The physical address.
3467 *
3468 * @remark This function will only work correctly in VBOX_STRICT builds!
3469 */
3470REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3471{
3472#ifdef VBOX_STRICT
3473 ram_addr_t off;
3474 REMR3ReplayHandlerNotifications(pVM);
3475
3476 off = get_phys_page_offset(GCPhys);
3477 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3478 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3479 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3480#else
3481 return false;
3482#endif
3483}
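
/*
 * Usage sketch (illustrative, strict builds only -- see the remark above):
 * a caller that has just registered an access handler could cross-check
 * that REM agrees. GCPhysPage is hypothetical.
 */
#if 0
    Assert(REMR3IsPageAccessHandled(pVM, GCPhysPage));
#endif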
3484
3485
3486/**
3487 * Deals with a rare case in get_phys_addr_code where the code
3488 * is being monitored.
3489 *
3490 * It could also be an MMIO page, in which case we will raise a fatal error.
3491 *
3492 * @returns The physical address corresponding to addr.
3493 * @param env The cpu environment.
3494 * @param addr The virtual address.
3495 * @param pTLBEntry The TLB entry.
3496 */
3497target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3498 target_ulong addr,
3499 CPUTLBEntry *pTLBEntry,
3500 target_phys_addr_t ioTLBEntry)
3501{
3502 PVM pVM = env->pVM;
3503
3504 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3505 {
3506 /* If the code memory is being monitored, the corresponding IOTLB entry
3507 will have the handler IO type, and its addend provides the real physical
3508 address no matter whether we store the VA in the TLB, as handlers are always passed the PA. */
3509 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3510 return ret;
3511 }
3512 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3513 "*** handlers\n",
3514 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3515 DBGFR3Info(pVM->pUVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3516 LogRel(("*** mmio\n"));
3517 DBGFR3Info(pVM->pUVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3518 LogRel(("*** phys\n"));
3519 DBGFR3Info(pVM->pUVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3520 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3521 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3522 AssertFatalFailed();
3523}
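
/*
 * Sketch of the IOTLB decoding used above (illustrative only): the sub-page
 * bits of an IOTLB entry select the registered io-mem type, while the
 * page-aligned part is the addend that reconstructs the physical address.
 */
#if 0
    ram_addr_t idxMemType = ioTLBEntry & ~TARGET_PAGE_MASK; /* e.g. iHandlerMemType */
    target_ulong GCPhysCode = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
#endif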
3524
3525/**
3526 * Read guest RAM and ROM.
3527 *
3528 * @param SrcGCPhys The source address (guest physical).
3529 * @param pvDst The destination address.
3530 * @param cb Number of bytes to read.
3531 */
3532void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3533{
3534 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3535 VBOX_CHECK_ADDR(SrcGCPhys);
3536 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3537#ifdef VBOX_DEBUG_PHYS
3538 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3539#endif
3540 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3541}
3542
3543
3544/**
3545 * Read guest RAM and ROM, unsigned 8-bit.
3546 *
3547 * @param SrcGCPhys The source address (guest physical).
3548 */
3549RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3550{
3551 uint8_t val;
3552 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3553 VBOX_CHECK_ADDR(SrcGCPhys);
3554 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3555 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3556#ifdef VBOX_DEBUG_PHYS
3557 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3558#endif
3559 return val;
3560}
3561
3562
3563/**
3564 * Read guest RAM and ROM, signed 8-bit.
3565 *
3566 * @param SrcGCPhys The source address (guest physical).
3567 */
3568RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3569{
3570 int8_t val;
3571 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3572 VBOX_CHECK_ADDR(SrcGCPhys);
3573 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3574 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3575#ifdef VBOX_DEBUG_PHYS
3576 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3577#endif
3578 return val;
3579}
3580
3581
3582/**
3583 * Read guest RAM and ROM, unsigned 16-bit.
3584 *
3585 * @param SrcGCPhys The source address (guest physical).
3586 */
3587RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3588{
3589 uint16_t val;
3590 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3591 VBOX_CHECK_ADDR(SrcGCPhys);
3592 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3593 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3594#ifdef VBOX_DEBUG_PHYS
3595 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3596#endif
3597 return val;
3598}
3599
3600
3601/**
3602 * Read guest RAM and ROM, signed 16-bit.
3603 *
3604 * @param SrcGCPhys The source address (guest physical).
3605 */
3606RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3607{
3608 int16_t val;
3609 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3610 VBOX_CHECK_ADDR(SrcGCPhys);
3611 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3612 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3613#ifdef VBOX_DEBUG_PHYS
3614 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3615#endif
3616 return val;
3617}
3618
3619
3620/**
3621 * Read guest RAM and ROM, unsigned 32-bit.
3622 *
3623 * @param SrcGCPhys The source address (guest physical).
3624 */
3625RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3626{
3627 uint32_t val;
3628 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3629 VBOX_CHECK_ADDR(SrcGCPhys);
3630 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3631 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3632#ifdef VBOX_DEBUG_PHYS
3633 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3634#endif
3635 return val;
3636}
3637
3638
3639/**
3640 * Read guest RAM and ROM, signed 32-bit.
3641 *
3642 * @param SrcGCPhys The source address (guest physical).
3643 */
3644RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3645{
3646 int32_t val;
3647 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3648 VBOX_CHECK_ADDR(SrcGCPhys);
3649 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3650 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3651#ifdef VBOX_DEBUG_PHYS
3652 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3653#endif
3654 return val;
3655}
3656
3657
3658/**
3659 * Read guest RAM and ROM, unsigned 64-bit.
3660 *
3661 * @param SrcGCPhys The source address (guest physical).
3662 */
3663uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3664{
3665 uint64_t val;
3666 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3667 VBOX_CHECK_ADDR(SrcGCPhys);
3668 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3669 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3670#ifdef VBOX_DEBUG_PHYS
3671 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3672#endif
3673 return val;
3674}
3675
3676
3677/**
3678 * Read guest RAM and ROM, signed 64-bit.
3679 *
3680 * @param SrcGCPhys The source address (guest physical).
3681 */
3682int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3683{
3684 int64_t val;
3685 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3686 VBOX_CHECK_ADDR(SrcGCPhys);
3687 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3688 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3689#ifdef VBOX_DEBUG_PHYS
3690 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3691#endif
3692 return val;
3693}
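
/*
 * Usage sketch for the read helpers above (illustrative): fetching a 64-bit
 * descriptor from a hypothetical guest physical GDT base GCPhysGdt.
 */
#if 0
    uint64_t u64Desc = remR3PhysReadU64(GCPhysGdt + (uSel & 0xfff8));
#endif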
3694
3695
3696/**
3697 * Write guest RAM.
3698 *
3699 * @param DstGCPhys The destination address (guest physical).
3700 * @param pvSrc The source address.
3701 * @param cb Number of bytes to write
3702 */
3703void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3704{
3705 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3706 VBOX_CHECK_ADDR(DstGCPhys);
3707 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3708 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3709#ifdef VBOX_DEBUG_PHYS
3710 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3711#endif
3712}
3713
3714
3715/**
3716 * Write guest RAM, unsigned 8-bit.
3717 *
3718 * @param DstGCPhys The destination address (guest physical).
3719 * @param val Value
3720 */
3721void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3722{
3723 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3724 VBOX_CHECK_ADDR(DstGCPhys);
3725 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3726 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3727#ifdef VBOX_DEBUG_PHYS
3728 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3729#endif
3730}
3731
3732
3733/**
3734 * Write guest RAM, unsigned 16-bit.
3735 *
3736 * @param DstGCPhys The destination address (guest physical).
3737 * @param val Value
3738 */
3739void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3740{
3741 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3742 VBOX_CHECK_ADDR(DstGCPhys);
3743 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3744 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3745#ifdef VBOX_DEBUG_PHYS
3746 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3747#endif
3748}
3749
3750
3751/**
3752 * Write guest RAM, unsigned 32-bit.
3753 *
3754 * @param DstGCPhys The destination address (guest physical).
3755 * @param val Value
3756 */
3757void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3758{
3759 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3760 VBOX_CHECK_ADDR(DstGCPhys);
3761 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3762 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3763#ifdef VBOX_DEBUG_PHYS
3764 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3765#endif
3766}
3767
3768
3769/**
3770 * Write guest RAM, unsigned 64-bit.
3771 *
3772 * @param DstGCPhys The destination address (guest physical).
3773 * @param val Value
3774 */
3775void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3776{
3777 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3778 VBOX_CHECK_ADDR(DstGCPhys);
3779 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3780 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3781#ifdef VBOX_DEBUG_PHYS
3782 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3783#endif
3784}
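
/*
 * Usage sketch for the write helpers above (illustrative; DstGCPhys is
 * hypothetical). The helpers return nothing; errors are dealt with inside
 * PGM.
 */
#if 0
    remR3PhysWriteU32(DstGCPhys, UINT32_C(0xdeadbeef));
#endif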
3785
3786#undef LOG_GROUP
3787#define LOG_GROUP LOG_GROUP_REM_MMIO
3788
3789/** Read MMIO memory. */
3790static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys)
3791{
3792 CPUX86State *env = (CPUX86State *)pvEnv;
3793 uint32_t u32 = 0;
3794 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 1);
3795 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3796 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3797 return u32;
3798}
3799
3800/** Read MMIO memory. */
3801static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys)
3802{
3803 CPUX86State *env = (CPUX86State *)pvEnv;
3804 uint32_t u32 = 0;
3805 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 2);
3806 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3807 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3808 return u32;
3809}
3810
3811/** Read MMIO memory. */
3812static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys)
3813{
3814 CPUX86State *env = (CPUX86State *)pvEnv;
3815 uint32_t u32 = 0;
3816 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 4);
3817 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3818 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3819 return u32;
3820}
3821
3822/** Write to MMIO memory. */
3823static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3824{
3825 CPUX86State *env = (CPUX86State *)pvEnv;
3826 int rc;
3827 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3828 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 1);
3829 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3830}
3831
3832/** Write to MMIO memory. */
3833static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3834{
3835 CPUX86State *env = (CPUX86State *)pvEnv;
3836 int rc;
3837 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3838 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 2);
3839 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3840}
3841
3842/** Write to MMIO memory. */
3843static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3844{
3845 CPUX86State *env = (CPUX86State *)pvEnv;
3846 int rc;
3847 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3848 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 4);
3849 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3850}
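
/*
 * Sketch (an assumption about the init code elsewhere in this module, not a
 * verified call site): the MMIO callbacks above are presumably handed to
 * QEMU as U8/U16/U32 triplets when the io-mem type behind
 * pVM->rem.s.iMMIOMemType is registered.
 */
#if 0
    CPUReadMemoryFunc *apfnMMIORead[3] = { remR3MMIOReadU8, remR3MMIOReadU16, remR3MMIOReadU32 };
    CPUWriteMemoryFunc *apfnMMIOWrite[3] = { remR3MMIOWriteU8, remR3MMIOWriteU16, remR3MMIOWriteU32 };
#endif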
3851
3852
3853#undef LOG_GROUP
3854#define LOG_GROUP LOG_GROUP_REM_HANDLER
3855
3856/* !!!WARNING!!! This is extremely hackish right now; we assume it's only used for LFB access! !!!WARNING!!! */
3857
3858static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3859{
3860 uint8_t u8;
3861 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3862 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3863 return u8;
3864}
3865
3866static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3867{
3868 uint16_t u16;
3869 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3870 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3871 return u16;
3872}
3873
3874static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3875{
3876 uint32_t u32;
3877 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3878 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3879 return u32;
3880}
3881
3882static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3883{
3884 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3885 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3886}
3887
3888static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3889{
3890 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3891 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3892}
3893
3894static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3895{
3896 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3897 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3898}
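
/*
 * Note: unlike the MMIO callbacks, the handler callbacks above go straight
 * through PGMPhysRead/PGMPhysWrite, which dispatch to whatever access
 * handler is registered for the page -- hence the LFB warning above; no
 * size or range checking is done here.
 */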
3899
3900/* -+- disassembly -+- */
3901
3902#undef LOG_GROUP
3903#define LOG_GROUP LOG_GROUP_REM_DISAS
3904
3905
3906/**
3907 * Enables or disables single-stepped disassembly.
3908 *
3909 * @returns VBox status code.
3910 * @param pVM VM handle.
3911 * @param fEnable To enable set this flag, to disable clear it.
3912 */
3913static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3914{
3915 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3916 VM_ASSERT_EMT(pVM);
3917
3918 if (fEnable)
3919 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3920 else
3921 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3922#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3923 cpu_single_step(&pVM->rem.s.Env, fEnable);
3924#endif
3925 return VINF_SUCCESS;
3926}
3927
3928
3929/**
3930 * Enables or disables single-stepped disassembly.
3931 *
3932 * @returns VBox status code.
3933 * @param pVM VM handle.
3934 * @param fEnable To enable set this flag, to disable clear it.
3935 */
3936REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3937{
3938 int rc;
3939
3940 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3941 if (VM_IS_EMT(pVM))
3942 return remR3DisasEnableStepping(pVM, fEnable);
3943
3944 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3945 AssertRC(rc);
3946 return rc;
3947}
3948
3949
3950#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3951/**
3952 * External Debugger Command: .remstep [on|off|1|0]
3953 */
3954static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
3955{
3956 int rc;
3957 PVM pVM = pUVM->pVM;
3958
3959 if (cArgs == 0)
3960 /*
3961 * Print the current status.
3962 */
3963 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3964 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3965 else
3966 {
3967 /*
3968 * Convert the argument and change the mode.
3969 */
3970 bool fEnable;
3971 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3972 if (RT_SUCCESS(rc))
3973 {
3974 rc = REMR3DisasEnableStepping(pVM, fEnable);
3975 if (RT_SUCCESS(rc))
3976 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3977 else
3978 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3979 }
3980 else
3981 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3982 }
3983 return rc;
3984}
3985#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3986
3987
3988/**
3989 * Disassembles one instruction and prints it to the log.
3990 *
3991 * @returns Success indicator.
3992 * @param env Pointer to the recompiler CPU structure.
3993 * @param f32BitCode Whether the code should be disassembled as
3994 * 16 or 32 bit code. If -1, the CS
3995 * selector will be inspected.
3996 * @param pszPrefix Optional prefix to print before each log line.
3997 */
3998bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3999{
4000 PVM pVM = env->pVM;
4001 const bool fLog = LogIsEnabled();
4002 const bool fLog2 = LogIs2Enabled();
4003 int rc = VINF_SUCCESS;
4004
4005 /*
4006 * Don't bother if there ain't any log output to do.
4007 */
4008 if (!fLog && !fLog2)
4009 return true;
4010
4011 /*
4012 * Update the state so DBGF reads the correct register values.
4013 */
4014 remR3StateUpdate(pVM, env->pVCpu);
4015
4016 /*
4017 * Log registers if requested.
4018 */
4019 if (fLog2)
4020 DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
4021
4022 /*
4023 * Disassemble to log.
4024 */
4025 if (fLog)
4026 {
4027 PVMCPU pVCpu = VMMGetCpu(pVM);
4028 char szBuf[256];
4029 szBuf[0] = '\0';
4030 rc = DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, /* assign the outer rc so disassembly failures reach the return value */
4031 pVCpu->idCpu,
4032 0, /* Sel */
4033 0, /* GCPtr */
4034 DBGF_DISAS_FLAGS_CURRENT_GUEST
4035 | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4036 szBuf,
4037 sizeof(szBuf),
4038 NULL);
4039 if (RT_FAILURE(rc))
4040 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4041 if (pszPrefix && *pszPrefix)
4042 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4043 else
4044 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4045 }
4046
4047 return RT_SUCCESS(rc);
4048}
4049
4050
4051/**
4052 * Disassemble recompiled code.
4053 *
4054 * @param phFile Ignored, usually the logfile.
4055 * @param pvCode Pointer to the code block.
4056 * @param cb Size of the code block.
4057 */
4058void disas(FILE *phFile, void *pvCode, unsigned long cb)
4059{
4060 if (LogIs2Enabled())
4061 {
4062 unsigned off = 0;
4063 char szOutput[256];
4064 DISCPUSTATE Cpu;
4065#ifdef RT_ARCH_X86
4066 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4067#else
4068 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4069#endif
4070
4071 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4072 while (off < cb)
4073 {
4074 uint32_t cbInstr;
4075 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4076 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4077 if (RT_SUCCESS(rc))
4078 RTLogPrintf("%s", szOutput);
4079 else
4080 {
4081 RTLogPrintf("disas error %Rrc\n", rc);
4082 cbInstr = 1;
4083 }
4084 off += cbInstr;
4085 }
4086 }
4087}
4088
4089
4090/**
4091 * Disassemble guest code.
4092 *
4093 * @param phFile Ignored, usually the logfile.
4094 * @param uCode The guest address of the code to disassemble. (flat?)
4095 * @param cb Number of bytes to disassemble.
4096 * @param fFlags Flags, probably indicating whether this is 16, 32 or 64 bit code.
4097 */
4098void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4099{
4100 if (LogIs2Enabled())
4101 {
4102 PVM pVM = cpu_single_env->pVM;
4103 PVMCPU pVCpu = cpu_single_env->pVCpu;
4104 RTSEL cs;
4105 RTGCUINTPTR eip;
4106
4107 Assert(pVCpu);
4108
4109 /*
4110 * Update the state so DBGF reads the correct register values (flags).
4111 */
4112 remR3StateUpdate(pVM, pVCpu);
4113
4114 /*
4115 * Do the disassembling.
4116 */
4117 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4118 cs = cpu_single_env->segs[R_CS].selector;
4119 eip = uCode - cpu_single_env->segs[R_CS].base;
4120 for (;;)
4121 {
4122 char szBuf[256] = ""; /* stays defined if the disassembly below fails */
4123 uint32_t cbInstr;
4124 int rc = DBGFR3DisasInstrEx(pVM->pUVM,
4125 pVCpu->idCpu,
4126 cs,
4127 eip,
4128 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4129 szBuf, sizeof(szBuf),
4130 &cbInstr);
4131 if (RT_SUCCESS(rc))
4132 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4133 else
4134 {
4135 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4136 cbInstr = 1;
4137 }
4138
4139 /* next */
4140 if (cb <= cbInstr)
4141 break;
4142 cb -= cbInstr;
4143 uCode += cbInstr;
4144 eip += cbInstr;
4145 }
4146 }
4147}
4148
4149
4150/**
4151 * Looks up a guest symbol.
4152 *
4153 * @returns Pointer to symbol name. This is a static buffer.
4154 * @param orig_addr The address in question.
4155 */
4156const char *lookup_symbol(target_ulong orig_addr)
4157{
4158 PVM pVM = cpu_single_env->pVM;
4159 RTGCINTPTR off = 0;
4160 RTDBGSYMBOL Sym;
4161 DBGFADDRESS Addr;
4162
4163 int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, orig_addr),
4164 &off, &Sym, NULL /*phMod*/);
4165 if (RT_SUCCESS(rc))
4166 {
4167 static char szSym[sizeof(Sym.szName) + 48];
4168 if (!off)
4169 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4170 else if (off > 0)
4171 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4172 else
4173 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4174 return szSym;
4175 }
4176 return "<N/A>";
4177}
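
/*
 * Note: szSym above is a static buffer, so the returned string is only
 * valid until the next call. That appears tolerable here since
 * lookup_symbol() is only reached from the recompiler's logging paths.
 */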
4178
4179
4180#undef LOG_GROUP
4181#define LOG_GROUP LOG_GROUP_REM
4182
4183
4184/* -+- FF notifications -+- */
4185
4186
4187/**
4188 * Notification about a pending interrupt.
4189 *
4190 * @param pVM VM Handle.
4191 * @param pVCpu VMCPU Handle.
4192 * @param u8Interrupt The interrupt number.
4193 * @thread The emulation thread.
4194 */
4195REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4196{
4197 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4198 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4199}
4200
4201/**
4202 * Queries the pending interrupt.
4203 *
4204 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4205 * @param pVM VM Handle.
4206 * @param pVCpu VMCPU Handle.
4207 * @thread The emulation thread.
4208 */
4209REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4210{
4211 return pVM->rem.s.u32PendingInterrupt;
4212}
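
/*
 * Illustrative pairing of the two functions above (a sketch, not an actual
 * call site): EM stores a fetched-but-unserviced interrupt before
 * rescheduling to REM, and cpu_get_pic_interrupt() below consumes it again.
 * u8Interrupt is hypothetical here.
 */
#if 0
    if (REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ)
        REMR3NotifyPendingInterrupt(pVM, pVCpu, u8Interrupt);
#endif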
4213
4214/**
4215 * Notification about the interrupt FF being set.
4216 *
4217 * @param pVM VM Handle.
4218 * @param pVCpu VMCPU Handle.
4219 * @thread The emulation thread.
4220 */
4221REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4222{
4223#ifndef IEM_VERIFICATION_MODE
4224 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4225 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4226 if (pVM->rem.s.fInREM)
4227 {
4228 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4229 CPU_INTERRUPT_EXTERNAL_HARD);
4230 }
4231#endif
4232}
4233
4234
4235/**
4236 * Notification about the interrupt FF being cleared.
4237 *
4238 * @param pVM VM Handle.
4239 * @param pVCpu VMCPU Handle.
4240 * @thread Any.
4241 */
4242REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4243{
4244 LogFlow(("REMR3NotifyInterruptClear:\n"));
4245 if (pVM->rem.s.fInREM)
4246 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4247}
4248
4249
4250/**
4251 * Notification about pending timer(s).
4252 *
4253 * @param pVM VM Handle.
4254 * @param pVCpuDst The target cpu for this notification.
4255 * TM will not broadcast pending timer events, but use
4256 * a dedicated EMT for them. So, only interrupt REM
4257 * execution if the given CPU is executing in REM.
4258 * @thread Any.
4259 */
4260REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4261{
4262#ifndef IEM_VERIFICATION_MODE
4263#ifndef DEBUG_bird
4264 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4265#endif
4266 if (pVM->rem.s.fInREM)
4267 {
4268 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4269 {
4270 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4271 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4272 CPU_INTERRUPT_EXTERNAL_TIMER);
4273 }
4274 else
4275 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4276 }
4277 else
4278 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4279#endif
4280}
4281
4282
4283/**
4284 * Notification about pending DMA transfers.
4285 *
4286 * @param pVM VM Handle.
4287 * @thread Any.
4288 */
4289REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4290{
4291#ifndef IEM_VERIFICATION_MODE
4292 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4293 if (pVM->rem.s.fInREM)
4294 {
4295 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4296 CPU_INTERRUPT_EXTERNAL_DMA);
4297 }
4298#endif
4299}
4300
4301
4302/**
4303 * Notification about pending queue(s).
4304 *
4305 * @param pVM VM Handle.
4306 * @thread Any.
4307 */
4308REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4309{
4310#ifndef IEM_VERIFICATION_MODE
4311 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4312 if (pVM->rem.s.fInREM)
4313 {
4314 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4315 CPU_INTERRUPT_EXTERNAL_EXIT);
4316 }
4317#endif
4318}
4319
4320
4321/**
4322 * Notification about pending FF set by an external thread.
4323 *
4324 * @param pVM VM handle.
4325 * @thread Any.
4326 */
4327REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4328{
4329#ifndef IEM_VERIFICATION_MODE
4330 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4331 if (pVM->rem.s.fInREM)
4332 {
4333 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4334 CPU_INTERRUPT_EXTERNAL_EXIT);
4335 }
4336#endif
4337}
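
/*
 * Note on the notification helpers above: they all work by OR'ing a
 * CPU_INTERRUPT_EXTERNAL_* bit (HARD, TIMER, DMA or EXIT) into the
 * interrupt_request field, which makes the inner qemu execution loop bail
 * out so EM can service whatever became pending.
 */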
4338
4339
4340#ifdef VBOX_WITH_STATISTICS
4341void remR3ProfileStart(int statcode)
4342{
4343 STAMPROFILEADV *pStat;
4344 switch (statcode)
4345 {
4346 case STATS_EMULATE_SINGLE_INSTR:
4347 pStat = &gStatExecuteSingleInstr;
4348 break;
4349 case STATS_QEMU_COMPILATION:
4350 pStat = &gStatCompilationQEmu;
4351 break;
4352 case STATS_QEMU_RUN_EMULATED_CODE:
4353 pStat = &gStatRunCodeQEmu;
4354 break;
4355 case STATS_QEMU_TOTAL:
4356 pStat = &gStatTotalTimeQEmu;
4357 break;
4358 case STATS_QEMU_RUN_TIMERS:
4359 pStat = &gStatTimers;
4360 break;
4361 case STATS_TLB_LOOKUP:
4362 pStat = &gStatTBLookup;
4363 break;
4364 case STATS_IRQ_HANDLING:
4365 pStat = &gStatIRQ;
4366 break;
4367 case STATS_RAW_CHECK:
4368 pStat = &gStatRawCheck;
4369 break;
4370
4371 default:
4372 AssertMsgFailed(("unknown stat %d\n", statcode));
4373 return;
4374 }
4375 STAM_PROFILE_ADV_START(pStat, a);
4376}
4377
4378
4379void remR3ProfileStop(int statcode)
4380{
4381 STAMPROFILEADV *pStat;
4382 switch (statcode)
4383 {
4384 case STATS_EMULATE_SINGLE_INSTR:
4385 pStat = &gStatExecuteSingleInstr;
4386 break;
4387 case STATS_QEMU_COMPILATION:
4388 pStat = &gStatCompilationQEmu;
4389 break;
4390 case STATS_QEMU_RUN_EMULATED_CODE:
4391 pStat = &gStatRunCodeQEmu;
4392 break;
4393 case STATS_QEMU_TOTAL:
4394 pStat = &gStatTotalTimeQEmu;
4395 break;
4396 case STATS_QEMU_RUN_TIMERS:
4397 pStat = &gStatTimers;
4398 break;
4399 case STATS_TLB_LOOKUP:
4400 pStat = &gStatTBLookup;
4401 break;
4402 case STATS_IRQ_HANDLING:
4403 pStat = &gStatIRQ;
4404 break;
4405 case STATS_RAW_CHECK:
4406 pStat = &gStatRawCheck;
4407 break;
4408 default:
4409 AssertMsgFailed(("unknown stat %d\n", statcode));
4410 return;
4411 }
4412 STAM_PROFILE_ADV_STOP(pStat, a);
4413}
4414#endif
4415
4416/**
4417 * Raise an RC, force rem exit.
4418 *
4419 * @param pVM VM handle.
4420 * @param rc The rc.
4421 */
4422void remR3RaiseRC(PVM pVM, int rc)
4423{
4424 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4425 Assert(pVM->rem.s.fInREM);
4426 VM_ASSERT_EMT(pVM);
4427 pVM->rem.s.rc = rc;
4428 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4429}
4430
4431
4432/* -+- timers -+- */
4433
4434uint64_t cpu_get_tsc(CPUX86State *env)
4435{
4436 STAM_COUNTER_INC(&gStatCpuGetTSC);
4437 return TMCpuTickGet(env->pVCpu);
4438}
4439
4440
4441/* -+- interrupts -+- */
4442
4443void cpu_set_ferr(CPUX86State *env)
4444{
4445 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4446 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4447}
4448
4449int cpu_get_pic_interrupt(CPUX86State *env)
4450{
4451 uint8_t u8Interrupt;
4452 int rc;
4453
4454 /* When we fail to forward an interrupt directly in raw mode, we fall back to the recompiler.
4455 * In that case we can't call PDMGetInterrupt anymore, because it has already acknowledged the
4456 * interrupt with the (a)pic.
4457 */
4458 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4459 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4460 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4461 * remove this kludge. */
4462 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4463 {
4464 rc = VINF_SUCCESS;
4465 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4466 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4467 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4468 }
4469 else
4470 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4471
4472 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4473 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4474 if (RT_SUCCESS(rc))
4475 {
4476 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4477 env->interrupt_request |= CPU_INTERRUPT_HARD;
4478 return u8Interrupt;
4479 }
4480 return -1;
4481}
4482
4483
4484/* -+- local apic -+- */
4485
4486#if 0 /* CPUMSetGuestMsr does this now. */
4487void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4488{
4489 int rc = PDMApicSetBase(env->pVM, val);
4490 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4491}
4492#endif
4493
4494uint64_t cpu_get_apic_base(CPUX86State *env)
4495{
4496 uint64_t u64;
4497 int rc = CPUMQueryGuestMsr(env->pVCpu, MSR_IA32_APICBASE, &u64);
4498 if (RT_SUCCESS(rc))
4499 {
4500 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4501 return u64;
4502 }
4503 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4504 return 0;
4505}
4506
4507void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4508{
4509 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4510 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4511}
4512
4513uint8_t cpu_get_apic_tpr(CPUX86State *env)
4514{
4515 uint8_t u8;
4516 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4517 if (RT_SUCCESS(rc))
4518 {
4519 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4520 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4521 }
4522 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4523 return 0;
4524}
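
/*
 * Worked example of the CR8 <-> TPR mapping in the two functions above: a
 * guest CR8 value of 3 is stored as 0x30 in bits 7-4 of the TPR register,
 * and reading it back yields 0x30 >> 4 = 3 again.
 */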
4525
4526/**
4527 * Read an MSR.
4528 *
4529 * @retval 0 success.
4530 * @retval -1 failure, raise \#GP(0).
4531 * @param env The cpu state.
4532 * @param idMsr The MSR to read.
4533 * @param puValue Where to return the value.
4534 */
4535int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4536{
4537 Assert(env->pVCpu);
4538 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4539}
4540
4541/**
4542 * Write to an MSR.
4543 *
4544 * @retval 0 success.
4545 * @retval -1 failure, raise \#GP(0).
4546 * @param env The cpu state.
4547 * @param idMsr The MSR to write.
4548 * @param uValue The value to write.
4549 */
4550int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4551{
4552 Assert(env->pVCpu);
4553 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4554}
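
/*
 * Usage sketch (illustrative, not the actual recompiler call site): the
 * 0 / -1 convention lets the generated RDMSR/WRMSR helpers raise #GP(0)
 * when CPUM rejects the access.
 */
#if 0
    uint64_t uValue;
    if (cpu_rdmsr(env, MSR_IA32_APICBASE, &uValue) != 0)
    {
        /* the qemu helper would raise #GP(0) at this point */
    }
#endif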
4555
4556/* -+- I/O Ports -+- */
4557
4558#undef LOG_GROUP
4559#define LOG_GROUP LOG_GROUP_REM_IOPORT
4560
4561void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4562{
4563 int rc;
4564
4565 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4566 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4567
4568 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 1);
4569 if (RT_LIKELY(rc == VINF_SUCCESS))
4570 return;
4571 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4572 {
4573 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4574 remR3RaiseRC(env->pVM, rc);
4575 return;
4576 }
4577 remAbort(rc, __FUNCTION__);
4578}
4579
4580void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4581{
4582 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4583 int rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 2);
4584 if (RT_LIKELY(rc == VINF_SUCCESS))
4585 return;
4586 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4587 {
4588 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4589 remR3RaiseRC(env->pVM, rc);
4590 return;
4591 }
4592 remAbort(rc, __FUNCTION__);
4593}
4594
4595void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4596{
4597 int rc;
4598 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4599 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 4);
4600 if (RT_LIKELY(rc == VINF_SUCCESS))
4601 return;
4602 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4603 {
4604 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4605 remR3RaiseRC(env->pVM, rc);
4606 return;
4607 }
4608 remAbort(rc, __FUNCTION__);
4609}
4610
4611uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4612{
4613 uint32_t u32 = 0;
4614 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 1);
4615 if (RT_LIKELY(rc == VINF_SUCCESS))
4616 {
4617 if (/*addr != 0x61 && */addr != 0x71)
4618 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4619 return (uint8_t)u32;
4620 }
4621 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4622 {
4623 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4624 remR3RaiseRC(env->pVM, rc);
4625 return (uint8_t)u32;
4626 }
4627 remAbort(rc, __FUNCTION__);
4628 return UINT8_C(0xff);
4629}
4630
4631uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4632{
4633 uint32_t u32 = 0;
4634 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 2);
4635 if (RT_LIKELY(rc == VINF_SUCCESS))
4636 {
4637 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4638 return (uint16_t)u32;
4639 }
4640 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4641 {
4642 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4643 remR3RaiseRC(env->pVM, rc);
4644 return (uint16_t)u32;
4645 }
4646 remAbort(rc, __FUNCTION__);
4647 return UINT16_C(0xffff);
4648}
4649
4650uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4651{
4652 uint32_t u32 = 0;
4653 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 4);
4654 if (RT_LIKELY(rc == VINF_SUCCESS))
4655 {
4656//if (addr==0x01f0 && u32 == 0x6b6d)
4657// loglevel = ~0;
4658 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4659 return u32;
4660 }
4661 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4662 {
4663 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4664 remR3RaiseRC(env->pVM, rc);
4665 return u32;
4666 }
4667 remAbort(rc, __FUNCTION__);
4668 return UINT32_C(0xffffffff);
4669}
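
/*
 * Note: the all-ones values returned on the remAbort() paths above mirror
 * what reads from unclaimed I/O ports typically yield on real hardware
 * (open bus), though execution should never actually get that far.
 */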
4670
4671#undef LOG_GROUP
4672#define LOG_GROUP LOG_GROUP_REM
4673
4674
4675/* -+- helpers and misc other interfaces -+- */
4676
4677/**
4678 * Perform the CPUID instruction.
4679 *
4680 * @param env Pointer to the recompiler CPU structure.
4681 * @param idx The CPUID leaf (eax).
4682 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4683 * @param pEAX Where to store eax.
4684 * @param pEBX Where to store ebx.
4685 * @param pECX Where to store ecx.
4686 * @param pEDX Where to store edx.
4687 */
4688void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4689 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4690{
4691 NOREF(idxSub);
4692 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4693}
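
/*
 * Usage sketch (illustrative): querying the standard feature leaf. The
 * feature constant is the usual VBox x86.h define and is assumed to be
 * available here.
 */
#if 0
    uint32_t uEAX, uEBX, uECX, uEDX;
    cpu_x86_cpuid(env, 1, 0, &uEAX, &uEBX, &uECX, &uEDX);
    bool fSse2 = RT_BOOL(uEDX & X86_CPUID_FEATURE_EDX_SSE2);
#endif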
4694
4695
4696#if 0 /* not used */
4697/**
4698 * Interface for qemu hardware to report back fatal errors.
4699 */
4700void hw_error(const char *pszFormat, ...)
4701{
4702 /*
4703 * Bitch about it.
4704 */
4705 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4706 * this in my Odin32 tree at home! */
4707 va_list args;
4708 va_start(args, pszFormat);
4709 RTLogPrintf("fatal error in virtual hardware:");
4710 RTLogPrintfV(pszFormat, args);
4711 va_end(args);
4712 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4713
4714 /*
4715 * If we're in REM context we'll sync back the state before 'jumping' to
4716 * the EM's failure handling.
4717 */
4718 PVM pVM = cpu_single_env->pVM;
4719 if (pVM->rem.s.fInREM)
4720 REMR3StateBack(pVM);
4721 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4722 AssertMsgFailed(("EMR3FatalError returned!\n"));
4723}
4724#endif
4725
4726/**
4727 * Interface for the qemu cpu to report unhandled situation
4728 * raising a fatal VM error.
4729 */
4730void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4731{
4732 va_list va;
4733 PVM pVM;
4734 PVMCPU pVCpu;
4735 char szMsg[256];
4736
4737 /*
4738 * Bitch about it.
4739 */
4740 RTLogFlags(NULL, "nodisabled nobuffered");
4741 RTLogFlush(NULL);
4742
4743 va_start(va, pszFormat);
4744#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4745 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4746 unsigned cArgs = 0;
4747 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4748 const char *psz = strchr(pszFormat, '%');
4749 while (psz && cArgs < 6)
4750 {
4751 auArgs[cArgs++] = va_arg(va, uintptr_t);
4752 psz = strchr(psz + 1, '%');
4753 }
4754 switch (cArgs)
4755 {
4756 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4757 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4758 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4759 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4760 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4761 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4762 default:
4763 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4764 }
4765#else
4766 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4767#endif
4768 va_end(va);
4769
4770 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4771 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4772
4773 /*
4774 * If we're in REM context we'll sync back the state before 'jumping' to
4775 * the EM's failure handling.
4776 */
4777 pVM = cpu_single_env->pVM;
4778 pVCpu = cpu_single_env->pVCpu;
4779 Assert(pVCpu);
4780
4781 if (pVM->rem.s.fInREM)
4782 REMR3StateBack(pVM, pVCpu);
4783 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4784 AssertMsgFailed(("EMR3FatalError returned!\n"));
4785}
4786
4787
4788/**
4789 * Aborts the VM.
4790 *
4791 * @param rc VBox error code.
4792 * @param pszTip Hint about why/when this happened.
4793 */
4794void remAbort(int rc, const char *pszTip)
4795{
4796 PVM pVM;
4797 PVMCPU pVCpu;
4798
4799 /*
4800 * Bitch about it.
4801 */
4802 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4803 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4804
4805 /*
4806 * Jump back to where we entered the recompiler.
4807 */
4808 pVM = cpu_single_env->pVM;
4809 pVCpu = cpu_single_env->pVCpu;
4810 Assert(pVCpu);
4811
4812 if (pVM->rem.s.fInREM)
4813 REMR3StateBack(pVM, pVCpu);
4814
4815 EMR3FatalError(pVCpu, rc);
4816 AssertMsgFailed(("EMR3FatalError returned!\n"));
4817}
4818
4819
4820/**
4821 * Dumps a linux system call.
4822 * @param pVCpu VMCPU handle.
4823 */
4824void remR3DumpLnxSyscall(PVMCPU pVCpu)
4825{
4826 static const char *apsz[] =
4827 {
4828 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4829 "sys_exit",
4830 "sys_fork",
4831 "sys_read",
4832 "sys_write",
4833 "sys_open", /* 5 */
4834 "sys_close",
4835 "sys_waitpid",
4836 "sys_creat",
4837 "sys_link",
4838 "sys_unlink", /* 10 */
4839 "sys_execve",
4840 "sys_chdir",
4841 "sys_time",
4842 "sys_mknod",
4843 "sys_chmod", /* 15 */
4844 "sys_lchown16",
4845 "sys_ni_syscall", /* old break syscall holder */
4846 "sys_stat",
4847 "sys_lseek",
4848 "sys_getpid", /* 20 */
4849 "sys_mount",
4850 "sys_oldumount",
4851 "sys_setuid16",
4852 "sys_getuid16",
4853 "sys_stime", /* 25 */
4854 "sys_ptrace",
4855 "sys_alarm",
4856 "sys_fstat",
4857 "sys_pause",
4858 "sys_utime", /* 30 */
4859 "sys_ni_syscall", /* old stty syscall holder */
4860 "sys_ni_syscall", /* old gtty syscall holder */
4861 "sys_access",
4862 "sys_nice",
4863 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4864 "sys_sync",
4865 "sys_kill",
4866 "sys_rename",
4867 "sys_mkdir",
4868 "sys_rmdir", /* 40 */
4869 "sys_dup",
4870 "sys_pipe",
4871 "sys_times",
4872 "sys_ni_syscall", /* old prof syscall holder */
4873 "sys_brk", /* 45 */
4874 "sys_setgid16",
4875 "sys_getgid16",
4876 "sys_signal",
4877 "sys_geteuid16",
4878 "sys_getegid16", /* 50 */
4879 "sys_acct",
4880 "sys_umount", /* recycled never used phys() */
4881 "sys_ni_syscall", /* old lock syscall holder */
4882 "sys_ioctl",
4883 "sys_fcntl", /* 55 */
4884 "sys_ni_syscall", /* old mpx syscall holder */
4885 "sys_setpgid",
4886 "sys_ni_syscall", /* old ulimit syscall holder */
4887 "sys_olduname",
4888 "sys_umask", /* 60 */
4889 "sys_chroot",
4890 "sys_ustat",
4891 "sys_dup2",
4892 "sys_getppid",
4893 "sys_getpgrp", /* 65 */
4894 "sys_setsid",
4895 "sys_sigaction",
4896 "sys_sgetmask",
4897 "sys_ssetmask",
4898 "sys_setreuid16", /* 70 */
4899 "sys_setregid16",
4900 "sys_sigsuspend",
4901 "sys_sigpending",
4902 "sys_sethostname",
4903 "sys_setrlimit", /* 75 */
4904 "sys_old_getrlimit",
4905 "sys_getrusage",
4906 "sys_gettimeofday",
4907 "sys_settimeofday",
4908 "sys_getgroups16", /* 80 */
4909 "sys_setgroups16",
4910 "old_select",
4911 "sys_symlink",
4912 "sys_lstat",
4913 "sys_readlink", /* 85 */
4914 "sys_uselib",
4915 "sys_swapon",
4916 "sys_reboot",
4917 "old_readdir",
4918 "old_mmap", /* 90 */
4919 "sys_munmap",
4920 "sys_truncate",
4921 "sys_ftruncate",
4922 "sys_fchmod",
4923 "sys_fchown16", /* 95 */
4924 "sys_getpriority",
4925 "sys_setpriority",
4926 "sys_ni_syscall", /* old profil syscall holder */
4927 "sys_statfs",
4928 "sys_fstatfs", /* 100 */
4929 "sys_ioperm",
4930 "sys_socketcall",
4931 "sys_syslog",
4932 "sys_setitimer",
4933 "sys_getitimer", /* 105 */
4934 "sys_newstat",
4935 "sys_newlstat",
4936 "sys_newfstat",
4937 "sys_uname",
4938 "sys_iopl", /* 110 */
4939 "sys_vhangup",
4940 "sys_ni_syscall", /* old "idle" system call */
4941 "sys_vm86old",
4942 "sys_wait4",
4943 "sys_swapoff", /* 115 */
4944 "sys_sysinfo",
4945 "sys_ipc",
4946 "sys_fsync",
4947 "sys_sigreturn",
4948 "sys_clone", /* 120 */
4949 "sys_setdomainname",
4950 "sys_newuname",
4951 "sys_modify_ldt",
4952 "sys_adjtimex",
4953 "sys_mprotect", /* 125 */
4954 "sys_sigprocmask",
4955 "sys_ni_syscall", /* old "create_module" */
4956 "sys_init_module",
4957 "sys_delete_module",
4958 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4959 "sys_quotactl",
4960 "sys_getpgid",
4961 "sys_fchdir",
4962 "sys_bdflush",
4963 "sys_sysfs", /* 135 */
4964 "sys_personality",
4965 "sys_ni_syscall", /* reserved for afs_syscall */
4966 "sys_setfsuid16",
4967 "sys_setfsgid16",
4968 "sys_llseek", /* 140 */
4969 "sys_getdents",
4970 "sys_select",
4971 "sys_flock",
4972 "sys_msync",
4973 "sys_readv", /* 145 */
4974 "sys_writev",
4975 "sys_getsid",
4976 "sys_fdatasync",
4977 "sys_sysctl",
4978 "sys_mlock", /* 150 */
4979 "sys_munlock",
4980 "sys_mlockall",
4981 "sys_munlockall",
4982 "sys_sched_setparam",
4983 "sys_sched_getparam", /* 155 */
4984 "sys_sched_setscheduler",
4985 "sys_sched_getscheduler",
4986 "sys_sched_yield",
4987 "sys_sched_get_priority_max",
4988 "sys_sched_get_priority_min", /* 160 */
4989 "sys_sched_rr_get_interval",
4990 "sys_nanosleep",
4991 "sys_mremap",
4992 "sys_setresuid16",
4993 "sys_getresuid16", /* 165 */
4994 "sys_vm86",
4995 "sys_ni_syscall", /* Old sys_query_module */
4996 "sys_poll",
4997 "sys_nfsservctl",
4998 "sys_setresgid16", /* 170 */
4999 "sys_getresgid16",
5000 "sys_prctl",
5001 "sys_rt_sigreturn",
5002 "sys_rt_sigaction",
5003 "sys_rt_sigprocmask", /* 175 */
5004 "sys_rt_sigpending",
5005 "sys_rt_sigtimedwait",
5006 "sys_rt_sigqueueinfo",
5007 "sys_rt_sigsuspend",
5008 "sys_pread64", /* 180 */
5009 "sys_pwrite64",
5010 "sys_chown16",
5011 "sys_getcwd",
5012 "sys_capget",
5013 "sys_capset", /* 185 */
5014 "sys_sigaltstack",
5015 "sys_sendfile",
5016 "sys_ni_syscall", /* reserved for streams1 */
5017 "sys_ni_syscall", /* reserved for streams2 */
5018 "sys_vfork", /* 190 */
5019 "sys_getrlimit",
5020 "sys_mmap2",
5021 "sys_truncate64",
5022 "sys_ftruncate64",
5023 "sys_stat64", /* 195 */
5024 "sys_lstat64",
5025 "sys_fstat64",
5026 "sys_lchown",
5027 "sys_getuid",
5028 "sys_getgid", /* 200 */
5029 "sys_geteuid",
5030 "sys_getegid",
5031 "sys_setreuid",
5032 "sys_setregid",
5033 "sys_getgroups", /* 205 */
5034 "sys_setgroups",
5035 "sys_fchown",
5036 "sys_setresuid",
5037 "sys_getresuid",
5038 "sys_setresgid", /* 210 */
5039 "sys_getresgid",
5040 "sys_chown",
5041 "sys_setuid",
5042 "sys_setgid",
5043 "sys_setfsuid", /* 215 */
5044 "sys_setfsgid",
5045 "sys_pivot_root",
5046 "sys_mincore",
5047 "sys_madvise",
5048 "sys_getdents64", /* 220 */
5049 "sys_fcntl64",
5050 "sys_ni_syscall", /* reserved for TUX */
5051 "sys_ni_syscall",
5052 "sys_gettid",
5053 "sys_readahead", /* 225 */
5054 "sys_setxattr",
5055 "sys_lsetxattr",
5056 "sys_fsetxattr",
5057 "sys_getxattr",
5058 "sys_lgetxattr", /* 230 */
5059 "sys_fgetxattr",
5060 "sys_listxattr",
5061 "sys_llistxattr",
5062 "sys_flistxattr",
5063 "sys_removexattr", /* 235 */
5064 "sys_lremovexattr",
5065 "sys_fremovexattr",
5066 "sys_tkill",
5067 "sys_sendfile64",
5068 "sys_futex", /* 240 */
5069 "sys_sched_setaffinity",
5070 "sys_sched_getaffinity",
5071 "sys_set_thread_area",
5072 "sys_get_thread_area",
5073 "sys_io_setup", /* 245 */
5074 "sys_io_destroy",
5075 "sys_io_getevents",
5076 "sys_io_submit",
5077 "sys_io_cancel",
5078 "sys_fadvise64", /* 250 */
5079 "sys_ni_syscall",
5080 "sys_exit_group",
5081 "sys_lookup_dcookie",
5082 "sys_epoll_create",
5083 "sys_epoll_ctl", /* 255 */
5084 "sys_epoll_wait",
5085 "sys_remap_file_pages",
5086 "sys_set_tid_address",
5087 "sys_timer_create",
5088 "sys_timer_settime", /* 260 */
5089 "sys_timer_gettime",
5090 "sys_timer_getoverrun",
5091 "sys_timer_delete",
5092 "sys_clock_settime",
5093 "sys_clock_gettime", /* 265 */
5094 "sys_clock_getres",
5095 "sys_clock_nanosleep",
5096 "sys_statfs64",
5097 "sys_fstatfs64",
5098 "sys_tgkill", /* 270 */
5099 "sys_utimes",
5100 "sys_fadvise64_64",
5101 "sys_ni_syscall" /* sys_vserver */
5102 };
5103
5104 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5105 switch (uEAX)
5106 {
5107 default:
5108 if (uEAX < RT_ELEMENTS(apsz))
5109 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5110 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5111 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5112 else
5113 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5114 break;
5115
5116 }
5117}
5118
5119
5120/**
5121 * Dumps an OpenBSD system call.
5122 * @param pVCpu VMCPU handle.
5123 */
5124void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5125{
5126 static const char *apsz[] =
5127 {
5128 "SYS_syscall", //0
5129 "SYS_exit", //1
5130 "SYS_fork", //2
5131 "SYS_read", //3
5132 "SYS_write", //4
5133 "SYS_open", //5
5134 "SYS_close", //6
5135 "SYS_wait4", //7
5136 "SYS_8",
5137 "SYS_link", //9
5138 "SYS_unlink", //10
5139 "SYS_11",
5140 "SYS_chdir", //12
5141 "SYS_fchdir", //13
5142 "SYS_mknod", //14
5143 "SYS_chmod", //15
5144 "SYS_chown", //16
5145 "SYS_break", //17
5146 "SYS_18",
5147 "SYS_19",
5148 "SYS_getpid", //20
5149 "SYS_mount", //21
5150 "SYS_unmount", //22
5151 "SYS_setuid", //23
5152 "SYS_getuid", //24
5153 "SYS_geteuid", //25
5154 "SYS_ptrace", //26
5155 "SYS_recvmsg", //27
5156 "SYS_sendmsg", //28
5157 "SYS_recvfrom", //29
5158 "SYS_accept", //30
5159 "SYS_getpeername", //31
5160 "SYS_getsockname", //32
5161 "SYS_access", //33
5162 "SYS_chflags", //34
5163 "SYS_fchflags", //35
5164 "SYS_sync", //36
5165 "SYS_kill", //37
5166 "SYS_38",
5167 "SYS_getppid", //39
5168 "SYS_40",
5169 "SYS_dup", //41
5170 "SYS_opipe", //42
5171 "SYS_getegid", //43
5172 "SYS_profil", //44
5173 "SYS_ktrace", //45
5174 "SYS_sigaction", //46
5175 "SYS_getgid", //47
5176 "SYS_sigprocmask", //48
5177 "SYS_getlogin", //49
5178 "SYS_setlogin", //50
5179 "SYS_acct", //51
5180 "SYS_sigpending", //52
5181 "SYS_osigaltstack", //53
5182 "SYS_ioctl", //54
5183 "SYS_reboot", //55
5184 "SYS_revoke", //56
5185 "SYS_symlink", //57
5186 "SYS_readlink", //58
5187 "SYS_execve", //59
5188 "SYS_umask", //60
5189 "SYS_chroot", //61
5190 "SYS_62",
5191 "SYS_63",
5192 "SYS_64",
5193 "SYS_65",
5194 "SYS_vfork", //66
5195 "SYS_67",
5196 "SYS_68",
5197 "SYS_sbrk", //69
5198 "SYS_sstk", //70
5199 "SYS_71",
5200 "SYS_vadvise", //72
5201 "SYS_munmap", //73
5202 "SYS_mprotect", //74
5203 "SYS_madvise", //75
5204 "SYS_76",
5205 "SYS_77",
5206 "SYS_mincore", //78
5207 "SYS_getgroups", //79
5208 "SYS_setgroups", //80
5209 "SYS_getpgrp", //81
5210 "SYS_setpgid", //82
5211 "SYS_setitimer", //83
5212 "SYS_84",
5213 "SYS_85",
5214 "SYS_getitimer", //86
5215 "SYS_87",
5216 "SYS_88",
5217 "SYS_89",
5218 "SYS_dup2", //90
5219 "SYS_91",
5220 "SYS_fcntl", //92
5221 "SYS_select", //93
5222 "SYS_94",
5223 "SYS_fsync", //95
5224 "SYS_setpriority", //96
5225 "SYS_socket", //97
5226 "SYS_connect", //98
5227 "SYS_99",
5228 "SYS_getpriority", //100
5229 "SYS_101",
5230 "SYS_102",
5231 "SYS_sigreturn", //103
5232 "SYS_bind", //104
5233 "SYS_setsockopt", //105
5234 "SYS_listen", //106
5235 "SYS_107",
5236 "SYS_108",
5237 "SYS_109",
5238 "SYS_110",
5239 "SYS_sigsuspend", //111
5240 "SYS_112",
5241 "SYS_113",
5242 "SYS_114",
5243 "SYS_115",
5244 "SYS_gettimeofday", //116
5245 "SYS_getrusage", //117
5246 "SYS_getsockopt", //118
5247 "SYS_119",
5248 "SYS_readv", //120
5249 "SYS_writev", //121
5250 "SYS_settimeofday", //122
5251 "SYS_fchown", //123
5252 "SYS_fchmod", //124
5253 "SYS_125",
5254 "SYS_setreuid", //126
5255 "SYS_setregid", //127
5256 "SYS_rename", //128
5257 "SYS_129",
5258 "SYS_130",
5259 "SYS_flock", //131
5260 "SYS_mkfifo", //132
5261 "SYS_sendto", //133
5262 "SYS_shutdown", //134
5263 "SYS_socketpair", //135
5264 "SYS_mkdir", //136
5265 "SYS_rmdir", //137
5266 "SYS_utimes", //138
5267 "SYS_139",
5268 "SYS_adjtime", //140
5269 "SYS_141",
5270 "SYS_142",
5271 "SYS_143",
5272 "SYS_144",
5273 "SYS_145",
5274 "SYS_146",
5275 "SYS_setsid", //147
5276 "SYS_quotactl", //148
5277 "SYS_149",
5278 "SYS_150",
5279 "SYS_151",
5280 "SYS_152",
5281 "SYS_153",
5282 "SYS_154",
5283 "SYS_nfssvc", //155
5284 "SYS_156",
5285 "SYS_157",
5286 "SYS_158",
5287 "SYS_159",
5288 "SYS_160",
5289 "SYS_getfh", //161
5290 "SYS_162",
5291 "SYS_163",
5292 "SYS_164",
5293 "SYS_sysarch", //165
5294 "SYS_166",
5295 "SYS_167",
5296 "SYS_168",
5297 "SYS_169",
5298 "SYS_170",
5299 "SYS_171",
5300 "SYS_172",
5301 "SYS_pread", //173
5302 "SYS_pwrite", //174
5303 "SYS_175",
5304 "SYS_176",
5305 "SYS_177",
5306 "SYS_178",
5307 "SYS_179",
5308 "SYS_180",
5309 "SYS_setgid", //181
5310 "SYS_setegid", //182
5311 "SYS_seteuid", //183
5312 "SYS_lfs_bmapv", //184
5313 "SYS_lfs_markv", //185
5314 "SYS_lfs_segclean", //186
5315 "SYS_lfs_segwait", //187
5316 "SYS_188",
5317 "SYS_189",
5318 "SYS_190",
5319 "SYS_pathconf", //191
5320 "SYS_fpathconf", //192
5321 "SYS_swapctl", //193
5322 "SYS_getrlimit", //194
5323 "SYS_setrlimit", //195
5324 "SYS_getdirentries", //196
5325 "SYS_mmap", //197
5326 "SYS___syscall", //198
5327 "SYS_lseek", //199
5328 "SYS_truncate", //200
5329 "SYS_ftruncate", //201
5330 "SYS___sysctl", //202
5331 "SYS_mlock", //203
5332 "SYS_munlock", //204
5333 "SYS_205",
5334 "SYS_futimes", //206
5335 "SYS_getpgid", //207
5336 "SYS_xfspioctl", //208
5337 "SYS_209",
5338 "SYS_210",
5339 "SYS_211",
5340 "SYS_212",
5341 "SYS_213",
5342 "SYS_214",
5343 "SYS_215",
5344 "SYS_216",
5345 "SYS_217",
5346 "SYS_218",
5347 "SYS_219",
5348 "SYS_220",
5349 "SYS_semget", //221
5350 "SYS_222",
5351 "SYS_223",
5352 "SYS_224",
5353 "SYS_msgget", //225
5354 "SYS_msgsnd", //226
5355 "SYS_msgrcv", //227
5356 "SYS_shmat", //228
5357 "SYS_229",
5358 "SYS_shmdt", //230
5359 "SYS_231",
5360 "SYS_clock_gettime", //232
5361 "SYS_clock_settime", //233
5362 "SYS_clock_getres", //234
5363 "SYS_235",
5364 "SYS_236",
5365 "SYS_237",
5366 "SYS_238",
5367 "SYS_239",
5368 "SYS_nanosleep", //240
5369 "SYS_241",
5370 "SYS_242",
5371 "SYS_243",
5372 "SYS_244",
5373 "SYS_245",
5374 "SYS_246",
5375 "SYS_247",
5376 "SYS_248",
5377 "SYS_249",
5378 "SYS_minherit", //250
5379 "SYS_rfork", //251
5380 "SYS_poll", //252
5381 "SYS_issetugid", //253
5382 "SYS_lchown", //254
5383 "SYS_getsid", //255
5384 "SYS_msync", //256
5385 "SYS_257",
5386 "SYS_258",
5387 "SYS_259",
5388 "SYS_getfsstat", //260
5389 "SYS_statfs", //261
5390 "SYS_fstatfs", //262
5391 "SYS_pipe", //263
5392 "SYS_fhopen", //264
5393 "SYS_265",
5394 "SYS_fhstatfs", //266
5395 "SYS_preadv", //267
5396 "SYS_pwritev", //268
5397 "SYS_kqueue", //269
5398 "SYS_kevent", //270
5399 "SYS_mlockall", //271
5400 "SYS_munlockall", //272
5401 "SYS_getpeereid", //273
5402 "SYS_274",
5403 "SYS_275",
5404 "SYS_276",
5405 "SYS_277",
5406 "SYS_278",
5407 "SYS_279",
5408 "SYS_280",
5409 "SYS_getresuid", //281
5410 "SYS_setresuid", //282
5411 "SYS_getresgid", //283
5412 "SYS_setresgid", //284
5413 "SYS_285",
5414 "SYS_mquery", //286
5415 "SYS_closefrom", //287
5416 "SYS_sigaltstack", //288
5417 "SYS_shmget", //289
5418 "SYS_semop", //290
5419 "SYS_stat", //291
5420 "SYS_fstat", //292
5421 "SYS_lstat", //293
5422 "SYS_fhstat", //294
5423 "SYS___semctl", //295
5424 "SYS_shmctl", //296
5425 "SYS_msgctl", //297
5426 "SYS_MAXSYSCALL", //298
5427 //299
5428 //300
5429 };
5430 uint32_t uEAX;
5431 if (!LogIsEnabled())
5432 return;
5433 uEAX = CPUMGetGuestEAX(pVCpu);
5434 switch (uEAX)
5435 {
5436 default:
5437 if (uEAX < RT_ELEMENTS(apsz))
5438 {
5439 uint32_t au32Args[8] = {0};
5440 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5441 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5442 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5443 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5444 }
5445 else
5446 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5447 break;
5448 }
5449}
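/* A minimal, self-contained sketch (not part of the build) of the lookup
 * pattern used by the dumpers above: the syscall number taken from guest
 * EAX indexes a static name table after a bounds check, and out-of-range
 * numbers fall back to a numeric form. The g_apszNames table and the
 * printSyscallName function below are illustrative names, not VBox APIs. */
#if 0
# include <stdio.h>

static const char *g_apszNames[] = { "SYS_syscall", "SYS_exit", "SYS_fork" };

static void printSyscallName(unsigned uNum)
{
    if (uNum < sizeof(g_apszNames) / sizeof(g_apszNames[0]))
        printf("syscall %3u: %s\n", uNum, g_apszNames[uNum]);
    else
        printf("syscall %u (#%x) unknown\n", uNum, uNum);
}

int main(void)
{
    printSyscallName(1);   /* in range: prints "SYS_exit" */
    printSyscallName(99);  /* out of range: numeric fallback */
    return 0;
}
#endif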
5450
5451
5452#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5453/**
5454 * The Dll main entry point (stub).
5455 */
5456bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5457{
5458 return true;
5459}
5460
5461void *memcpy(void *dst, const void *src, size_t size)
5462{
5463 uint8_t *pbDst = (uint8_t *)dst; const uint8_t *pbSrc = (const uint8_t *)src;
5464 while (size-- > 0)
5465 *pbDst++ = *pbSrc++;
5466 return dst;
5467}
5468
5469#endif
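/* Design note on the memcpy above: with IPRT_NO_CRT there is no C runtime
 * memcpy to fall back on, and a byte loop is the simplest correct
 * replacement since it has no alignment requirements. A common speed-up,
 * sketched below under the same assumptions (x86, where unaligned 32-bit
 * accesses are legal), is to copy word-sized chunks first and only handle
 * the tail byte-wise; memcpy_words is a hypothetical name, not something
 * VBox defines. */
#if 0
static void *memcpy_words(void *dst, const void *src, size_t size)
{
    uint8_t *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;

    /* Copy 32-bit words while at least one full word remains. */
    while (size >= sizeof(uint32_t))
    {
        *(uint32_t *)pbDst = *(const uint32_t *)pbSrc;
        pbDst += sizeof(uint32_t);
        pbSrc += sizeof(uint32_t);
        size  -= sizeof(uint32_t);
    }

    /* Copy the trailing bytes one at a time. */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
#endif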
5470
5471void cpu_smm_update(CPUX86State *env)
5472{ /* Stub: the recompiler does not model SMM state changes, so this QEMU callback is intentionally empty. */
5473}