VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 36144

Last change on this file since 36144 was 36144, checked in by vboxsync, 14 years ago

rem: Removed more obsolete/unused files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 178.8 KB
Line 
1/* $Id: VBoxRecompiler.c 36144 2011-03-03 14:02:19Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "osdep.h"
24#include "exec-all.h"
25#include "config.h"
26#include "cpu-all.h"
27
28#include <VBox/vmm/rem.h>
29#include <VBox/vmm/vmapi.h>
30#include <VBox/vmm/tm.h>
31#include <VBox/vmm/ssm.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/iom.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/pdm.h>
38#include <VBox/vmm/dbgf.h>
39#include <VBox/dbg.h>
40#include <VBox/vmm/hwaccm.h>
41#include <VBox/vmm/patm.h>
42#include <VBox/vmm/csam.h>
43#include "REMInternal.h"
44#include <VBox/vmm/vm.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47
48#include <VBox/log.h>
49#include <iprt/semaphore.h>
50#include <iprt/asm.h>
51#include <iprt/assert.h>
52#include <iprt/thread.h>
53#include <iprt/string.h>
54
55/* Don't wanna include everything. */
56extern void cpu_exec_init_all(unsigned long tb_size);
57extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
58extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
59extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
60extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
61extern void tlb_flush(CPUState *env, int flush_global);
62extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
63extern void sync_ldtr(CPUX86State *env1, int selector);
64
65#ifdef VBOX_STRICT
66unsigned long get_phys_page_offset(target_ulong addr);
67#endif
68
69
70/*******************************************************************************
71* Defined Constants And Macros *
72*******************************************************************************/
73
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
/* Saved state (SSM) callbacks registered by REMR3Init. */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

/* MMIO access callbacks handed to QEMU via cpu_register_io_memory. */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

/* Access-handler callbacks handed to QEMU via cpu_register_io_memory. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

/* PGM physical access handler notification workers. */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
110/*******************************************************************************
111* Global Variables *
112*******************************************************************************/
113
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling and counter statistics, registered in REMR3Init and
   deregistered in REMR3Term.  File-scope statics rather than REM::s
   members (see todo above). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
static STAMCOUNTER    gStatSelOutOfSync[6];           /* indexed by segment register (ES..GS). */
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];  /* indexed by segment register (ES..GS). */
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;

/*
 * Global stuff.
 */

/** MMIO read callbacks.
 * Index 0/1/2 = 8/16/32-bit access, as cpu_register_io_memory expects. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};


#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc in REMR3Init. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif
239
240
241/**
242 * Initializes the REM.
243 *
244 * @returns VBox status code.
245 * @param pVM The VM to operate on.
246 */
247REMR3DECL(int) REMR3Init(PVM pVM)
248{
249 PREMHANDLERNOTIFICATION pCur;
250 uint32_t u32Dummy;
251 int rc;
252 unsigned i;
253
254#ifdef VBOX_ENABLE_VBOXREM64
255 LogRel(("Using 64-bit aware REM\n"));
256#endif
257
258 /*
259 * Assert sanity.
260 */
261 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
262 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
263 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
264#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
265 Assert(!testmath());
266#endif
267
268 /*
269 * Init some internal data members.
270 */
271 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
272 pVM->rem.s.Env.pVM = pVM;
273#ifdef CPU_RAW_MODE_INIT
274 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
275#endif
276
277 /*
278 * Initialize the REM critical section.
279 *
280 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
281 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
282 * deadlocks. (mostly pgm vs rem locking)
283 */
284 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
285 AssertRCReturn(rc, rc);
286
287 /* ctx. */
288 pVM->rem.s.pCtx = NULL; /* set when executing code. */
289 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
290
291 /* ignore all notifications */
292 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
293
294 code_gen_prologue = RTMemExecAlloc(_1K);
295 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
296
297 cpu_exec_init_all(0);
298
299 /*
300 * Init the recompiler.
301 */
302 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
303 {
304 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
305 return VERR_GENERAL_FAILURE;
306 }
307 PVMCPU pVCpu = VMMGetCpu(pVM);
308 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
309 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
310
311 /* allocate code buffer for single instruction emulation. */
312 pVM->rem.s.Env.cbCodeBuffer = 4096;
313 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
314 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
315
316 /* finally, set the cpu_single_env global. */
317 cpu_single_env = &pVM->rem.s.Env;
318
319 /* Nothing is pending by default */
320 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
321
322 /*
323 * Register ram types.
324 */
325 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
326 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
327 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
328 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
329 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
330
331 /* stop ignoring. */
332 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
333
334 /*
335 * Register the saved state data unit.
336 */
337 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
338 NULL, NULL, NULL,
339 NULL, remR3Save, NULL,
340 NULL, remR3Load, NULL);
341 if (RT_FAILURE(rc))
342 return rc;
343
344#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
345 /*
346 * Debugger commands.
347 */
348 static bool fRegisteredCmds = false;
349 if (!fRegisteredCmds)
350 {
351 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
352 if (RT_SUCCESS(rc))
353 fRegisteredCmds = true;
354 }
355#endif
356
357#ifdef VBOX_WITH_STATISTICS
358 /*
359 * Statistics.
360 */
361 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
362 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
363 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
364 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
365 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
366 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
367 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
368 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
369 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
370 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
371 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
372 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
373
374 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
375
376 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
377 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
378 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
379 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
380 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
381 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
382 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
383 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
384 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
385 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
386 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
387
388 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
389 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
390 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
391 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
392
393 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
394 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
395 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
396 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
397 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
398 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
399
400 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
401 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
402 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
403 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
405 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
406
407 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
408#endif /* VBOX_WITH_STATISTICS */
409
410 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
411 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
412 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
413
414
415#ifdef DEBUG_ALL_LOGGING
416 loglevel = ~0;
417# ifdef DEBUG_TMP_LOGGING
418 logfile = fopen("/tmp/vbox-qemu.log", "w");
419# endif
420#endif
421
422 /*
423 * Init the handler notification lists.
424 */
425 pVM->rem.s.idxPendingList = UINT32_MAX;
426 pVM->rem.s.idxFreeList = 0;
427
428 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
429 {
430 pCur = &pVM->rem.s.aHandlerNotifications[i];
431 pCur->idxNext = i + 1;
432 pCur->idxSelf = i;
433 }
434 pCur->idxNext = UINT32_MAX; /* the last record. */
435
436 return rc;
437}
438
439
440/**
441 * Finalizes the REM initialization.
442 *
443 * This is called after all components, devices and drivers has
444 * been initialized. Its main purpose it to finish the RAM related
445 * initialization.
446 *
447 * @returns VBox status code.
448 *
449 * @param pVM The VM handle.
450 */
451REMR3DECL(int) REMR3InitFinalize(PVM pVM)
452{
453 int rc;
454
455 /*
456 * Ram size & dirty bit map.
457 */
458 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
459 pVM->rem.s.fGCPhysLastRamFixed = true;
460#ifdef RT_STRICT
461 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
462#else
463 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
464#endif
465 return rc;
466}
467
468
469/**
470 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
471 *
472 * @returns VBox status code.
473 * @param pVM The VM handle.
474 * @param fGuarded Whether to guard the map.
475 */
476static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
477{
478 int rc = VINF_SUCCESS;
479 RTGCPHYS cb;
480
481 cb = pVM->rem.s.GCPhysLastRam + 1;
482 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
483 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
484 VERR_OUT_OF_RANGE);
485 phys_ram_size = cb;
486 phys_ram_dirty_size = cb >> PAGE_SHIFT;
487 AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
488
489 if (!fGuarded)
490 {
491 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
492 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
493 }
494 else
495 {
496 /*
497 * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
498 */
499 uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
500 uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
501 if (cbBitmapFull == cbBitmapAligned)
502 cbBitmapFull += _4G >> PAGE_SHIFT;
503 else if (cbBitmapFull - cbBitmapAligned < _64K)
504 cbBitmapFull += _64K;
505
506 phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
507 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
508
509 rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
510 if (RT_FAILURE(rc))
511 {
512 RTMemPageFree(phys_ram_dirty, cbBitmapFull);
513 AssertLogRelRCReturn(rc, rc);
514 }
515
516 phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
517 }
518
519 /* initialize it. */
520 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
521 return rc;
522}
523
524
525/**
526 * Terminates the REM.
527 *
528 * Termination means cleaning up and freeing all resources,
529 * the VM it self is at this point powered off or suspended.
530 *
531 * @returns VBox status code.
532 * @param pVM The VM to operate on.
533 */
534REMR3DECL(int) REMR3Term(PVM pVM)
535{
536#ifdef VBOX_WITH_STATISTICS
537 /*
538 * Statistics.
539 */
540 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
541 STAM_DEREG(pVM, &gStatCompilationQEmu);
542 STAM_DEREG(pVM, &gStatRunCodeQEmu);
543 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
544 STAM_DEREG(pVM, &gStatTimers);
545 STAM_DEREG(pVM, &gStatTBLookup);
546 STAM_DEREG(pVM, &gStatIRQ);
547 STAM_DEREG(pVM, &gStatRawCheck);
548 STAM_DEREG(pVM, &gStatMemRead);
549 STAM_DEREG(pVM, &gStatMemWrite);
550 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
551 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
552
553 STAM_DEREG(pVM, &gStatCpuGetTSC);
554
555 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
556 STAM_DEREG(pVM, &gStatRefuseVM86);
557 STAM_DEREG(pVM, &gStatRefusePaging);
558 STAM_DEREG(pVM, &gStatRefusePAE);
559 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
560 STAM_DEREG(pVM, &gStatRefuseIF0);
561 STAM_DEREG(pVM, &gStatRefuseCode16);
562 STAM_DEREG(pVM, &gStatRefuseWP0);
563 STAM_DEREG(pVM, &gStatRefuseRing1or2);
564 STAM_DEREG(pVM, &gStatRefuseCanExecute);
565 STAM_DEREG(pVM, &gStatFlushTBs);
566
567 STAM_DEREG(pVM, &gStatREMGDTChange);
568 STAM_DEREG(pVM, &gStatREMLDTRChange);
569 STAM_DEREG(pVM, &gStatREMIDTChange);
570 STAM_DEREG(pVM, &gStatREMTRChange);
571
572 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
573 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
574 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
575 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
576 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
577 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
578
579 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
580 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
581 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
582 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
583 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
584 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
585
586 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
587#endif /* VBOX_WITH_STATISTICS */
588
589 STAM_REL_DEREG(pVM, &tb_flush_count);
590 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
591 STAM_REL_DEREG(pVM, &tlb_flush_count);
592
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * The VM is being reset.
599 *
600 * For the REM component this means to call the cpu_reset() and
601 * reinitialize some state variables.
602 *
603 * @param pVM VM handle.
604 */
605REMR3DECL(void) REMR3Reset(PVM pVM)
606{
607 /*
608 * Reset the REM cpu.
609 */
610 Assert(pVM->rem.s.cIgnoreAll == 0);
611 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
612 cpu_reset(&pVM->rem.s.Env);
613 pVM->rem.s.cInvalidatedPages = 0;
614 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
615 Assert(pVM->rem.s.cIgnoreAll == 0);
616
617 /* Clear raw ring 0 init state */
618 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
619
620 /* Flush the TBs the next time we execute code here. */
621 pVM->rem.s.fFlushTBs = true;
622}
623
624
625/**
626 * Execute state save operation.
627 *
628 * @returns VBox status code.
629 * @param pVM VM Handle.
630 * @param pSSM SSM operation handle.
631 */
632static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
633{
634 PREM pRem = &pVM->rem.s;
635
636 /*
637 * Save the required CPU Env bits.
638 * (Not much because we're never in REM when doing the save.)
639 */
640 LogFlow(("remR3Save:\n"));
641 Assert(!pRem->fInREM);
642 SSMR3PutU32(pSSM, pRem->Env.hflags);
643 SSMR3PutU32(pSSM, ~0); /* separator */
644
645 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
646 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
647 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
648
649 return SSMR3PutU32(pSSM, ~0); /* terminator */
650}
651
652
653/**
654 * Execute state load operation.
655 *
656 * @returns VBox status code.
657 * @param pVM VM Handle.
658 * @param pSSM SSM operation handle.
659 * @param uVersion Data layout version.
660 * @param uPass The data pass.
661 */
662static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
663{
664 uint32_t u32Dummy;
665 uint32_t fRawRing0 = false;
666 uint32_t u32Sep;
667 uint32_t i;
668 int rc;
669 PREM pRem;
670
671 LogFlow(("remR3Load:\n"));
672 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
673
674 /*
675 * Validate version.
676 */
677 if ( uVersion != REM_SAVED_STATE_VERSION
678 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
679 {
680 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
681 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
682 }
683
684 /*
685 * Do a reset to be on the safe side...
686 */
687 REMR3Reset(pVM);
688
689 /*
690 * Ignore all ignorable notifications.
691 * (Not doing this will cause serious trouble.)
692 */
693 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
694
695 /*
696 * Load the required CPU Env bits.
697 * (Not much because we're never in REM when doing the save.)
698 */
699 pRem = &pVM->rem.s;
700 Assert(!pRem->fInREM);
701 SSMR3GetU32(pSSM, &pRem->Env.hflags);
702 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
703 {
704 /* Redundant REM CPU state has to be loaded, but can be ignored. */
705 CPUX86State_Ver16 temp;
706 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
707 }
708
709 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
710 if (RT_FAILURE(rc))
711 return rc;
712 if (u32Sep != ~0U)
713 {
714 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
715 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
716 }
717
718 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
719 SSMR3GetUInt(pSSM, &fRawRing0);
720 if (fRawRing0)
721 pRem->Env.state |= CPU_RAW_RING0;
722
723 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
724 {
725 /*
726 * Load the REM stuff.
727 */
728 /** @todo r=bird: We should just drop all these items, restoring doesn't make
729 * sense. */
730 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
731 if (RT_FAILURE(rc))
732 return rc;
733 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
734 {
735 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
736 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
737 }
738 for (i = 0; i < pRem->cInvalidatedPages; i++)
739 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
740 }
741
742 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
743 if (RT_FAILURE(rc))
744 return rc;
745
746 /* check the terminator. */
747 rc = SSMR3GetU32(pSSM, &u32Sep);
748 if (RT_FAILURE(rc))
749 return rc;
750 if (u32Sep != ~0U)
751 {
752 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
753 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
754 }
755
756 /*
757 * Get the CPUID features.
758 */
759 PVMCPU pVCpu = VMMGetCpu(pVM);
760 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
761 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
762
763 /*
764 * Sync the Load Flush the TLB
765 */
766 tlb_flush(&pRem->Env, 1);
767
768 /*
769 * Stop ignoring ignorable notifications.
770 */
771 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
772
773 /*
774 * Sync the whole CPU state when executing code in the recompiler.
775 */
776 for (i = 0; i < pVM->cCpus; i++)
777 {
778 PVMCPU pVCpu = &pVM->aCpus[i];
779 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
780 }
781 return VINF_SUCCESS;
782}
783
784
785
#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM_RUN

/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int   rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool  fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;   /* saved; restored at the bottom. */
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 when one was actually removed; fBp
     * remembers whether to re-insert it afterwards.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* Status stashed in pVM->rem.s.rc by the code that raised EXCP_RC. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
873
874
875/**
876 * Set a breakpoint using the REM facilities.
877 *
878 * @returns VBox status code.
879 * @param pVM The VM handle.
880 * @param Address The breakpoint address.
881 * @thread The emulation thread.
882 */
883REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
884{
885 VM_ASSERT_EMT(pVM);
886 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
887 {
888 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
889 return VINF_SUCCESS;
890 }
891 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
892 return VERR_REM_NO_MORE_BP_SLOTS;
893}
894
895
896/**
897 * Clears a breakpoint set by REMR3BreakpointSet().
898 *
899 * @returns VBox status code.
900 * @param pVM The VM handle.
901 * @param Address The breakpoint address.
902 * @thread The emulation thread.
903 */
904REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
905{
906 VM_ASSERT_EMT(pVM);
907 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
908 {
909 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
910 return VINF_SUCCESS;
911 }
912 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
913 return VERR_REM_BP_NOT_FOUND;
914}
915
916
917/**
918 * Emulate an instruction.
919 *
920 * This function executes one instruction without letting anyone
921 * interrupt it. This is intended for being called while being in
922 * raw mode and thus will take care of all the state syncing between
923 * REM and the rest.
924 *
925 * @returns VBox status code.
926 * @param pVM VM handle.
927 * @param pVCpu VMCPU Handle.
928 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;   /* restore the caller's TB-flushing setting */
    if (RT_SUCCESS(rc))
    {
        /* Save interrupt_request; it is restored after the cpu_exec call below. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                /* Scan the REM breakpoint table to tell a breakpoint hit from a plain step. */
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1069
1070
1071/**
1072 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1073 *
1074 * @returns VBox status code.
1075 *
1076 * @param pVM The VM handle.
1077 * @param pVCpu The Virtual CPU handle.
1078 */
1079static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1080{
1081 int rc;
1082
1083 Assert(pVM->rem.s.fInREM);
1084#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1085 cpu_single_step(&pVM->rem.s.Env, 1);
1086#else
1087 Assert(!pVM->rem.s.Env.singlestep_enabled);
1088#endif
1089
1090 /*
1091 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1092 */
1093 for (;;)
1094 {
1095 char szBuf[256];
1096
1097 /*
1098 * Log the current registers state and instruction.
1099 */
1100 remR3StateUpdate(pVM, pVCpu);
1101 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1102 szBuf[0] = '\0';
1103 rc = DBGFR3DisasInstrEx(pVM,
1104 pVCpu->idCpu,
1105 0, /* Sel */
1106 0, /* GCPtr */
1107 DBGF_DISAS_FLAGS_CURRENT_GUEST
1108 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1109 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1110 szBuf,
1111 sizeof(szBuf),
1112 NULL);
1113 if (RT_FAILURE(rc))
1114 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1115 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1116
1117 /*
1118 * Execute the instruction.
1119 */
1120 TMNotifyStartOfExecution(pVCpu);
1121
1122 if ( pVM->rem.s.Env.exception_index < 0
1123 || pVM->rem.s.Env.exception_index > 256)
1124 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1125
1126#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1127 pVM->rem.s.Env.interrupt_request = 0;
1128#else
1129 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1130#endif
1131 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1132 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1133 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1134 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1135 pVM->rem.s.Env.interrupt_request,
1136 pVM->rem.s.Env.halted,
1137 pVM->rem.s.Env.exception_index
1138 );
1139
1140 rc = cpu_exec(&pVM->rem.s.Env);
1141
1142 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1143 pVM->rem.s.Env.interrupt_request,
1144 pVM->rem.s.Env.halted,
1145 pVM->rem.s.Env.exception_index
1146 );
1147
1148 TMNotifyEndOfExecution(pVCpu);
1149
1150 switch (rc)
1151 {
1152#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1153 /*
1154 * The normal exit.
1155 */
1156 case EXCP_SINGLE_INSTR:
1157 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1158 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1159 continue;
1160 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1161 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1162 rc = VINF_SUCCESS;
1163 break;
1164
1165#else
1166 /*
1167 * The normal exit, check for breakpoints at PC just to be sure.
1168 */
1169#endif
1170 case EXCP_DEBUG:
1171 rc = VINF_EM_DBG_STEPPED;
1172 if (pVM->rem.s.Env.nb_breakpoints > 0)
1173 {
1174 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1175 int iBP;
1176 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1177 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1178 {
1179 rc = VINF_EM_DBG_BREAKPOINT;
1180 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC);
1181 break;
1182 }
1183 }
1184#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1185 if (rc == VINF_EM_DBG_STEPPED)
1186 {
1187 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1188 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1189 continue;
1190
1191 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1192 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1193 rc = VINF_SUCCESS;
1194 }
1195#endif
1196 break;
1197
1198 /*
1199 * If we take a trap or start servicing a pending interrupt, we might end up here.
1200 * (Timer thread or some other thread wishing EMT's attention.)
1201 */
1202 case EXCP_INTERRUPT:
1203 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1204 rc = VINF_SUCCESS;
1205 break;
1206
1207 /*
1208 * hlt instruction.
1209 */
1210 case EXCP_HLT:
1211 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1212 rc = VINF_EM_HALT;
1213 break;
1214
1215 /*
1216 * The VM has halted.
1217 */
1218 case EXCP_HALTED:
1219 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1220 rc = VINF_EM_HALT;
1221 break;
1222
1223 /*
1224 * Switch to RAW-mode.
1225 */
1226 case EXCP_EXECUTE_RAW:
1227 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1228 rc = VINF_EM_RESCHEDULE_RAW;
1229 break;
1230
1231 /*
1232 * Switch to hardware accelerated RAW-mode.
1233 */
1234 case EXCP_EXECUTE_HWACC:
1235 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1236 rc = VINF_EM_RESCHEDULE_HWACC;
1237 break;
1238
1239 /*
1240 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1241 */
1242 case EXCP_RC:
1243 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1244 rc = pVM->rem.s.rc;
1245 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1246 break;
1247
1248 /*
1249 * Figure out the rest when they arrive....
1250 */
1251 default:
1252 AssertMsgFailed(("rc=%d\n", rc));
1253 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1254 rc = VINF_EM_RESCHEDULE;
1255 break;
1256 }
1257 break;
1258 }
1259
1260#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1261// cpu_single_step(&pVM->rem.s.Env, 0);
1262#else
1263 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1264#endif
1265 return rc;
1266}
1267
1268
1269/**
1270 * Runs code in recompiled mode.
1271 *
1272 * Before calling this function the REM state needs to be in sync with
1273 * the VM. Call REMR3State() to perform the sync. It's only necessary
1274 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1275 * and after calling REMR3StateBack().
1276 *
1277 * @returns VBox status code.
1278 *
1279 * @param pVM VM Handle.
1280 * @param pVCpu VMCPU Handle.
1281 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* Divert to the verbose single-stepping path when instruction logging is on. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            /* A match in the REM breakpoint table means a breakpoint hit, not a step. */
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            /* NOTE(review): unlike REMR3EmulateInstruction, the unknown-code fallback
               here is VINF_SUCCESS rather than VINF_EM_RESCHEDULE -- confirm intent. */
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1400
1401
1402/**
1403 * Check if the cpu state is suitable for Raw execution.
1404 *
1405 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1406 *
1407 * @param env The CPU env struct.
1408 * @param eip The EIP to check this for (might differ from env->eip).
1409 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1410 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1411 *
1412 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1413 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         * (QEMU keeps segment access rights in bits 8+ of 'flags';
         * shift/mask converts them to the VBox hidden-attribute layout.)
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr = env->ldt.selector;
        Ctx.ldtrHid.u64Base = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        /* PAE is only acceptable when the (virtual) CPU actually reports it. */
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patched code always runs raw, regardless of the checks below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1646
1647
1648/**
1649 * Fetches a code byte.
1650 *
1651 * @returns Success indicator (bool) for ease of use.
1652 * @param env The CPU environment structure.
1653 * @param GCPtrInstr Where to fetch code.
1654 * @param pu8Byte Where to store the byte on success
1655 */
1656bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1657{
1658 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1659 if (RT_SUCCESS(rc))
1660 return true;
1661 return false;
1662}
1663
1664
1665/**
1666 * Flush (or invalidate if you like) page table/dir entry.
1667 *
1668 * (invlpg instruction; tlb_flush_page)
1669 *
1670 * @param env Pointer to cpu environment.
1671 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1672 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS (interrupt redirection bitmap) to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        /* Fall back to a full CR3 resync if the single-page invalidation failed. */
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1713
1714
1715#ifndef REM_PHYS_ADDR_IN_TLB
1716/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored here; PGMR3PhysTlbGCPhys2Ptr is
       always called with fWritable=true, and write monitoring is signalled via the low
       bits of the returned pointer instead -- confirm this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    /* Failure (unassigned / catch-all) is flagged by returning 1 (bit 0 set). */
    if (RT_FAILURE(rc))
        return (void *)1;
    /* Writes to this page must be caught: flag with bit 1 of the pointer. */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1736#endif /* REM_PHYS_ADDR_IN_TLB */
1737
1738
1739/**
1740 * Called from tlb_protect_code in order to write monitor a code page.
1741 *
1742 * @param env Pointer to the CPU environment.
1743 * @param GCPtr Code page to monitor
1744 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    /* Only ask CSAM to monitor supervisor code pages when not using hardware virtualization. */
    if (     (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)     /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                     /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1757
1758
1759/**
1760 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1761 *
1762 * @param env Pointer to the CPU environment.
1763 * @param GCPtr Code page to monitor
1764 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    /* Mirror of remR3ProtectCode: same conditions, but remove the CSAM monitoring. */
    if (     (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)     /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                     /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1777
1778
1779/**
1780 * Called when the CPU is initialized, any of the CRx registers are changed or
1781 * when the A20 line is modified.
1782 *
1783 * @param env Pointer to the CPU environment.
1784 * @param fGlobal Set if the flush is global.
1785 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * (Without CR4.PGE there are no global pages, so every flush is global.)
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS (interrupt redirection bitmap) to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1823
1824
1825/**
1826 * Called when any of the cr0, cr4 or efer registers is updated.
1827 *
1828 * @param env Pointer to the CPU environment.
1829 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS (interrupt redirection bitmap) to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    efer = 0;   /* EFER does not exist on 32-bit-only targets */
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        /* Informational EM status codes are forwarded via remR3RaiseRC;
           anything else means the new mode is unusable -> fatal. */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1875
1876
1877/**
1878 * Called from compiled code to run dma.
1879 *
1880 * @param env Pointer to the CPU environment.
1881 */
1882void remR3DmaRun(CPUState *env)
1883{
1884 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1885 PDMR3DmaRun(env->pVM);
1886 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1887}
1888
1889
1890/**
1891 * Called from compiled code to schedule pending timers in VMM
1892 *
1893 * @param env Pointer to the CPU environment.
1894 */
1895void remR3TimersRun(CPUState *env)
1896{
1897 LogFlow(("remR3TimersRun:\n"));
1898 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1899 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1900 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1901 TMR3TimerQueuesDo(env->pVM);
1902 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1903 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1904}
1905
1906
1907/**
1908 * Record trap occurrence
1909 *
1910 * @returns VBox status code
1911 * @param env Pointer to the CPU environment.
1912 * @param uTrap Trap nr
1913 * @param uErrorCode Error code
1914 * @param pvNextEIP Next EIP
1915 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        /* Lazily register a per-vector counter the first time a trap number is seen. */
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    if(   uTrap < 0x20
       && (env->cr[0] & X86_CR0_PE)
       && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Guard against trap loops: the same exception at the same EIP/CR2 hitting
           more than 512 times in a row is treated as a fatal condition. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap or a different location restarts the repetition count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Soft interrupts (>= 0x20), real mode and V86 traps don't count towards the loop guard. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1966
1967
1968/*
1969 * Clear current active trap
1970 *
1971 * @param pVM VM Handle.
1972 */
1973void remR3TrapClear(PVM pVM)
1974{
1975 pVM->rem.s.cPendingExceptions = 0;
1976 pVM->rem.s.uPendingException = 0;
1977 pVM->rem.s.uPendingExcptEIP = 0;
1978 pVM->rem.s.uPendingExcptCR2 = 0;
1979}
1980
1981
1982/*
1983 * Record previous call instruction addresses
1984 *
1985 * @param env Pointer to the CPU environment.
1986 */
void remR3RecordCall(CPUState *env)
{
    /* Feed the current EIP into CSAM's call-address history. */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1991
1992
1993/**
1994 * Syncs the internal REM state with the VM.
1995 *
 * This must be called before REMR3Run() is invoked whenever the REM
 * state is not up to date. Calling it several times in a row is not
1998 * permitted.
1999 *
2000 * @returns VBox status code.
2001 *
2002 * @param pVM VM Handle.
2003 * @param pVCpu VMCPU Handle.
2004 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 * not do this since the majority of the callers don't want any unnecessary events
 * pending that would immediately interrupt execution.
2008 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8]  = pCtx->r8;
    pVM->rem.s.Env.regs[9]  = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Flush each page REM was told about while it was
     * out of the loop; fIgnoreInvlPg stops the notifications recursing.
     */
    if (pVM->rem.s.cInvalidatedPages)
    {
        RTUINT i;

        pVM->rem.s.fIgnoreInvlPg = true;
        for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
        {
            Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
            tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
        }
        pVM->rem.s.fIgnoreInvlPg = false;
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer = pCtx->msrEFER;
    pVM->rem.s.Env.star = pCtx->msrSTAR;
    pVM->rem.s.Env.pat  = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
                 | CPUM_CHANGED_CR4
                 | CPUM_CHANGED_CR0
                 | CPUM_CHANGED_CR3
                 | CPUM_CHANGED_GDTR
                 | CPUM_CHANGED_IDTR
                 | CPUM_CHANGED_SYSENTER_MSR
                 | CPUM_CHANGED_LDTR
                 | CPUM_CHANGED_CPUID
                 | CPUM_CHANGED_FPU_REM
                 )
       )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base  = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base  = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs  = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base     = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit    = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags    = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base     = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit    = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags    = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags   &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int     = 0;
            pVM->rem.s.Env.exception_next_eip   = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two one-byte opcodes and one two-byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and set the default to 2 bytes, then check
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int     = 1;
            pVM->rem.s.Env.exception_next_eip   = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        switch (u8TrapNo)
        {
            case 0x0e:
                pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                break;

            case 0x11: case 0x08:
            default:
                pVM->rem.s.Env.error_code = 0;
                break;
        }

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2462
2463
2464/**
 * Syncs back changes in the REM state to the VM state.
2466 *
2467 * This must be called after invoking REMR3Run().
2468 * Calling it several times in a row is not permitted.
2469 *
2470 * @returns VBox status code.
2471 *
2472 * @param pVM VM Handle.
2473 * @param pVCpu VMCPU Handle.
2474 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A VME change means the TSS interrupt-redirection bitmap must be resynced. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Restore the busy bit the REM entry stripped off (do_interrupt faults on it). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2723
2724
2725/**
2726 * This is called by the disassembler when it wants to update the cpu state
2727 * before for instance doing a register dump.
2728 */
2729static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2730{
2731 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2732 unsigned i;
2733
2734 Assert(pVM->rem.s.fInREM);
2735
2736 /*
2737 * Copy back the registers.
2738 * This is done in the order they are declared in the CPUMCTX structure.
2739 */
2740
2741 /** @todo FOP */
2742 /** @todo FPUIP */
2743 /** @todo CS */
2744 /** @todo FPUDP */
2745 /** @todo DS */
2746 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2747 pCtx->fpu.MXCSR = 0;
2748 pCtx->fpu.MXCSR_MASK = 0;
2749
2750 /** @todo check if FPU/XMM was actually used in the recompiler */
2751 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2752//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2753
2754#ifdef TARGET_X86_64
2755 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2756 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2757 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2758 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2759 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2760 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2761 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2762 pCtx->r8 = pVM->rem.s.Env.regs[8];
2763 pCtx->r9 = pVM->rem.s.Env.regs[9];
2764 pCtx->r10 = pVM->rem.s.Env.regs[10];
2765 pCtx->r11 = pVM->rem.s.Env.regs[11];
2766 pCtx->r12 = pVM->rem.s.Env.regs[12];
2767 pCtx->r13 = pVM->rem.s.Env.regs[13];
2768 pCtx->r14 = pVM->rem.s.Env.regs[14];
2769 pCtx->r15 = pVM->rem.s.Env.regs[15];
2770
2771 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2772#else
2773 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2774 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2775 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2776 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2777 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2778 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2779 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2780
2781 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2782#endif
2783
2784 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2785
2786 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2787 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2788 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2789 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2790 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2791
2792#ifdef TARGET_X86_64
2793 pCtx->rip = pVM->rem.s.Env.eip;
2794 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2795#else
2796 pCtx->eip = pVM->rem.s.Env.eip;
2797 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2798#endif
2799
2800 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2801 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2802 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2803 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2804 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2805 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2806
2807 for (i = 0; i < 8; i++)
2808 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2809
2810 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2811 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2812 {
2813 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2814 STAM_COUNTER_INC(&gStatREMGDTChange);
2815 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2816 }
2817
2818 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2819 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2820 {
2821 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2822 STAM_COUNTER_INC(&gStatREMIDTChange);
2823 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2824 }
2825
2826 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2827 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2828 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2829 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2830 {
2831 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2832 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2833 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2834 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2835 STAM_COUNTER_INC(&gStatREMLDTRChange);
2836 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2837 }
2838
2839 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2840 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2841 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2842 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2843 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2844 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2845 : 0) )
2846 {
2847 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2848 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2849 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2850 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2851 pCtx->tr = pVM->rem.s.Env.tr.selector;
2852 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2853 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2854 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2855 if (pCtx->trHid.Attr.u)
2856 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2857 STAM_COUNTER_INC(&gStatREMTRChange);
2858 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2859 }
2860
2861 /** @todo These values could still be out of sync! */
2862 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2863 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2864 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2865 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2866
2867 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2868 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2869 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2870
2871 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2872 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2873 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2874
2875 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2876 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2877 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2878
2879 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2880 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2881 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2882
2883 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2884 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2885 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2886
2887 /* Sysenter MSR */
2888 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2889 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2890 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2891
2892 /* System MSRs. */
2893 pCtx->msrEFER = pVM->rem.s.Env.efer;
2894 pCtx->msrSTAR = pVM->rem.s.Env.star;
2895 pCtx->msrPAT = pVM->rem.s.Env.pat;
2896#ifdef TARGET_X86_64
2897 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2898 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2899 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2900 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2901#endif
2902
2903}
2904
2905
2906/**
2907 * Update the VMM state information if we're currently in REM.
2908 *
2909 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2910 * we're currently executing in REM and the VMM state is invalid. This method will of
2911 * course check that we're executing in REM before syncing any data over to the VMM.
2912 *
2913 * @param pVM The VM handle.
2914 * @param pVCpu The VMCPU handle.
2915 */
2916REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2917{
2918 if (pVM->rem.s.fInREM)
2919 remR3StateUpdate(pVM, pVCpu);
2920}
2921
2922
2923#undef LOG_GROUP
2924#define LOG_GROUP LOG_GROUP_REM
2925
2926
2927/**
2928 * Notify the recompiler about Address Gate 20 state change.
2929 *
2930 * This notification is required since A20 gate changes are
2931 * initialized from a device driver and the VM might just as
2932 * well be in REM mode as in RAW mode.
2933 *
2934 * @param pVM VM handle.
2935 * @param pVCpu VMCPU handle.
2936 * @param fEnable True if the gate should be enabled.
2937 * False if the gate should be disabled.
2938 */
2939REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2940{
2941 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2942 VM_ASSERT_EMT(pVM);
2943
2944 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2945 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2946 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2947}
2948
2949
/**
 * Replays the handler notification changes
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * Pending notifications are queued lock-free on a LIFO list (idxPendingList);
 * this function atomically detaches the whole list, reverses it to restore
 * FIFO order, dispatches each record, and returns each record to the free
 * list as it goes so other EMTs don't starve while we're replaying.
 *
 * @param   pVM             VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;      /* Strict-build record counter; must balance out to zero. */
#endif

        /* Lockless purging of pending notifications: grab the whole pending
           list in one atomic exchange, leaving an empty (UINT32_MAX) list. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch to the worker matching the queued notification kind. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (Lock-free push: link to current free head, then CAS the head.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3073
3074
/**
 * Notify REM about changed code page.
 *
 * In builds without VBOX_REM_PROTECT_PAGES_FROM_SMC this compiles down to a
 * no-op that simply returns VINF_SUCCESS.
 *
 * @returns VBox status code.  Always VINF_SUCCESS, even if the guest page
 *          translation fails (the flush is then silently skipped).
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pvCodePage  Code page address
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int     rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.
         */
        /* CR0..CR4 must be current since the invalidation code may consult
           paging mode; copy them from the cached guest context. */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        /* Drop every translation block overlapping the page. */
        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
3113
3114
3115/**
3116 * Notification about a successful MMR3PhysRegister() call.
3117 *
3118 * @param pVM VM handle.
3119 * @param GCPhys The physical address the RAM.
3120 * @param cb Size of the memory.
3121 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3122 */
3123REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3124{
3125 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3126 VM_ASSERT_EMT(pVM);
3127
3128 /*
3129 * Validate input - we trust the caller.
3130 */
3131 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3132 Assert(cb);
3133 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3134 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3135
3136 /*
3137 * Base ram? Update GCPhysLastRam.
3138 */
3139 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3140 {
3141 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3142 {
3143 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3144 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3145 }
3146 }
3147
3148 /*
3149 * Register the ram.
3150 */
3151 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3152
3153 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3154 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3155 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3156
3157 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3158}
3159
3160
3161/**
3162 * Notification about a successful MMR3PhysRomRegister() call.
3163 *
3164 * @param pVM VM handle.
3165 * @param GCPhys The physical address of the ROM.
3166 * @param cb The size of the ROM.
3167 * @param pvCopy Pointer to the ROM copy.
3168 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3169 * This function will be called when ever the protection of the
3170 * shadow ROM changes (at reset and end of POST).
3171 */
3172REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3173{
3174 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3175 VM_ASSERT_EMT(pVM);
3176
3177 /*
3178 * Validate input - we trust the caller.
3179 */
3180 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3181 Assert(cb);
3182 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3183
3184 /*
3185 * Register the rom.
3186 */
3187 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3188
3189 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3190 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
3191 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3192
3193 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3194}
3195
3196
3197/**
3198 * Notification about a successful memory deregistration or reservation.
3199 *
3200 * @param pVM VM Handle.
3201 * @param GCPhys Start physical address.
3202 * @param cb The size of the range.
3203 */
3204REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3205{
3206 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3207 VM_ASSERT_EMT(pVM);
3208
3209 /*
3210 * Validate input - we trust the caller.
3211 */
3212 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3213 Assert(cb);
3214 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3215
3216 /*
3217 * Unassigning the memory.
3218 */
3219 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3220
3221 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3222 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3223 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3224
3225 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3226}
3227
3228
3229/**
3230 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3231 *
3232 * @param pVM VM Handle.
3233 * @param enmType Handler type.
3234 * @param GCPhys Handler range address.
3235 * @param cb Size of the handler range.
3236 * @param fHasHCHandler Set if the handler has a HC callback function.
3237 *
3238 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3239 * Handler memory type to memory which has no HC handler.
3240 */
3241static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3242{
3243 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3244 enmType, GCPhys, cb, fHasHCHandler));
3245
3246 VM_ASSERT_EMT(pVM);
3247 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3248 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3249
3250
3251 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3252
3253 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3254 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3255 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3256 else if (fHasHCHandler)
3257 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3258 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3259
3260 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3261}
3262
3263/**
3264 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3265 *
3266 * @param pVM VM Handle.
3267 * @param enmType Handler type.
3268 * @param GCPhys Handler range address.
3269 * @param cb Size of the handler range.
3270 * @param fHasHCHandler Set if the handler has a HC callback function.
3271 *
3272 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3273 * Handler memory type to memory which has no HC handler.
3274 */
3275REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3276{
3277 REMR3ReplayHandlerNotifications(pVM);
3278
3279 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3280}
3281
3282/**
3283 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3284 *
3285 * @param pVM VM Handle.
3286 * @param enmType Handler type.
3287 * @param GCPhys Handler range address.
3288 * @param cb Size of the handler range.
3289 * @param fHasHCHandler Set if the handler has a HC callback function.
3290 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3291 */
3292static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3293{
3294 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3295 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3296 VM_ASSERT_EMT(pVM);
3297
3298
3299 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3300
3301 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3302 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3303 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3304 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3305 else if (fHasHCHandler)
3306 {
3307 if (!fRestoreAsRAM)
3308 {
3309 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3310 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3311 }
3312 else
3313 {
3314 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3315 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3316 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3317 }
3318 }
3319 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3320
3321 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3322}
3323
3324/**
3325 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3326 *
3327 * @param pVM VM Handle.
3328 * @param enmType Handler type.
3329 * @param GCPhys Handler range address.
3330 * @param cb Size of the handler range.
3331 * @param fHasHCHandler Set if the handler has a HC callback function.
3332 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3333 */
3334REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3335{
3336 REMR3ReplayHandlerNotifications(pVM);
3337 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3338}
3339
3340
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * Moves a physical handler range: the old range is reset (to RAM or
 * unassigned) and the new range gets the handler memory type.  MMIO
 * handlers must not be moved this way (release-asserted below).
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    if (fHasHCHandler)
    {
        /* Suppress the notifications triggered by the re-registrations below. */
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

        /*
         * Reset the old page.
         */
        PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
        PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }
}
3388
3389/**
3390 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3391 *
3392 * @param pVM VM Handle.
3393 * @param enmType Handler type.
3394 * @param GCPhysOld Old handler range address.
3395 * @param GCPhysNew New handler range address.
3396 * @param cb Size of the handler range.
3397 * @param fHasHCHandler Set if the handler has a HC callback function.
3398 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3399 */
3400REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3401{
3402 REMR3ReplayHandlerNotifications(pVM);
3403
3404 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3405}
3406
3407/**
3408 * Checks if we're handling access to this page or not.
3409 *
3410 * @returns true if we're trapping access.
3411 * @returns false if we aren't.
3412 * @param pVM The VM handle.
3413 * @param GCPhys The physical address.
3414 *
3415 * @remark This function will only work correctly in VBOX_STRICT builds!
3416 */
3417REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3418{
3419#ifdef VBOX_STRICT
3420 unsigned long off;
3421 REMR3ReplayHandlerNotifications(pVM);
3422
3423 off = get_phys_page_offset(GCPhys);
3424 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3425 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3426 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3427#else
3428 return false;
3429#endif
3430}
3431
3432
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry for the same address (memory type in
 *                      the low bits, physical addend in the page-aligned part).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState*          env,
                                             target_ulong       addr,
                                             CPUTLBEntry*       pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored code page: dump diagnostics and abort - executing from
       MMIO (or anything else that lands here) is fatal.  cpu_abort() and
       AssertFatalFailed() do not return. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3471
3472/**
3473 * Read guest RAM and ROM.
3474 *
3475 * @param SrcGCPhys The source address (guest physical).
3476 * @param pvDst The destination address.
3477 * @param cb Number of bytes
3478 */
3479void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3480{
3481 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3482 VBOX_CHECK_ADDR(SrcGCPhys);
3483 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3484#ifdef VBOX_DEBUG_PHYS
3485 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3486#endif
3487 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3488}
3489
3490
3491/**
3492 * Read guest RAM and ROM, unsigned 8-bit.
3493 *
3494 * @param SrcGCPhys The source address (guest physical).
3495 */
3496RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3497{
3498 uint8_t val;
3499 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3500 VBOX_CHECK_ADDR(SrcGCPhys);
3501 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3502 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3503#ifdef VBOX_DEBUG_PHYS
3504 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3505#endif
3506 return val;
3507}
3508
3509
3510/**
3511 * Read guest RAM and ROM, signed 8-bit.
3512 *
3513 * @param SrcGCPhys The source address (guest physical).
3514 */
3515RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3516{
3517 int8_t val;
3518 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3519 VBOX_CHECK_ADDR(SrcGCPhys);
3520 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3521 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3522#ifdef VBOX_DEBUG_PHYS
3523 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3524#endif
3525 return val;
3526}
3527
3528
3529/**
3530 * Read guest RAM and ROM, unsigned 16-bit.
3531 *
3532 * @param SrcGCPhys The source address (guest physical).
3533 */
3534RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3535{
3536 uint16_t val;
3537 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3538 VBOX_CHECK_ADDR(SrcGCPhys);
3539 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3540 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3541#ifdef VBOX_DEBUG_PHYS
3542 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3543#endif
3544 return val;
3545}
3546
3547
3548/**
3549 * Read guest RAM and ROM, signed 16-bit.
3550 *
3551 * @param SrcGCPhys The source address (guest physical).
3552 */
3553RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3554{
3555 int16_t val;
3556 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3557 VBOX_CHECK_ADDR(SrcGCPhys);
3558 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3559 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3560#ifdef VBOX_DEBUG_PHYS
3561 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3562#endif
3563 return val;
3564}
3565
3566
3567/**
3568 * Read guest RAM and ROM, unsigned 32-bit.
3569 *
3570 * @param SrcGCPhys The source address (guest physical).
3571 */
3572RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3573{
3574 uint32_t val;
3575 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3576 VBOX_CHECK_ADDR(SrcGCPhys);
3577 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3578 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3579#ifdef VBOX_DEBUG_PHYS
3580 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3581#endif
3582 return val;
3583}
3584
3585
3586/**
3587 * Read guest RAM and ROM, signed 32-bit.
3588 *
3589 * @param SrcGCPhys The source address (guest physical).
3590 */
3591RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3592{
3593 int32_t val;
3594 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3595 VBOX_CHECK_ADDR(SrcGCPhys);
3596 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3597 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3598#ifdef VBOX_DEBUG_PHYS
3599 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3600#endif
3601 return val;
3602}
3603
3604
3605/**
3606 * Read guest RAM and ROM, unsigned 64-bit.
3607 *
3608 * @param SrcGCPhys The source address (guest physical).
3609 */
3610uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3611{
3612 uint64_t val;
3613 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3614 VBOX_CHECK_ADDR(SrcGCPhys);
3615 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3616 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3617#ifdef VBOX_DEBUG_PHYS
3618 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3619#endif
3620 return val;
3621}
3622
3623
3624/**
3625 * Read guest RAM and ROM, signed 64-bit.
3626 *
3627 * @param SrcGCPhys The source address (guest physical).
3628 */
3629int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3630{
3631 int64_t val;
3632 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3633 VBOX_CHECK_ADDR(SrcGCPhys);
3634 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3635 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3636#ifdef VBOX_DEBUG_PHYS
3637 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3638#endif
3639 return val;
3640}
3641
3642
3643/**
3644 * Write guest RAM.
3645 *
3646 * @param DstGCPhys The destination address (guest physical).
3647 * @param pvSrc The source address.
3648 * @param cb Number of bytes to write
3649 */
3650void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3651{
3652 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3653 VBOX_CHECK_ADDR(DstGCPhys);
3654 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3655 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3656#ifdef VBOX_DEBUG_PHYS
3657 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3658#endif
3659}
3660
3661
3662/**
3663 * Write guest RAM, unsigned 8-bit.
3664 *
3665 * @param DstGCPhys The destination address (guest physical).
3666 * @param val Value
3667 */
3668void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3669{
3670 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3671 VBOX_CHECK_ADDR(DstGCPhys);
3672 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3673 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3674#ifdef VBOX_DEBUG_PHYS
3675 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3676#endif
3677}
3678
3679
/**
 * Write guest RAM, unsigned 16-bit.
 * (Header previously said "8-bit" - copy/paste slip; the code is 16-bit.)
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3696
3697
3698/**
3699 * Write guest RAM, unsigned 32-bit.
3700 *
3701 * @param DstGCPhys The destination address (guest physical).
3702 * @param val Value
3703 */
3704void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3705{
3706 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3707 VBOX_CHECK_ADDR(DstGCPhys);
3708 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3709 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3710#ifdef VBOX_DEBUG_PHYS
3711 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3712#endif
3713}
3714
3715
3716/**
3717 * Write guest RAM, unsigned 64-bit.
3718 *
3719 * @param DstGCPhys The destination address (guest physical).
3720 * @param val Value
3721 */
3722void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3723{
3724 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3725 VBOX_CHECK_ADDR(DstGCPhys);
3726 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3727 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3728#ifdef VBOX_DEBUG_PHYS
3729 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3730#endif
3731}
3732
3733#undef LOG_GROUP
3734#define LOG_GROUP LOG_GROUP_REM_MMIO
3735
3736/** Read MMIO memory. */
3737static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3738{
3739 uint32_t u32 = 0;
3740 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3741 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3742 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3743 return u32;
3744}
3745
3746/** Read MMIO memory. */
3747static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3748{
3749 uint32_t u32 = 0;
3750 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3751 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3752 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3753 return u32;
3754}
3755
3756/** Read MMIO memory. */
3757static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3758{
3759 uint32_t u32 = 0;
3760 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3761 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3762 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3763 return u32;
3764}
3765
3766/** Write to MMIO memory. */
3767static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3768{
3769 int rc;
3770 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3771 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3772 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3773}
3774
3775/** Write to MMIO memory. */
3776static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3777{
3778 int rc;
3779 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3780 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3781 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3782}
3783
3784/** Write to MMIO memory. */
3785static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3786{
3787 int rc;
3788 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3789 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3790 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3791}
3792
3793
3794#undef LOG_GROUP
3795#define LOG_GROUP LOG_GROUP_REM_HANDLER
3796
3797/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3798
3799static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3800{
3801 uint8_t u8;
3802 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3803 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3804 return u8;
3805}
3806
3807static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3808{
3809 uint16_t u16;
3810 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3811 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3812 return u16;
3813}
3814
3815static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3816{
3817 uint32_t u32;
3818 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3819 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3820 return u32;
3821}
3822
3823static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3824{
3825 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3826 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3827}
3828
3829static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3830{
3831 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3832 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3833}
3834
3835static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3836{
3837 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3838 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3839}
3840
3841/* -+- disassembly -+- */
3842
3843#undef LOG_GROUP
3844#define LOG_GROUP LOG_GROUP_REM_DISAS
3845
3846
3847/**
3848 * Enables or disables singled stepped disassembly.
3849 *
3850 * @returns VBox status code.
3851 * @param pVM VM handle.
3852 * @param fEnable To enable set this flag, to disable clear it.
3853 */
3854static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3855{
3856 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3857 VM_ASSERT_EMT(pVM);
3858
3859 if (fEnable)
3860 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3861 else
3862 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3863#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3864 cpu_single_step(&pVM->rem.s.Env, fEnable);
3865#endif
3866 return VINF_SUCCESS;
3867}
3868
3869
3870/**
3871 * Enables or disables singled stepped disassembly.
3872 *
3873 * @returns VBox status code.
3874 * @param pVM VM handle.
3875 * @param fEnable To enable set this flag, to disable clear it.
3876 */
3877REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3878{
3879 int rc;
3880
3881 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3882 if (VM_IS_EMT(pVM))
3883 return remR3DisasEnableStepping(pVM, fEnable);
3884
3885 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3886 AssertRC(rc);
3887 return rc;
3888}
3889
3890
3891#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3892/**
3893 * External Debugger Command: .remstep [on|off|1|0]
3894 */
3895static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3896{
3897 int rc;
3898
3899 if (cArgs == 0)
3900 /*
3901 * Print the current status.
3902 */
3903 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3904 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3905 else
3906 {
3907 /*
3908 * Convert the argument and change the mode.
3909 */
3910 bool fEnable;
3911 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3912 if (RT_SUCCESS(rc))
3913 {
3914 rc = REMR3DisasEnableStepping(pVM, fEnable);
3915 if (RT_SUCCESS(rc))
3916 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3917 else
3918 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3919 }
3920 else
3921 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3922 }
3923 return rc;
3924}
3925#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3926
3927
3928/**
3929 * Disassembles one instruction and prints it to the log.
3930 *
3931 * @returns Success indicator.
3932 * @param env Pointer to the recompiler CPU structure.
3933 * @param f32BitCode Indicates that whether or not the code should
3934 * be disassembled as 16 or 32 bit. If -1 the CS
3935 * selector will be inspected.
3936 * @param pszPrefix
3937 */
3938bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3939{
3940 PVM pVM = env->pVM;
3941 const bool fLog = LogIsEnabled();
3942 const bool fLog2 = LogIs2Enabled();
3943 int rc = VINF_SUCCESS;
3944
3945 /*
3946 * Don't bother if there ain't any log output to do.
3947 */
3948 if (!fLog && !fLog2)
3949 return true;
3950
3951 /*
3952 * Update the state so DBGF reads the correct register values.
3953 */
3954 remR3StateUpdate(pVM, env->pVCpu);
3955
3956 /*
3957 * Log registers if requested.
3958 */
3959 if (fLog2)
3960 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3961
3962 /*
3963 * Disassemble to log.
3964 */
3965 if (fLog)
3966 {
3967 PVMCPU pVCpu = VMMGetCpu(pVM);
3968 char szBuf[256];
3969 szBuf[0] = '\0';
3970 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3971 pVCpu->idCpu,
3972 0, /* Sel */
3973 0, /* GCPtr */
3974 DBGF_DISAS_FLAGS_CURRENT_GUEST
3975 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3976 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3977 szBuf,
3978 sizeof(szBuf),
3979 NULL);
3980 if (RT_FAILURE(rc))
3981 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3982 if (pszPrefix && *pszPrefix)
3983 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3984 else
3985 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3986 }
3987
3988 return RT_SUCCESS(rc);
3989}
3990
3991
3992/**
3993 * Disassemble recompiled code.
3994 *
3995 * @param phFileIgnored Ignored, logfile usually.
3996 * @param pvCode Pointer to the code block.
3997 * @param cb Size of the code block.
3998 */
3999void disas(FILE *phFile, void *pvCode, unsigned long cb)
4000{
4001#ifdef DEBUG_TMP_LOGGING
4002# define DISAS_PRINTF(x...) fprintf(phFile, x)
4003#else
4004# define DISAS_PRINTF(x...) RTLogPrintf(x)
4005 if (LogIs2Enabled())
4006#endif
4007 {
4008 unsigned off = 0;
4009 char szOutput[256];
4010 DISCPUSTATE Cpu;
4011
4012 memset(&Cpu, 0, sizeof(Cpu));
4013#ifdef RT_ARCH_X86
4014 Cpu.mode = CPUMODE_32BIT;
4015#else
4016 Cpu.mode = CPUMODE_64BIT;
4017#endif
4018
4019 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4020 while (off < cb)
4021 {
4022 uint32_t cbInstr;
4023 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4024 DISAS_PRINTF("%s", szOutput);
4025 else
4026 {
4027 DISAS_PRINTF("disas error\n");
4028 cbInstr = 1;
4029#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4030 break;
4031#endif
4032 }
4033 off += cbInstr;
4034 }
4035 }
4036
4037#undef DISAS_PRINTF
4038}
4039
4040
4041/**
4042 * Disassemble guest code.
4043 *
4044 * @param phFileIgnored Ignored, logfile usually.
4045 * @param uCode The guest address of the code to disassemble. (flat?)
4046 * @param cb Number of bytes to disassemble.
4047 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4048 */
4049void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4050{
4051#ifdef DEBUG_TMP_LOGGING
4052# define DISAS_PRINTF(x...) fprintf(phFile, x)
4053#else
4054# define DISAS_PRINTF(x...) RTLogPrintf(x)
4055 if (LogIs2Enabled())
4056#endif
4057 {
4058 PVM pVM = cpu_single_env->pVM;
4059 PVMCPU pVCpu = cpu_single_env->pVCpu;
4060 RTSEL cs;
4061 RTGCUINTPTR eip;
4062
4063 Assert(pVCpu);
4064
4065 /*
4066 * Update the state so DBGF reads the correct register values (flags).
4067 */
4068 remR3StateUpdate(pVM, pVCpu);
4069
4070 /*
4071 * Do the disassembling.
4072 */
4073 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4074 cs = cpu_single_env->segs[R_CS].selector;
4075 eip = uCode - cpu_single_env->segs[R_CS].base;
4076 for (;;)
4077 {
4078 char szBuf[256];
4079 uint32_t cbInstr;
4080 int rc = DBGFR3DisasInstrEx(pVM,
4081 pVCpu->idCpu,
4082 cs,
4083 eip,
4084 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4085 szBuf, sizeof(szBuf),
4086 &cbInstr);
4087 if (RT_SUCCESS(rc))
4088 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
4089 else
4090 {
4091 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4092 cbInstr = 1;
4093 }
4094
4095 /* next */
4096 if (cb <= cbInstr)
4097 break;
4098 cb -= cbInstr;
4099 uCode += cbInstr;
4100 eip += cbInstr;
4101 }
4102 }
4103#undef DISAS_PRINTF
4104}
4105
4106
4107/**
4108 * Looks up a guest symbol.
4109 *
4110 * @returns Pointer to symbol name. This is a static buffer.
4111 * @param orig_addr The address in question.
4112 */
4113const char *lookup_symbol(target_ulong orig_addr)
4114{
4115 PVM pVM = cpu_single_env->pVM;
4116 RTGCINTPTR off = 0;
4117 RTDBGSYMBOL Sym;
4118 DBGFADDRESS Addr;
4119
4120 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4121 if (RT_SUCCESS(rc))
4122 {
4123 static char szSym[sizeof(Sym.szName) + 48];
4124 if (!off)
4125 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4126 else if (off > 0)
4127 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4128 else
4129 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4130 return szSym;
4131 }
4132 return "<N/A>";
4133}
4134
4135
4136#undef LOG_GROUP
4137#define LOG_GROUP LOG_GROUP_REM
4138
4139
4140/* -+- FF notifications -+- */
4141
4142
4143/**
4144 * Notification about a pending interrupt.
4145 *
4146 * @param pVM VM Handle.
4147 * @param pVCpu VMCPU Handle.
4148 * @param u8Interrupt Interrupt
4149 * @thread The emulation thread.
4150 */
4151REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4152{
4153 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4154 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4155}
4156
4157/**
4158 * Notification about a pending interrupt.
4159 *
4160 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4161 * @param pVM VM Handle.
4162 * @param pVCpu VMCPU Handle.
4163 * @thread The emulation thread.
4164 */
4165REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4166{
4167 return pVM->rem.s.u32PendingInterrupt;
4168}
4169
4170/**
4171 * Notification about the interrupt FF being set.
4172 *
4173 * @param pVM VM Handle.
4174 * @param pVCpu VMCPU Handle.
4175 * @thread The emulation thread.
4176 */
4177REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4178{
4179 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4180 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4181 if (pVM->rem.s.fInREM)
4182 {
4183 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4184 CPU_INTERRUPT_EXTERNAL_HARD);
4185 }
4186}
4187
4188
4189/**
4190 * Notification about the interrupt FF being set.
4191 *
4192 * @param pVM VM Handle.
4193 * @param pVCpu VMCPU Handle.
4194 * @thread Any.
4195 */
4196REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4197{
4198 LogFlow(("REMR3NotifyInterruptClear:\n"));
4199 if (pVM->rem.s.fInREM)
4200 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4201}
4202
4203
4204/**
4205 * Notification about pending timer(s).
4206 *
4207 * @param pVM VM Handle.
4208 * @param pVCpuDst The target cpu for this notification.
4209 * TM will not broadcast pending timer events, but use
4210 * a dedicated EMT for them. So, only interrupt REM
4211 * execution if the given CPU is executing in REM.
4212 * @thread Any.
4213 */
4214REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4215{
4216#ifndef DEBUG_bird
4217 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4218#endif
4219 if (pVM->rem.s.fInREM)
4220 {
4221 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4222 {
4223 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4224 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4225 CPU_INTERRUPT_EXTERNAL_TIMER);
4226 }
4227 else
4228 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4229 }
4230 else
4231 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4232}
4233
4234
4235/**
4236 * Notification about pending DMA transfers.
4237 *
4238 * @param pVM VM Handle.
4239 * @thread Any.
4240 */
4241REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4242{
4243 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4244 if (pVM->rem.s.fInREM)
4245 {
4246 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4247 CPU_INTERRUPT_EXTERNAL_DMA);
4248 }
4249}
4250
4251
4252/**
4253 * Notification about pending timer(s).
4254 *
4255 * @param pVM VM Handle.
4256 * @thread Any.
4257 */
4258REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4259{
4260 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4261 if (pVM->rem.s.fInREM)
4262 {
4263 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4264 CPU_INTERRUPT_EXTERNAL_EXIT);
4265 }
4266}
4267
4268
4269/**
4270 * Notification about pending FF set by an external thread.
4271 *
4272 * @param pVM VM handle.
4273 * @thread Any.
4274 */
4275REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4276{
4277 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4278 if (pVM->rem.s.fInREM)
4279 {
4280 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4281 CPU_INTERRUPT_EXTERNAL_EXIT);
4282 }
4283}
4284
4285
4286#ifdef VBOX_WITH_STATISTICS
4287void remR3ProfileStart(int statcode)
4288{
4289 STAMPROFILEADV *pStat;
4290 switch(statcode)
4291 {
4292 case STATS_EMULATE_SINGLE_INSTR:
4293 pStat = &gStatExecuteSingleInstr;
4294 break;
4295 case STATS_QEMU_COMPILATION:
4296 pStat = &gStatCompilationQEmu;
4297 break;
4298 case STATS_QEMU_RUN_EMULATED_CODE:
4299 pStat = &gStatRunCodeQEmu;
4300 break;
4301 case STATS_QEMU_TOTAL:
4302 pStat = &gStatTotalTimeQEmu;
4303 break;
4304 case STATS_QEMU_RUN_TIMERS:
4305 pStat = &gStatTimers;
4306 break;
4307 case STATS_TLB_LOOKUP:
4308 pStat= &gStatTBLookup;
4309 break;
4310 case STATS_IRQ_HANDLING:
4311 pStat= &gStatIRQ;
4312 break;
4313 case STATS_RAW_CHECK:
4314 pStat = &gStatRawCheck;
4315 break;
4316
4317 default:
4318 AssertMsgFailed(("unknown stat %d\n", statcode));
4319 return;
4320 }
4321 STAM_PROFILE_ADV_START(pStat, a);
4322}
4323
4324
4325void remR3ProfileStop(int statcode)
4326{
4327 STAMPROFILEADV *pStat;
4328 switch(statcode)
4329 {
4330 case STATS_EMULATE_SINGLE_INSTR:
4331 pStat = &gStatExecuteSingleInstr;
4332 break;
4333 case STATS_QEMU_COMPILATION:
4334 pStat = &gStatCompilationQEmu;
4335 break;
4336 case STATS_QEMU_RUN_EMULATED_CODE:
4337 pStat = &gStatRunCodeQEmu;
4338 break;
4339 case STATS_QEMU_TOTAL:
4340 pStat = &gStatTotalTimeQEmu;
4341 break;
4342 case STATS_QEMU_RUN_TIMERS:
4343 pStat = &gStatTimers;
4344 break;
4345 case STATS_TLB_LOOKUP:
4346 pStat= &gStatTBLookup;
4347 break;
4348 case STATS_IRQ_HANDLING:
4349 pStat= &gStatIRQ;
4350 break;
4351 case STATS_RAW_CHECK:
4352 pStat = &gStatRawCheck;
4353 break;
4354 default:
4355 AssertMsgFailed(("unknown stat %d\n", statcode));
4356 return;
4357 }
4358 STAM_PROFILE_ADV_STOP(pStat, a);
4359}
4360#endif
4361
4362/**
4363 * Raise an RC, force rem exit.
4364 *
4365 * @param pVM VM handle.
4366 * @param rc The rc.
4367 */
4368void remR3RaiseRC(PVM pVM, int rc)
4369{
4370 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4371 Assert(pVM->rem.s.fInREM);
4372 VM_ASSERT_EMT(pVM);
4373 pVM->rem.s.rc = rc;
4374 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4375}
4376
4377
4378/* -+- timers -+- */
4379
4380uint64_t cpu_get_tsc(CPUX86State *env)
4381{
4382 STAM_COUNTER_INC(&gStatCpuGetTSC);
4383 return TMCpuTickGet(env->pVCpu);
4384}
4385
4386
4387/* -+- interrupts -+- */
4388
4389void cpu_set_ferr(CPUX86State *env)
4390{
4391 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4392 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4393}
4394
/**
 * Gets the next pending hardware interrupt vector for the recompiler.
 *
 * Consumes the REM pending-interrupt latch if set, otherwise asks PDM.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the latched interrupt (set by REMR3NotifyPendingInterrupt). */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts may be pending; keep the hard-interrupt request up. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4428
4429
4430/* -+- local apic -+- */
4431
#if 0 /* CPUMSetGuestMsr does this now. */
/** Disabled: superseded by CPUMSetGuestMsr; kept for reference only. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
#endif
4439
4440uint64_t cpu_get_apic_base(CPUX86State *env)
4441{
4442 uint64_t u64;
4443 int rc = PDMApicGetBase(env->pVM, &u64);
4444 if (RT_SUCCESS(rc))
4445 {
4446 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4447 return u64;
4448 }
4449 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4450 return 0;
4451}
4452
4453void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4454{
4455 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4456 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4457}
4458
4459uint8_t cpu_get_apic_tpr(CPUX86State *env)
4460{
4461 uint8_t u8;
4462 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4463 if (RT_SUCCESS(rc))
4464 {
4465 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4466 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4467 }
4468 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4469 return 0;
4470}
4471
4472/**
4473 * Read an MSR.
4474 *
4475 * @retval 0 success.
4476 * @retval -1 failure, raise \#GP(0).
4477 * @param env The cpu state.
4478 * @param idMsr The MSR to read.
4479 * @param puValue Where to return the value.
4480 */
4481int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4482{
4483 Assert(env->pVCpu);
4484 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4485}
4486
4487/**
4488 * Write to an MSR.
4489 *
4490 * @retval 0 success.
4491 * @retval -1 failure, raise \#GP(0).
4492 * @param env The cpu state.
4493 * @param idMsr The MSR to read.
4494 * @param puValue Where to return the value.
4495 */
4496int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4497{
4498 Assert(env->pVCpu);
4499 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4500}
4501
4502/* -+- I/O Ports -+- */
4503
4504#undef LOG_GROUP
4505#define LOG_GROUP LOG_GROUP_REM_IOPORT
4506
4507void cpu_outb(CPUState *env, int addr, int val)
4508{
4509 int rc;
4510
4511 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4512 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4513
4514 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4515 if (RT_LIKELY(rc == VINF_SUCCESS))
4516 return;
4517 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4518 {
4519 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4520 remR3RaiseRC(env->pVM, rc);
4521 return;
4522 }
4523 remAbort(rc, __FUNCTION__);
4524}
4525
4526void cpu_outw(CPUState *env, int addr, int val)
4527{
4528 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4529 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4530 if (RT_LIKELY(rc == VINF_SUCCESS))
4531 return;
4532 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4533 {
4534 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4535 remR3RaiseRC(env->pVM, rc);
4536 return;
4537 }
4538 remAbort(rc, __FUNCTION__);
4539}
4540
4541void cpu_outl(CPUState *env, int addr, int val)
4542{
4543 int rc;
4544 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4545 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4546 if (RT_LIKELY(rc == VINF_SUCCESS))
4547 return;
4548 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4549 {
4550 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4551 remR3RaiseRC(env->pVM, rc);
4552 return;
4553 }
4554 remAbort(rc, __FUNCTION__);
4555}
4556
4557int cpu_inb(CPUState *env, int addr)
4558{
4559 uint32_t u32 = 0;
4560 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4561 if (RT_LIKELY(rc == VINF_SUCCESS))
4562 {
4563 if (/*addr != 0x61 && */addr != 0x71)
4564 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4565 return (int)u32;
4566 }
4567 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4568 {
4569 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4570 remR3RaiseRC(env->pVM, rc);
4571 return (int)u32;
4572 }
4573 remAbort(rc, __FUNCTION__);
4574 return 0xff;
4575}
4576
4577int cpu_inw(CPUState *env, int addr)
4578{
4579 uint32_t u32 = 0;
4580 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4581 if (RT_LIKELY(rc == VINF_SUCCESS))
4582 {
4583 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4584 return (int)u32;
4585 }
4586 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4587 {
4588 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4589 remR3RaiseRC(env->pVM, rc);
4590 return (int)u32;
4591 }
4592 remAbort(rc, __FUNCTION__);
4593 return 0xffff;
4594}
4595
4596int cpu_inl(CPUState *env, int addr)
4597{
4598 uint32_t u32 = 0;
4599 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4600 if (RT_LIKELY(rc == VINF_SUCCESS))
4601 {
4602//if (addr==0x01f0 && u32 == 0x6b6d)
4603// loglevel = ~0;
4604 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4605 return (int)u32;
4606 }
4607 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4608 {
4609 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4610 remR3RaiseRC(env->pVM, rc);
4611 return (int)u32;
4612 }
4613 remAbort(rc, __FUNCTION__);
4614 return 0xffffffff;
4615}
4616
4617#undef LOG_GROUP
4618#define LOG_GROUP LOG_GROUP_REM
4619
4620
4621/* -+- helpers and misc other interfaces -+- */
4622
4623/**
4624 * Perform the CPUID instruction.
4625 *
4626 * ASMCpuId cannot be invoked from some source files where this is used because of global
4627 * register allocations.
4628 *
4629 * @param env Pointer to the recompiler CPU structure.
4630 * @param uOperator CPUID operation (eax).
4631 * @param pvEAX Where to store eax.
4632 * @param pvEBX Where to store ebx.
4633 * @param pvECX Where to store ecx.
4634 * @param pvEDX Where to store edx.
4635 */
4636void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4637{
4638 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4639}
4640
4641
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Disabled: compiled out, kept for reference only.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4671
4672/**
4673 * Interface for the qemu cpu to report unhandled situation
4674 * raising a fatal VM error.
4675 */
4676void cpu_abort(CPUState *env, const char *pszFormat, ...)
4677{
4678 va_list va;
4679 PVM pVM;
4680 PVMCPU pVCpu;
4681 char szMsg[256];
4682
4683 /*
4684 * Bitch about it.
4685 */
4686 RTLogFlags(NULL, "nodisabled nobuffered");
4687 RTLogFlush(NULL);
4688
4689 va_start(va, pszFormat);
4690#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4691 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4692 unsigned cArgs = 0;
4693 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4694 const char *psz = strchr(pszFormat, '%');
4695 while (psz && cArgs < 6)
4696 {
4697 auArgs[cArgs++] = va_arg(va, uintptr_t);
4698 psz = strchr(psz + 1, '%');
4699 }
4700 switch (cArgs)
4701 {
4702 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4703 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4704 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4705 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4706 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4707 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4708 default:
4709 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4710 }
4711#else
4712 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4713#endif
4714 va_end(va);
4715
4716 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4717 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4718
4719 /*
4720 * If we're in REM context we'll sync back the state before 'jumping' to
4721 * the EMs failure handling.
4722 */
4723 pVM = cpu_single_env->pVM;
4724 pVCpu = cpu_single_env->pVCpu;
4725 Assert(pVCpu);
4726
4727 if (pVM->rem.s.fInREM)
4728 REMR3StateBack(pVM, pVCpu);
4729 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4730 AssertMsgFailed(("EMR3FatalError returned!\n"));
4731}
4732
4733
4734/**
4735 * Aborts the VM.
4736 *
4737 * @param rc VBox error code.
4738 * @param pszTip Hint about why/when this happened.
4739 */
4740void remAbort(int rc, const char *pszTip)
4741{
4742 PVM pVM;
4743 PVMCPU pVCpu;
4744
4745 /*
4746 * Bitch about it.
4747 */
4748 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4749 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4750
4751 /*
4752 * Jump back to where we entered the recompiler.
4753 */
4754 pVM = cpu_single_env->pVM;
4755 pVCpu = cpu_single_env->pVCpu;
4756 Assert(pVCpu);
4757
4758 if (pVM->rem.s.fInREM)
4759 REMR3StateBack(pVM, pVCpu);
4760
4761 EMR3FatalError(pVCpu, rc);
4762 AssertMsgFailed(("EMR3FatalError returned!\n"));
4763}
4764
4765
4766/**
4767 * Dumps a linux system call.
4768 * @param pVCpu VMCPU handle.
4769 */
4770void remR3DumpLnxSyscall(PVMCPU pVCpu)
4771{
4772 static const char *apsz[] =
4773 {
4774 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4775 "sys_exit",
4776 "sys_fork",
4777 "sys_read",
4778 "sys_write",
4779 "sys_open", /* 5 */
4780 "sys_close",
4781 "sys_waitpid",
4782 "sys_creat",
4783 "sys_link",
4784 "sys_unlink", /* 10 */
4785 "sys_execve",
4786 "sys_chdir",
4787 "sys_time",
4788 "sys_mknod",
4789 "sys_chmod", /* 15 */
4790 "sys_lchown16",
4791 "sys_ni_syscall", /* old break syscall holder */
4792 "sys_stat",
4793 "sys_lseek",
4794 "sys_getpid", /* 20 */
4795 "sys_mount",
4796 "sys_oldumount",
4797 "sys_setuid16",
4798 "sys_getuid16",
4799 "sys_stime", /* 25 */
4800 "sys_ptrace",
4801 "sys_alarm",
4802 "sys_fstat",
4803 "sys_pause",
4804 "sys_utime", /* 30 */
4805 "sys_ni_syscall", /* old stty syscall holder */
4806 "sys_ni_syscall", /* old gtty syscall holder */
4807 "sys_access",
4808 "sys_nice",
4809 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4810 "sys_sync",
4811 "sys_kill",
4812 "sys_rename",
4813 "sys_mkdir",
4814 "sys_rmdir", /* 40 */
4815 "sys_dup",
4816 "sys_pipe",
4817 "sys_times",
4818 "sys_ni_syscall", /* old prof syscall holder */
4819 "sys_brk", /* 45 */
4820 "sys_setgid16",
4821 "sys_getgid16",
4822 "sys_signal",
4823 "sys_geteuid16",
4824 "sys_getegid16", /* 50 */
4825 "sys_acct",
4826 "sys_umount", /* recycled never used phys() */
4827 "sys_ni_syscall", /* old lock syscall holder */
4828 "sys_ioctl",
4829 "sys_fcntl", /* 55 */
4830 "sys_ni_syscall", /* old mpx syscall holder */
4831 "sys_setpgid",
4832 "sys_ni_syscall", /* old ulimit syscall holder */
4833 "sys_olduname",
4834 "sys_umask", /* 60 */
4835 "sys_chroot",
4836 "sys_ustat",
4837 "sys_dup2",
4838 "sys_getppid",
4839 "sys_getpgrp", /* 65 */
4840 "sys_setsid",
4841 "sys_sigaction",
4842 "sys_sgetmask",
4843 "sys_ssetmask",
4844 "sys_setreuid16", /* 70 */
4845 "sys_setregid16",
4846 "sys_sigsuspend",
4847 "sys_sigpending",
4848 "sys_sethostname",
4849 "sys_setrlimit", /* 75 */
4850 "sys_old_getrlimit",
4851 "sys_getrusage",
4852 "sys_gettimeofday",
4853 "sys_settimeofday",
4854 "sys_getgroups16", /* 80 */
4855 "sys_setgroups16",
4856 "old_select",
4857 "sys_symlink",
4858 "sys_lstat",
4859 "sys_readlink", /* 85 */
4860 "sys_uselib",
4861 "sys_swapon",
4862 "sys_reboot",
4863 "old_readdir",
4864 "old_mmap", /* 90 */
4865 "sys_munmap",
4866 "sys_truncate",
4867 "sys_ftruncate",
4868 "sys_fchmod",
4869 "sys_fchown16", /* 95 */
4870 "sys_getpriority",
4871 "sys_setpriority",
4872 "sys_ni_syscall", /* old profil syscall holder */
4873 "sys_statfs",
4874 "sys_fstatfs", /* 100 */
4875 "sys_ioperm",
4876 "sys_socketcall",
4877 "sys_syslog",
4878 "sys_setitimer",
4879 "sys_getitimer", /* 105 */
4880 "sys_newstat",
4881 "sys_newlstat",
4882 "sys_newfstat",
4883 "sys_uname",
4884 "sys_iopl", /* 110 */
4885 "sys_vhangup",
4886 "sys_ni_syscall", /* old "idle" system call */
4887 "sys_vm86old",
4888 "sys_wait4",
4889 "sys_swapoff", /* 115 */
4890 "sys_sysinfo",
4891 "sys_ipc",
4892 "sys_fsync",
4893 "sys_sigreturn",
4894 "sys_clone", /* 120 */
4895 "sys_setdomainname",
4896 "sys_newuname",
4897 "sys_modify_ldt",
4898 "sys_adjtimex",
4899 "sys_mprotect", /* 125 */
4900 "sys_sigprocmask",
4901 "sys_ni_syscall", /* old "create_module" */
4902 "sys_init_module",
4903 "sys_delete_module",
4904 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4905 "sys_quotactl",
4906 "sys_getpgid",
4907 "sys_fchdir",
4908 "sys_bdflush",
4909 "sys_sysfs", /* 135 */
4910 "sys_personality",
4911 "sys_ni_syscall", /* reserved for afs_syscall */
4912 "sys_setfsuid16",
4913 "sys_setfsgid16",
4914 "sys_llseek", /* 140 */
4915 "sys_getdents",
4916 "sys_select",
4917 "sys_flock",
4918 "sys_msync",
4919 "sys_readv", /* 145 */
4920 "sys_writev",
4921 "sys_getsid",
4922 "sys_fdatasync",
4923 "sys_sysctl",
4924 "sys_mlock", /* 150 */
4925 "sys_munlock",
4926 "sys_mlockall",
4927 "sys_munlockall",
4928 "sys_sched_setparam",
4929 "sys_sched_getparam", /* 155 */
4930 "sys_sched_setscheduler",
4931 "sys_sched_getscheduler",
4932 "sys_sched_yield",
4933 "sys_sched_get_priority_max",
4934 "sys_sched_get_priority_min", /* 160 */
4935 "sys_sched_rr_get_interval",
4936 "sys_nanosleep",
4937 "sys_mremap",
4938 "sys_setresuid16",
4939 "sys_getresuid16", /* 165 */
4940 "sys_vm86",
4941 "sys_ni_syscall", /* Old sys_query_module */
4942 "sys_poll",
4943 "sys_nfsservctl",
4944 "sys_setresgid16", /* 170 */
4945 "sys_getresgid16",
4946 "sys_prctl",
4947 "sys_rt_sigreturn",
4948 "sys_rt_sigaction",
4949 "sys_rt_sigprocmask", /* 175 */
4950 "sys_rt_sigpending",
4951 "sys_rt_sigtimedwait",
4952 "sys_rt_sigqueueinfo",
4953 "sys_rt_sigsuspend",
4954 "sys_pread64", /* 180 */
4955 "sys_pwrite64",
4956 "sys_chown16",
4957 "sys_getcwd",
4958 "sys_capget",
4959 "sys_capset", /* 185 */
4960 "sys_sigaltstack",
4961 "sys_sendfile",
4962 "sys_ni_syscall", /* reserved for streams1 */
4963 "sys_ni_syscall", /* reserved for streams2 */
4964 "sys_vfork", /* 190 */
4965 "sys_getrlimit",
4966 "sys_mmap2",
4967 "sys_truncate64",
4968 "sys_ftruncate64",
4969 "sys_stat64", /* 195 */
4970 "sys_lstat64",
4971 "sys_fstat64",
4972 "sys_lchown",
4973 "sys_getuid",
4974 "sys_getgid", /* 200 */
4975 "sys_geteuid",
4976 "sys_getegid",
4977 "sys_setreuid",
4978 "sys_setregid",
4979 "sys_getgroups", /* 205 */
4980 "sys_setgroups",
4981 "sys_fchown",
4982 "sys_setresuid",
4983 "sys_getresuid",
4984 "sys_setresgid", /* 210 */
4985 "sys_getresgid",
4986 "sys_chown",
4987 "sys_setuid",
4988 "sys_setgid",
4989 "sys_setfsuid", /* 215 */
4990 "sys_setfsgid",
4991 "sys_pivot_root",
4992 "sys_mincore",
4993 "sys_madvise",
4994 "sys_getdents64", /* 220 */
4995 "sys_fcntl64",
4996 "sys_ni_syscall", /* reserved for TUX */
4997 "sys_ni_syscall",
4998 "sys_gettid",
4999 "sys_readahead", /* 225 */
5000 "sys_setxattr",
5001 "sys_lsetxattr",
5002 "sys_fsetxattr",
5003 "sys_getxattr",
5004 "sys_lgetxattr", /* 230 */
5005 "sys_fgetxattr",
5006 "sys_listxattr",
5007 "sys_llistxattr",
5008 "sys_flistxattr",
5009 "sys_removexattr", /* 235 */
5010 "sys_lremovexattr",
5011 "sys_fremovexattr",
5012 "sys_tkill",
5013 "sys_sendfile64",
5014 "sys_futex", /* 240 */
5015 "sys_sched_setaffinity",
5016 "sys_sched_getaffinity",
5017 "sys_set_thread_area",
5018 "sys_get_thread_area",
5019 "sys_io_setup", /* 245 */
5020 "sys_io_destroy",
5021 "sys_io_getevents",
5022 "sys_io_submit",
5023 "sys_io_cancel",
5024 "sys_fadvise64", /* 250 */
5025 "sys_ni_syscall",
5026 "sys_exit_group",
5027 "sys_lookup_dcookie",
5028 "sys_epoll_create",
5029 "sys_epoll_ctl", /* 255 */
5030 "sys_epoll_wait",
5031 "sys_remap_file_pages",
5032 "sys_set_tid_address",
5033 "sys_timer_create",
5034 "sys_timer_settime", /* 260 */
5035 "sys_timer_gettime",
5036 "sys_timer_getoverrun",
5037 "sys_timer_delete",
5038 "sys_clock_settime",
5039 "sys_clock_gettime", /* 265 */
5040 "sys_clock_getres",
5041 "sys_clock_nanosleep",
5042 "sys_statfs64",
5043 "sys_fstatfs64",
5044 "sys_tgkill", /* 270 */
5045 "sys_utimes",
5046 "sys_fadvise64_64",
5047 "sys_ni_syscall" /* sys_vserver */
5048 };
5049
5050 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5051 switch (uEAX)
5052 {
5053 default:
5054 if (uEAX < RT_ELEMENTS(apsz))
5055 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5056 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5057 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5058 else
5059 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5060 break;
5061
5062 }
5063}
5064
5065
5066/**
5067 * Dumps an OpenBSD system call.
5068 * @param pVCpu VMCPU handle.
5069 */
5070void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5071{
5072 static const char *apsz[] =
5073 {
5074 "SYS_syscall", //0
5075 "SYS_exit", //1
5076 "SYS_fork", //2
5077 "SYS_read", //3
5078 "SYS_write", //4
5079 "SYS_open", //5
5080 "SYS_close", //6
5081 "SYS_wait4", //7
5082 "SYS_8",
5083 "SYS_link", //9
5084 "SYS_unlink", //10
5085 "SYS_11",
5086 "SYS_chdir", //12
5087 "SYS_fchdir", //13
5088 "SYS_mknod", //14
5089 "SYS_chmod", //15
5090 "SYS_chown", //16
5091 "SYS_break", //17
5092 "SYS_18",
5093 "SYS_19",
5094 "SYS_getpid", //20
5095 "SYS_mount", //21
5096 "SYS_unmount", //22
5097 "SYS_setuid", //23
5098 "SYS_getuid", //24
5099 "SYS_geteuid", //25
5100 "SYS_ptrace", //26
5101 "SYS_recvmsg", //27
5102 "SYS_sendmsg", //28
5103 "SYS_recvfrom", //29
5104 "SYS_accept", //30
5105 "SYS_getpeername", //31
5106 "SYS_getsockname", //32
5107 "SYS_access", //33
5108 "SYS_chflags", //34
5109 "SYS_fchflags", //35
5110 "SYS_sync", //36
5111 "SYS_kill", //37
5112 "SYS_38",
5113 "SYS_getppid", //39
5114 "SYS_40",
5115 "SYS_dup", //41
5116 "SYS_opipe", //42
5117 "SYS_getegid", //43
5118 "SYS_profil", //44
5119 "SYS_ktrace", //45
5120 "SYS_sigaction", //46
5121 "SYS_getgid", //47
5122 "SYS_sigprocmask", //48
5123 "SYS_getlogin", //49
5124 "SYS_setlogin", //50
5125 "SYS_acct", //51
5126 "SYS_sigpending", //52
5127 "SYS_osigaltstack", //53
5128 "SYS_ioctl", //54
5129 "SYS_reboot", //55
5130 "SYS_revoke", //56
5131 "SYS_symlink", //57
5132 "SYS_readlink", //58
5133 "SYS_execve", //59
5134 "SYS_umask", //60
5135 "SYS_chroot", //61
5136 "SYS_62",
5137 "SYS_63",
5138 "SYS_64",
5139 "SYS_65",
5140 "SYS_vfork", //66
5141 "SYS_67",
5142 "SYS_68",
5143 "SYS_sbrk", //69
5144 "SYS_sstk", //70
5145 "SYS_61",
5146 "SYS_vadvise", //72
5147 "SYS_munmap", //73
5148 "SYS_mprotect", //74
5149 "SYS_madvise", //75
5150 "SYS_76",
5151 "SYS_77",
5152 "SYS_mincore", //78
5153 "SYS_getgroups", //79
5154 "SYS_setgroups", //80
5155 "SYS_getpgrp", //81
5156 "SYS_setpgid", //82
5157 "SYS_setitimer", //83
5158 "SYS_84",
5159 "SYS_85",
5160 "SYS_getitimer", //86
5161 "SYS_87",
5162 "SYS_88",
5163 "SYS_89",
5164 "SYS_dup2", //90
5165 "SYS_91",
5166 "SYS_fcntl", //92
5167 "SYS_select", //93
5168 "SYS_94",
5169 "SYS_fsync", //95
5170 "SYS_setpriority", //96
5171 "SYS_socket", //97
5172 "SYS_connect", //98
5173 "SYS_99",
5174 "SYS_getpriority", //100
5175 "SYS_101",
5176 "SYS_102",
5177 "SYS_sigreturn", //103
5178 "SYS_bind", //104
5179 "SYS_setsockopt", //105
5180 "SYS_listen", //106
5181 "SYS_107",
5182 "SYS_108",
5183 "SYS_109",
5184 "SYS_110",
5185 "SYS_sigsuspend", //111
5186 "SYS_112",
5187 "SYS_113",
5188 "SYS_114",
5189 "SYS_115",
5190 "SYS_gettimeofday", //116
5191 "SYS_getrusage", //117
5192 "SYS_getsockopt", //118
5193 "SYS_119",
5194 "SYS_readv", //120
5195 "SYS_writev", //121
5196 "SYS_settimeofday", //122
5197 "SYS_fchown", //123
5198 "SYS_fchmod", //124
5199 "SYS_125",
5200 "SYS_setreuid", //126
5201 "SYS_setregid", //127
5202 "SYS_rename", //128
5203 "SYS_129",
5204 "SYS_130",
5205 "SYS_flock", //131
5206 "SYS_mkfifo", //132
5207 "SYS_sendto", //133
5208 "SYS_shutdown", //134
5209 "SYS_socketpair", //135
5210 "SYS_mkdir", //136
5211 "SYS_rmdir", //137
5212 "SYS_utimes", //138
5213 "SYS_139",
5214 "SYS_adjtime", //140
5215 "SYS_141",
5216 "SYS_142",
5217 "SYS_143",
5218 "SYS_144",
5219 "SYS_145",
5220 "SYS_146",
5221 "SYS_setsid", //147
5222 "SYS_quotactl", //148
5223 "SYS_149",
5224 "SYS_150",
5225 "SYS_151",
5226 "SYS_152",
5227 "SYS_153",
5228 "SYS_154",
5229 "SYS_nfssvc", //155
5230 "SYS_156",
5231 "SYS_157",
5232 "SYS_158",
5233 "SYS_159",
5234 "SYS_160",
5235 "SYS_getfh", //161
5236 "SYS_162",
5237 "SYS_163",
5238 "SYS_164",
5239 "SYS_sysarch", //165
5240 "SYS_166",
5241 "SYS_167",
5242 "SYS_168",
5243 "SYS_169",
5244 "SYS_170",
5245 "SYS_171",
5246 "SYS_172",
5247 "SYS_pread", //173
5248 "SYS_pwrite", //174
5249 "SYS_175",
5250 "SYS_176",
5251 "SYS_177",
5252 "SYS_178",
5253 "SYS_179",
5254 "SYS_180",
5255 "SYS_setgid", //181
5256 "SYS_setegid", //182
5257 "SYS_seteuid", //183
5258 "SYS_lfs_bmapv", //184
5259 "SYS_lfs_markv", //185
5260 "SYS_lfs_segclean", //186
5261 "SYS_lfs_segwait", //187
5262 "SYS_188",
5263 "SYS_189",
5264 "SYS_190",
5265 "SYS_pathconf", //191
5266 "SYS_fpathconf", //192
5267 "SYS_swapctl", //193
5268 "SYS_getrlimit", //194
5269 "SYS_setrlimit", //195
5270 "SYS_getdirentries", //196
5271 "SYS_mmap", //197
5272 "SYS___syscall", //198
5273 "SYS_lseek", //199
5274 "SYS_truncate", //200
5275 "SYS_ftruncate", //201
5276 "SYS___sysctl", //202
5277 "SYS_mlock", //203
5278 "SYS_munlock", //204
5279 "SYS_205",
5280 "SYS_futimes", //206
5281 "SYS_getpgid", //207
5282 "SYS_xfspioctl", //208
5283 "SYS_209",
5284 "SYS_210",
5285 "SYS_211",
5286 "SYS_212",
5287 "SYS_213",
5288 "SYS_214",
5289 "SYS_215",
5290 "SYS_216",
5291 "SYS_217",
5292 "SYS_218",
5293 "SYS_219",
5294 "SYS_220",
5295 "SYS_semget", //221
5296 "SYS_222",
5297 "SYS_223",
5298 "SYS_224",
5299 "SYS_msgget", //225
5300 "SYS_msgsnd", //226
5301 "SYS_msgrcv", //227
5302 "SYS_shmat", //228
5303 "SYS_229",
5304 "SYS_shmdt", //230
5305 "SYS_231",
5306 "SYS_clock_gettime", //232
5307 "SYS_clock_settime", //233
5308 "SYS_clock_getres", //234
5309 "SYS_235",
5310 "SYS_236",
5311 "SYS_237",
5312 "SYS_238",
5313 "SYS_239",
5314 "SYS_nanosleep", //240
5315 "SYS_241",
5316 "SYS_242",
5317 "SYS_243",
5318 "SYS_244",
5319 "SYS_245",
5320 "SYS_246",
5321 "SYS_247",
5322 "SYS_248",
5323 "SYS_249",
5324 "SYS_minherit", //250
5325 "SYS_rfork", //251
5326 "SYS_poll", //252
5327 "SYS_issetugid", //253
5328 "SYS_lchown", //254
5329 "SYS_getsid", //255
5330 "SYS_msync", //256
5331 "SYS_257",
5332 "SYS_258",
5333 "SYS_259",
5334 "SYS_getfsstat", //260
5335 "SYS_statfs", //261
5336 "SYS_fstatfs", //262
5337 "SYS_pipe", //263
5338 "SYS_fhopen", //264
5339 "SYS_265",
5340 "SYS_fhstatfs", //266
5341 "SYS_preadv", //267
5342 "SYS_pwritev", //268
5343 "SYS_kqueue", //269
5344 "SYS_kevent", //270
5345 "SYS_mlockall", //271
5346 "SYS_munlockall", //272
5347 "SYS_getpeereid", //273
5348 "SYS_274",
5349 "SYS_275",
5350 "SYS_276",
5351 "SYS_277",
5352 "SYS_278",
5353 "SYS_279",
5354 "SYS_280",
5355 "SYS_getresuid", //281
5356 "SYS_setresuid", //282
5357 "SYS_getresgid", //283
5358 "SYS_setresgid", //284
5359 "SYS_285",
5360 "SYS_mquery", //286
5361 "SYS_closefrom", //287
5362 "SYS_sigaltstack", //288
5363 "SYS_shmget", //289
5364 "SYS_semop", //290
5365 "SYS_stat", //291
5366 "SYS_fstat", //292
5367 "SYS_lstat", //293
5368 "SYS_fhstat", //294
5369 "SYS___semctl", //295
5370 "SYS_shmctl", //296
5371 "SYS_msgctl", //297
5372 "SYS_MAXSYSCALL", //298
5373 //299
5374 //300
5375 };
5376 uint32_t uEAX;
5377 if (!LogIsEnabled())
5378 return;
5379 uEAX = CPUMGetGuestEAX(pVCpu);
5380 switch (uEAX)
5381 {
5382 default:
5383 if (uEAX < RT_ELEMENTS(apsz))
5384 {
5385 uint32_t au32Args[8] = {0};
5386 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5387 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5388 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5389 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5390 }
5391 else
5392 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5393 break;
5394 }
5395}
5396
5397
5398#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5399/**
5400 * The Dll main entry point (stub).
5401 */
5402bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5403{
5404 return true;
5405}
5406
/**
 * Minimal memcpy for the no-CRT build (byte-by-byte, non-overlapping).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer; must not overlap dst (standard memcpy
 *                  contract).
 * @param   size    Number of bytes to copy (0 is fine, nothing is copied).
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t        *pbDst = (uint8_t *)dst;
    const uint8_t  *pbSrc = (const uint8_t *)src; /* keep the const qualifier; the old code stripped it */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5414
5415#endif
5416
/**
 * QEMU hook, presumably invoked when the guest's SMM state changes —
 * intentionally a no-op in the VBox recompiler glue (TODO confirm the
 * callback contract against the QEMU side).
 *
 * @param   env     The CPU state (unused).
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette