VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 36170

Last change on this file since 36170 was 36170, checked in by vboxsync, 14 years ago

rem: synced up to svn://svn.savannah.nongnu.org/qemu/trunk@6686 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 178.8 KB
Line 
1/* $Id: VBoxRecompiler.c 36170 2011-03-04 12:49:02Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
75/** Copy 80-bit fpu register at pSrc to pDst.
76 * This is probably faster than *calling* memcpy.
77 */
78#define REM_COPY_FPU_REG(pDst, pSrc) \
79 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81/** How remR3RunLoggingStep operates. */
82#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
83
84
85/*******************************************************************************
86* Internal Functions *
87*******************************************************************************/
88static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
91static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
108static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
109static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
110
111/*******************************************************************************
112* Global Variables *
113*******************************************************************************/
114
/** @todo Move stats to REM::s some rainy day when we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the main emulation paths; registered in REMR3Init, deregistered in REMR3Term. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE   gStatGCPhys2HCVirt;
static STAMPROFILE   gStatHCVirt2GCPhys;
static STAMCOUNTER   gStatCpuGetTSC;
/* Counters for the various reasons raw-mode execution was refused. */
static STAMCOUNTER   gStatRefuseTFInhibit;
static STAMCOUNTER   gStatRefuseVM86;
static STAMCOUNTER   gStatRefusePaging;
static STAMCOUNTER   gStatRefusePAE;
static STAMCOUNTER   gStatRefuseIOPLNot0;
static STAMCOUNTER   gStatRefuseIF0;
static STAMCOUNTER   gStatRefuseCode16;
static STAMCOUNTER   gStatRefuseWP0;
static STAMCOUNTER   gStatRefuseRing1or2;
static STAMCOUNTER   gStatRefuseCanExecute;
static STAMCOUNTER   gStatREMGDTChange;
static STAMCOUNTER   gStatREMIDTChange;
static STAMCOUNTER   gStatREMLDTRChange;
static STAMCOUNTER   gStatREMTRChange;
/* Indexed by segment register (ES, CS, SS, DS, FS, GS) - see REMR3Init registration. */
static STAMCOUNTER   gStatSelOutOfSync[6];
static STAMCOUNTER   gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER   gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
152
/*
 * Global stuff.
 */

/** MMIO read callbacks.
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword (qemu convention). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. Same size indexing as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (PGM access handler backed memory). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (PGM access handler backed memory). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};


#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments. */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once (lazily) from REMR3Init. */
static const DBGCCMD    g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc in REMR3Init. */
uint8_t *code_gen_prologue;
223
224
225/*******************************************************************************
226* Internal Functions *
227*******************************************************************************/
228void remAbort(int rc, const char *pszTip);
229extern int testmath(void);
230
231/* Put them here to avoid unused variable warning. */
232AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
233#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
234//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
235/* Why did this have to be identical?? */
236AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
237#else
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#endif
240
241
242/**
243 * Initializes the REM.
244 *
245 * @returns VBox status code.
246 * @param pVM The VM to operate on.
247 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;   /* sink for CPUID output registers we don't need */
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue must sit in the low 4G (see code_gen_prologue doc). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature leaves (std leaf 1 and ext leaf 0x80000001) into the qemu env. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* NOTE(review): this rc intentionally shadows the outer one - a DBGC
         * registration failure is non-fatal and must not change the return value. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    /* NOTE(review): several descriptions below repeat "Profiling timer scheduling."
     * (TBLookup, IRQ, RawCheck) - looks like copy-paste; confirm intended text. */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",         STAMUNIT_OCCURENCES,     "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",   STAMUNIT_OCCURENCES,     "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",       STAMUNIT_OCCURENCES,     "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",     STAMUNIT_OCCURENCES,     "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",        STAMUNIT_OCCURENCES,     "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",   STAMUNIT_OCCURENCES,     "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",        STAMUNIT_OCCURENCES,     "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",     STAMUNIT_OCCURENCES,     "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",        STAMUNIT_OCCURENCES,     "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",   STAMUNIT_OCCURENCES,     "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES,  "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",           STAMUNIT_OCCURENCES,     "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",    STAMUNIT_OCCURENCES,     "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",       STAMUNIT_OCCURENCES,     "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",    STAMUNIT_OCCURENCES,     "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",         STAMUNIT_OCCURENCES,     "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Init the handler notification lists.
     * All entries start on the free list, chained via idxNext.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    /* pCur still points at the final array entry after the loop. */
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
439
440
441/**
442 * Finalizes the REM initialization.
443 *
 * This is called after all components, devices and drivers have
 * been initialized. Its main purpose is to finish the RAM related
 * initialization.
447 *
448 * @returns VBox status code.
449 *
450 * @param pVM The VM handle.
451 */
452REMR3DECL(int) REMR3InitFinalize(PVM pVM)
453{
454 int rc;
455
456 /*
457 * Ram size & dirty bit map.
458 */
459 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
460 pVM->rem.s.fGCPhysLastRamFixed = true;
461#ifdef RT_STRICT
462 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
463#else
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
465#endif
466 return rc;
467}
468
469
470/**
471 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
472 *
473 * @returns VBox status code.
474 * @param pVM The VM handle.
475 * @param fGuarded Whether to guard the map.
476 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* GCPhysLastRam is the address of the last RAM byte; +1 gives the size
       and must not wrap around. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty-tracking byte per guest page; the size must be page-exact. */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke access to the tail so any overrun faults immediately. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the start forward so the bitmap ends flush against the guard area. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages initially dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
524
525
526/**
527 * Terminates the REM.
528 *
 * Termination means cleaning up and freeing all resources; the VM
 * itself is at this point powered off or suspended.
531 *
532 * @returns VBox status code.
533 * @param pVM The VM to operate on.
534 */
535REMR3DECL(int) REMR3Term(PVM pVM)
536{
537#ifdef VBOX_WITH_STATISTICS
538 /*
539 * Statistics.
540 */
541 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
542 STAM_DEREG(pVM, &gStatCompilationQEmu);
543 STAM_DEREG(pVM, &gStatRunCodeQEmu);
544 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
545 STAM_DEREG(pVM, &gStatTimers);
546 STAM_DEREG(pVM, &gStatTBLookup);
547 STAM_DEREG(pVM, &gStatIRQ);
548 STAM_DEREG(pVM, &gStatRawCheck);
549 STAM_DEREG(pVM, &gStatMemRead);
550 STAM_DEREG(pVM, &gStatMemWrite);
551 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
552 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
553
554 STAM_DEREG(pVM, &gStatCpuGetTSC);
555
556 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
557 STAM_DEREG(pVM, &gStatRefuseVM86);
558 STAM_DEREG(pVM, &gStatRefusePaging);
559 STAM_DEREG(pVM, &gStatRefusePAE);
560 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
561 STAM_DEREG(pVM, &gStatRefuseIF0);
562 STAM_DEREG(pVM, &gStatRefuseCode16);
563 STAM_DEREG(pVM, &gStatRefuseWP0);
564 STAM_DEREG(pVM, &gStatRefuseRing1or2);
565 STAM_DEREG(pVM, &gStatRefuseCanExecute);
566 STAM_DEREG(pVM, &gStatFlushTBs);
567
568 STAM_DEREG(pVM, &gStatREMGDTChange);
569 STAM_DEREG(pVM, &gStatREMLDTRChange);
570 STAM_DEREG(pVM, &gStatREMIDTChange);
571 STAM_DEREG(pVM, &gStatREMTRChange);
572
573 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
574 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
575 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
576 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
577 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
578 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
579
580 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
581 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
582 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
583 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
584 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
585 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
586
587 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
588#endif /* VBOX_WITH_STATISTICS */
589
590 STAM_REL_DEREG(pVM, &tb_flush_count);
591 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
592 STAM_REL_DEREG(pVM, &tlb_flush_count);
593
594 return VINF_SUCCESS;
595}
596
597
598/**
599 * The VM is being reset.
600 *
601 * For the REM component this means to call the cpu_reset() and
602 * reinitialize some state variables.
603 *
604 * @param pVM VM handle.
605 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * Bump cIgnoreAll around cpu_reset so any notifications it triggers
     * are ignored; the asserts verify nobody else holds the ignore count.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
624
625
626/**
627 * Execute state save operation.
628 *
629 * @returns VBox status code.
630 * @param pVM VM Handle.
631 * @param pSSM SSM operation handle.
632 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     *
     * Wire layout (must stay in sync with remR3Load):
     *   hflags, ~0 separator, raw-ring-0 flag, pending interrupt, ~0 terminator.
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);   /* terminator */
}
652
653
654/**
655 * Execute state load operation.
656 *
657 * @returns VBox status code.
658 * @param pVM VM Handle.
659 * @param pSSM SSM operation handle.
660 * @param uVersion Data layout version.
661 * @param uPass The data pass.
662 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;          /* sink for CPUID registers we don't care about */
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     * The layout here mirrors remR3Save exactly.
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the TLB so the freshly loaded state takes effect.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        /* NOTE(review): this pVCpu shadows the one declared above; harmless
         * here, but -Wshadow would flag it. */
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
784
785
786
787#undef LOG_GROUP
788#define LOG_GROUP LOG_GROUP_REM_RUN
789
790/**
791 * Single steps an instruction in recompiled mode.
792 *
793 * Before calling this function the REM state needs to be in sync with
794 * the VM. Call REMR3State() to perform the sync. It's only necessary
795 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
796 * and after calling REMR3StateBack().
797 *
798 * @returns VBox status code.
799 *
800 * @param pVM VM Handle.
801 * @param pVCpu VMCPU Handle.
802 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     *
     * interrupt_request is saved so it can be restored verbatim at the end.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * cpu_breakpoint_remove returns 0 on success, so fBp records whether one was set here.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Step completed: pulse TM resume/suspend so virtual time advances. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the qemu exit code onto a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was stashed in rem.s.rc; consume and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
874
875
876/**
877 * Set a breakpoint using the REM facilities.
878 *
879 * @returns VBox status code.
880 * @param pVM The VM handle.
881 * @param Address The breakpoint address.
882 * @thread The emulation thread.
883 */
884REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
885{
886 VM_ASSERT_EMT(pVM);
887 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
888 {
889 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
890 return VINF_SUCCESS;
891 }
892 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
893 return VERR_REM_NO_MORE_BP_SLOTS;
894}
895
896
897/**
898 * Clears a breakpoint set by REMR3BreakpointSet().
899 *
900 * @returns VBox status code.
901 * @param pVM The VM handle.
902 * @param Address The breakpoint address.
903 * @thread The emulation thread.
904 */
905REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
906{
907 VM_ASSERT_EMT(pVM);
908 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
909 {
910 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
911 return VINF_SUCCESS;
912 }
913 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
914 return VERR_REM_BP_NOT_FOUND;
915}
916
917
918/**
919 * Emulate an instruction.
920 *
921 * This function executes one instruction without letting anyone
922 * interrupt it. This is intended for being called while being in
923 * raw mode and thus will take care of all the state syncing between
924 * REM and the rest.
925 *
926 * @returns VBox status code.
927 * @param pVM VM handle.
928 * @param pVCpu VMCPU Handle.
929 */
930REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
931{
932 bool fFlushTBs;
933
934 int rc, rc2;
935 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
936
937 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
938 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
939 */
940 if (HWACCMIsEnabled(pVM))
941 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
942
943 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
944 fFlushTBs = pVM->rem.s.fFlushTBs;
945 pVM->rem.s.fFlushTBs = false;
946
947 /*
948 * Sync the state and enable single instruction / single stepping.
949 */
950 rc = REMR3State(pVM, pVCpu);
951 pVM->rem.s.fFlushTBs = fFlushTBs;
952 if (RT_SUCCESS(rc))
953 {
954 int interrupt_request = pVM->rem.s.Env.interrupt_request;
955 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
956#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
957 cpu_single_step(&pVM->rem.s.Env, 0);
958#endif
959 Assert(!pVM->rem.s.Env.singlestep_enabled);
960
961 /*
962 * Now we set the execute single instruction flag and enter the cpu_exec loop.
963 */
964 TMNotifyStartOfExecution(pVCpu);
965 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
966 rc = cpu_exec(&pVM->rem.s.Env);
967 TMNotifyEndOfExecution(pVCpu);
968 switch (rc)
969 {
970 /*
971 * Executed without anything out of the way happening.
972 */
973 case EXCP_SINGLE_INSTR:
974 rc = VINF_EM_RESCHEDULE;
975 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
976 break;
977
978 /*
979 * If we take a trap or start servicing a pending interrupt, we might end up here.
980 * (Timer thread or some other thread wishing EMT's attention.)
981 */
982 case EXCP_INTERRUPT:
983 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
984 rc = VINF_EM_RESCHEDULE;
985 break;
986
987 /*
988 * Single step, we assume!
989 * If there was a breakpoint there we're fucked now.
990 */
991 case EXCP_DEBUG:
992 if (pVM->rem.s.Env.watchpoint_hit)
993 {
994 /** @todo deal with watchpoints */
995 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
996 rc = VINF_EM_DBG_BREAKPOINT;
997 }
998 else
999 {
1000 CPUBreakpoint *pBP;
1001 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1002 TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1003 if (pBP->pc == GCPtrPC)
1004 break;
1005 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1006 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1007 }
1008 break;
1009
1010 /*
1011 * hlt instruction.
1012 */
1013 case EXCP_HLT:
1014 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1015 rc = VINF_EM_HALT;
1016 break;
1017
1018 /*
1019 * The VM has halted.
1020 */
1021 case EXCP_HALTED:
1022 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1023 rc = VINF_EM_HALT;
1024 break;
1025
1026 /*
1027 * Switch to RAW-mode.
1028 */
1029 case EXCP_EXECUTE_RAW:
1030 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1031 rc = VINF_EM_RESCHEDULE_RAW;
1032 break;
1033
1034 /*
1035 * Switch to hardware accelerated RAW-mode.
1036 */
1037 case EXCP_EXECUTE_HWACC:
1038 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1039 rc = VINF_EM_RESCHEDULE_HWACC;
1040 break;
1041
1042 /*
1043 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1044 */
1045 case EXCP_RC:
1046 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1047 rc = pVM->rem.s.rc;
1048 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1049 break;
1050
1051 /*
1052 * Figure out the rest when they arrive....
1053 */
1054 default:
1055 AssertMsgFailed(("rc=%d\n", rc));
1056 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1057 rc = VINF_EM_RESCHEDULE;
1058 break;
1059 }
1060
1061 /*
1062 * Switch back the state.
1063 */
1064 pVM->rem.s.Env.interrupt_request = interrupt_request;
1065 rc2 = REMR3StateBack(pVM, pVCpu);
1066 AssertRC(rc2);
1067 }
1068
1069 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1070 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1071 return rc;
1072}
1073
1074
1075/**
1076 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1077 *
1078 * @returns VBox status code.
1079 *
1080 * @param pVM The VM handle.
1081 * @param pVCpu The Virtual CPU handle.
1082 */
1083static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1084{
1085 int rc;
1086
1087 Assert(pVM->rem.s.fInREM);
1088#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1089 cpu_single_step(&pVM->rem.s.Env, 1);
1090#else
1091 Assert(!pVM->rem.s.Env.singlestep_enabled);
1092#endif
1093
1094 /*
1095 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1096 */
1097 for (;;)
1098 {
1099 char szBuf[256];
1100
1101 /*
1102 * Log the current registers state and instruction.
1103 */
1104 remR3StateUpdate(pVM, pVCpu);
1105 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1106 szBuf[0] = '\0';
1107 rc = DBGFR3DisasInstrEx(pVM,
1108 pVCpu->idCpu,
1109 0, /* Sel */
1110 0, /* GCPtr */
1111 DBGF_DISAS_FLAGS_CURRENT_GUEST
1112 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1113 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1114 szBuf,
1115 sizeof(szBuf),
1116 NULL);
1117 if (RT_FAILURE(rc))
1118 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1119 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1120
1121 /*
1122 * Execute the instruction.
1123 */
1124 TMNotifyStartOfExecution(pVCpu);
1125
1126 if ( pVM->rem.s.Env.exception_index < 0
1127 || pVM->rem.s.Env.exception_index > 256)
1128 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1129
1130#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1131 pVM->rem.s.Env.interrupt_request = 0;
1132#else
1133 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1134#endif
1135 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1136 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1137 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1138 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1139 pVM->rem.s.Env.interrupt_request,
1140 pVM->rem.s.Env.halted,
1141 pVM->rem.s.Env.exception_index
1142 );
1143
1144 rc = cpu_exec(&pVM->rem.s.Env);
1145
1146 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1147 pVM->rem.s.Env.interrupt_request,
1148 pVM->rem.s.Env.halted,
1149 pVM->rem.s.Env.exception_index
1150 );
1151
1152 TMNotifyEndOfExecution(pVCpu);
1153
1154 switch (rc)
1155 {
1156#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1157 /*
1158 * The normal exit.
1159 */
1160 case EXCP_SINGLE_INSTR:
1161 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1162 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1163 continue;
1164 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1165 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1166 rc = VINF_SUCCESS;
1167 break;
1168
1169#else
1170 /*
1171 * The normal exit, check for breakpoints at PC just to be sure.
1172 */
1173#endif
1174 case EXCP_DEBUG:
1175 if (pVM->rem.s.Env.watchpoint_hit)
1176 {
1177 /** @todo deal with watchpoints */
1178 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1179 rc = VINF_EM_DBG_BREAKPOINT;
1180 }
1181 else
1182 {
1183 CPUBreakpoint *pBP;
1184 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1185 TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1186 if (pBP->pc == GCPtrPC)
1187 break;
1188 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1189 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1190 }
1191#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1192 if (rc == VINF_EM_DBG_STEPPED)
1193 {
1194 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1195 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1196 continue;
1197
1198 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1199 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1200 rc = VINF_SUCCESS;
1201 }
1202#endif
1203 break;
1204
1205 /*
1206 * If we take a trap or start servicing a pending interrupt, we might end up here.
1207 * (Timer thread or some other thread wishing EMT's attention.)
1208 */
1209 case EXCP_INTERRUPT:
1210 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1211 rc = VINF_SUCCESS;
1212 break;
1213
1214 /*
1215 * hlt instruction.
1216 */
1217 case EXCP_HLT:
1218 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1219 rc = VINF_EM_HALT;
1220 break;
1221
1222 /*
1223 * The VM has halted.
1224 */
1225 case EXCP_HALTED:
1226 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1227 rc = VINF_EM_HALT;
1228 break;
1229
1230 /*
1231 * Switch to RAW-mode.
1232 */
1233 case EXCP_EXECUTE_RAW:
1234 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1235 rc = VINF_EM_RESCHEDULE_RAW;
1236 break;
1237
1238 /*
1239 * Switch to hardware accelerated RAW-mode.
1240 */
1241 case EXCP_EXECUTE_HWACC:
1242 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1243 rc = VINF_EM_RESCHEDULE_HWACC;
1244 break;
1245
1246 /*
1247 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1248 */
1249 case EXCP_RC:
1250 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1251 rc = pVM->rem.s.rc;
1252 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1253 break;
1254
1255 /*
1256 * Figure out the rest when they arrive....
1257 */
1258 default:
1259 AssertMsgFailed(("rc=%d\n", rc));
1260 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1261 rc = VINF_EM_RESCHEDULE;
1262 break;
1263 }
1264 break;
1265 }
1266
1267#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1268// cpu_single_step(&pVM->rem.s.Env, 0);
1269#else
1270 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1271#endif
1272 return rc;
1273}
1274
1275
1276/**
1277 * Runs code in recompiled mode.
1278 *
1279 * Before calling this function the REM state needs to be in sync with
1280 * the VM. Call REMR3State() to perform the sync. It's only necessary
1281 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1282 * and after calling REMR3StateBack().
1283 *
1284 * @returns VBox status code.
1285 *
1286 * @param pVM VM Handle.
1287 * @param pVCpu VMCPU Handle.
1288 */
1289REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1290{
1291 int rc;
1292
1293 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1294 return remR3RunLoggingStep(pVM, pVCpu);
1295
1296 Assert(pVM->rem.s.fInREM);
1297 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1298
1299 TMNotifyStartOfExecution(pVCpu);
1300 rc = cpu_exec(&pVM->rem.s.Env);
1301 TMNotifyEndOfExecution(pVCpu);
1302 switch (rc)
1303 {
1304 /*
1305 * This happens when the execution was interrupted
1306 * by an external event, like pending timers.
1307 */
1308 case EXCP_INTERRUPT:
1309 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1310 rc = VINF_SUCCESS;
1311 break;
1312
1313 /*
1314 * hlt instruction.
1315 */
1316 case EXCP_HLT:
1317 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1318 rc = VINF_EM_HALT;
1319 break;
1320
1321 /*
1322 * The VM has halted.
1323 */
1324 case EXCP_HALTED:
1325 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1326 rc = VINF_EM_HALT;
1327 break;
1328
1329 /*
1330 * Breakpoint/single step.
1331 */
1332 case EXCP_DEBUG:
1333 if (pVM->rem.s.Env.watchpoint_hit)
1334 {
1335 /** @todo deal with watchpoints */
1336 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1337 rc = VINF_EM_DBG_BREAKPOINT;
1338 }
1339 else
1340 {
1341 CPUBreakpoint *pBP;
1342 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1343 TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1344 if (pBP->pc == GCPtrPC)
1345 break;
1346 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1347 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1348 }
1349 break;
1350
1351 /*
1352 * Switch to RAW-mode.
1353 */
1354 case EXCP_EXECUTE_RAW:
1355 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1356 rc = VINF_EM_RESCHEDULE_RAW;
1357 break;
1358
1359 /*
1360 * Switch to hardware accelerated RAW-mode.
1361 */
1362 case EXCP_EXECUTE_HWACC:
1363 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1364 rc = VINF_EM_RESCHEDULE_HWACC;
1365 break;
1366
1367 /*
1368 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1369 */
1370 case EXCP_RC:
1371 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1372 rc = pVM->rem.s.rc;
1373 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1374 break;
1375
1376 /*
1377 * Figure out the rest when they arrive....
1378 */
1379 default:
1380 AssertMsgFailed(("rc=%d\n", rc));
1381 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1382 rc = VINF_SUCCESS;
1383 break;
1384 }
1385
1386 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1387 return rc;
1388}
1389
1390
1391/**
1392 * Check if the cpu state is suitable for Raw execution.
1393 *
1394 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1395 *
1396 * @param env The CPU env struct.
1397 * @param eip The EIP to check this for (might differ from env->eip).
1398 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1399 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1400 *
1401 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1402 */
1403bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1404{
1405 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1406 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1407 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1408 uint32_t u32CR0;
1409
1410 /* Update counter. */
1411 env->pVM->rem.s.cCanExecuteRaw++;
1412
1413 /* Never when single stepping+logging guest code. */
1414 if (env->state & CPU_EMULATE_SINGLE_STEP)
1415 return false;
1416
1417 if (HWACCMIsEnabled(env->pVM))
1418 {
1419 CPUMCTX Ctx;
1420
1421 env->state |= CPU_RAW_HWACC;
1422
1423 /*
1424 * Create partial context for HWACCMR3CanExecuteGuest
1425 */
1426 Ctx.cr0 = env->cr[0];
1427 Ctx.cr3 = env->cr[3];
1428 Ctx.cr4 = env->cr[4];
1429
1430 Ctx.tr = env->tr.selector;
1431 Ctx.trHid.u64Base = env->tr.base;
1432 Ctx.trHid.u32Limit = env->tr.limit;
1433 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1434
1435 Ctx.ldtr = env->ldt.selector;
1436 Ctx.ldtrHid.u64Base = env->ldt.base;
1437 Ctx.ldtrHid.u32Limit = env->ldt.limit;
1438 Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1439
1440 Ctx.idtr.cbIdt = env->idt.limit;
1441 Ctx.idtr.pIdt = env->idt.base;
1442
1443 Ctx.gdtr.cbGdt = env->gdt.limit;
1444 Ctx.gdtr.pGdt = env->gdt.base;
1445
1446 Ctx.rsp = env->regs[R_ESP];
1447 Ctx.rip = env->eip;
1448
1449 Ctx.eflags.u32 = env->eflags;
1450
1451 Ctx.cs = env->segs[R_CS].selector;
1452 Ctx.csHid.u64Base = env->segs[R_CS].base;
1453 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1454 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1455
1456 Ctx.ds = env->segs[R_DS].selector;
1457 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1458 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1459 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1460
1461 Ctx.es = env->segs[R_ES].selector;
1462 Ctx.esHid.u64Base = env->segs[R_ES].base;
1463 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1464 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1465
1466 Ctx.fs = env->segs[R_FS].selector;
1467 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1468 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1469 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1470
1471 Ctx.gs = env->segs[R_GS].selector;
1472 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1473 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1474 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1475
1476 Ctx.ss = env->segs[R_SS].selector;
1477 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1478 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1479 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1480
1481 Ctx.msrEFER = env->efer;
1482
1483 /* Hardware accelerated raw-mode:
1484 *
1485 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1486 */
1487 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1488 {
1489 *piException = EXCP_EXECUTE_HWACC;
1490 return true;
1491 }
1492 return false;
1493 }
1494
1495 /*
1496 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1497 * or 32 bits protected mode ring 0 code
1498 *
1499 * The tests are ordered by the likelihood of being true during normal execution.
1500 */
1501 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1502 {
1503 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1504 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1505 return false;
1506 }
1507
1508#ifndef VBOX_RAW_V86
1509 if (fFlags & VM_MASK) {
1510 STAM_COUNTER_INC(&gStatRefuseVM86);
1511 Log2(("raw mode refused: VM_MASK\n"));
1512 return false;
1513 }
1514#endif
1515
1516 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1517 {
1518#ifndef DEBUG_bird
1519 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1520#endif
1521 return false;
1522 }
1523
1524 if (env->singlestep_enabled)
1525 {
1526 //Log2(("raw mode refused: Single step\n"));
1527 return false;
1528 }
1529
1530 if (!TAILQ_EMPTY(&env->breakpoints))
1531 {
1532 //Log2(("raw mode refused: Breakpoints\n"));
1533 return false;
1534 }
1535
1536 if (!TAILQ_EMPTY(&env->watchpoints))
1537 {
1538 //Log2(("raw mode refused: Watchpoints\n"));
1539 return false;
1540 }
1541
1542 u32CR0 = env->cr[0];
1543 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1544 {
1545 STAM_COUNTER_INC(&gStatRefusePaging);
1546 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1547 return false;
1548 }
1549
1550 if (env->cr[4] & CR4_PAE_MASK)
1551 {
1552 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1553 {
1554 STAM_COUNTER_INC(&gStatRefusePAE);
1555 return false;
1556 }
1557 }
1558
1559 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1560 {
1561 if (!EMIsRawRing3Enabled(env->pVM))
1562 return false;
1563
1564 if (!(env->eflags & IF_MASK))
1565 {
1566 STAM_COUNTER_INC(&gStatRefuseIF0);
1567 Log2(("raw mode refused: IF (RawR3)\n"));
1568 return false;
1569 }
1570
1571 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1572 {
1573 STAM_COUNTER_INC(&gStatRefuseWP0);
1574 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1575 return false;
1576 }
1577 }
1578 else
1579 {
1580 if (!EMIsRawRing0Enabled(env->pVM))
1581 return false;
1582
1583 // Let's start with pure 32 bits ring 0 code first
1584 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1585 {
1586 STAM_COUNTER_INC(&gStatRefuseCode16);
1587 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1588 return false;
1589 }
1590
1591 // Only R0
1592 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1593 {
1594 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1595 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1596 return false;
1597 }
1598
1599 if (!(u32CR0 & CR0_WP_MASK))
1600 {
1601 STAM_COUNTER_INC(&gStatRefuseWP0);
1602 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1603 return false;
1604 }
1605
1606 if (PATMIsPatchGCAddr(env->pVM, eip))
1607 {
1608 Log2(("raw r0 mode forced: patch code\n"));
1609 *piException = EXCP_EXECUTE_RAW;
1610 return true;
1611 }
1612
1613#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1614 if (!(env->eflags & IF_MASK))
1615 {
1616 STAM_COUNTER_INC(&gStatRefuseIF0);
1617 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1618 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1619 return false;
1620 }
1621#endif
1622
1623 env->state |= CPU_RAW_RING0;
1624 }
1625
1626 /*
1627 * Don't reschedule the first time we're called, because there might be
1628 * special reasons why we're here that is not covered by the above checks.
1629 */
1630 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1631 {
1632 Log2(("raw mode refused: first scheduling\n"));
1633 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1634 return false;
1635 }
1636
1637 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1638 *piException = EXCP_EXECUTE_RAW;
1639 return true;
1640}
1641
1642
1643/**
1644 * Fetches a code byte.
1645 *
1646 * @returns Success indicator (bool) for ease of use.
1647 * @param env The CPU environment structure.
1648 * @param GCPtrInstr Where to fetch code.
1649 * @param pu8Byte Where to store the byte on success
1650 */
1651bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1652{
1653 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1654 if (RT_SUCCESS(rc))
1655 return true;
1656 return false;
1657}
1658
1659
1660/**
1661 * Flush (or invalidate if you like) page table/dir entry.
1662 *
1663 * (invlpg instruction; tlb_flush_page)
1664 *
1665 * @param env Pointer to cpu environment.
1666 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1667 */
1668void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1669{
1670 PVM pVM = env->pVM;
1671 PCPUMCTX pCtx;
1672 int rc;
1673
1674 /*
1675 * When we're replaying invlpg instructions or restoring a saved
1676 * state we disable this path.
1677 */
1678 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1679 return;
1680 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1681 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1682
1683 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1684
1685 /*
1686 * Update the control registers before calling PGMFlushPage.
1687 */
1688 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1689 Assert(pCtx);
1690 pCtx->cr0 = env->cr[0];
1691 pCtx->cr3 = env->cr[3];
1692 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1693 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1694 pCtx->cr4 = env->cr[4];
1695
1696 /*
1697 * Let PGM do the rest.
1698 */
1699 Assert(env->pVCpu);
1700 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1701 if (RT_FAILURE(rc))
1702 {
1703 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1704 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1705 }
1706 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1707}
1708
1709
1710#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest-physical address into a host pointer for the QEMU TLB,
 * encoding failure/monitoring status in the low bits of the returned pointer:
 * bit 0 set = no usable mapping, bit 1 set = writes must be caught.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored and a writable
       mapping is always requested - looks deliberate given the inline
       comment, but confirm against the TLB-fill callers. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;                        /* bit 0: invalid mapping */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);      /* bit 1: catch writes */
    return pv;
}
1731#endif /* REM_PHYS_ADDR_IN_TLB */
1732
1733
1734/**
1735 * Called from tlb_protect_code in order to write monitor a code page.
1736 *
1737 * @param env Pointer to the CPU environment.
1738 * @param GCPtr Code page to monitor
1739 */
1740void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1741{
1742#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1743 Assert(env->pVM->rem.s.fInREM);
1744 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1745 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1746 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1747 && !(env->eflags & VM_MASK) /* no V86 mode */
1748 && !HWACCMIsEnabled(env->pVM))
1749 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1750#endif
1751}
1752
1753
1754/**
1755 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1756 *
1757 * @param env Pointer to the CPU environment.
1758 * @param GCPtr Code page to monitor
1759 */
1760void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1761{
1762 Assert(env->pVM->rem.s.fInREM);
1763#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1764 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1765 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1766 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1767 && !(env->eflags & VM_MASK) /* no V86 mode */
1768 && !HWACCMIsEnabled(env->pVM))
1769 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1770#endif
1771}
1772
1773
1774/**
1775 * Called when the CPU is initialized, any of the CRx registers are changed or
1776 * when the A20 line is modified.
1777 *
1778 * @param env Pointer to the CPU environment.
1779 * @param fGlobal Set if the flush is global.
1780 */
1781void remR3FlushTLB(CPUState *env, bool fGlobal)
1782{
1783 PVM pVM = env->pVM;
1784 PCPUMCTX pCtx;
1785
1786 /*
1787 * When we're replaying invlpg instructions or restoring a saved
1788 * state we disable this path.
1789 */
1790 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1791 return;
1792 Assert(pVM->rem.s.fInREM);
1793
1794 /*
1795 * The caller doesn't check cr4, so we have to do that for ourselves.
1796 */
1797 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1798 fGlobal = true;
1799 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1800
1801 /*
1802 * Update the control registers before calling PGMR3FlushTLB.
1803 */
1804 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1805 Assert(pCtx);
1806 pCtx->cr0 = env->cr[0];
1807 pCtx->cr3 = env->cr[3];
1808 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1809 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1810 pCtx->cr4 = env->cr[4];
1811
1812 /*
1813 * Let PGM do the rest.
1814 */
1815 Assert(env->pVCpu);
1816 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1817}
1818
1819
1820/**
1821 * Called when any of the cr0, cr4 or efer registers is updated.
1822 *
1823 * @param env Pointer to the CPU environment.
1824 */
1825void remR3ChangeCpuMode(CPUState *env)
1826{
1827 PVM pVM = env->pVM;
1828 uint64_t efer;
1829 PCPUMCTX pCtx;
1830 int rc;
1831
1832 /*
1833 * When we're replaying loads or restoring a saved
1834 * state this path is disabled.
1835 */
1836 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1837 return;
1838 Assert(pVM->rem.s.fInREM);
1839
1840 /*
1841 * Update the control registers before calling PGMChangeMode()
1842 * as it may need to map whatever cr3 is pointing to.
1843 */
1844 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1845 Assert(pCtx);
1846 pCtx->cr0 = env->cr[0];
1847 pCtx->cr3 = env->cr[3];
1848 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1849 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1850 pCtx->cr4 = env->cr[4];
1851
1852#ifdef TARGET_X86_64
1853 efer = env->efer;
1854#else
1855 efer = 0;
1856#endif
1857 Assert(env->pVCpu);
1858 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1859 if (rc != VINF_SUCCESS)
1860 {
1861 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1862 {
1863 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1864 remR3RaiseRC(env->pVM, rc);
1865 }
1866 else
1867 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1868 }
1869}
1870
1871
1872/**
1873 * Called from compiled code to run dma.
1874 *
1875 * @param env Pointer to the CPU environment.
1876 */
1877void remR3DmaRun(CPUState *env)
1878{
1879 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1880 PDMR3DmaRun(env->pVM);
1881 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1882}
1883
1884
1885/**
1886 * Called from compiled code to schedule pending timers in VMM
1887 *
1888 * @param env Pointer to the CPU environment.
1889 */
1890void remR3TimersRun(CPUState *env)
1891{
1892 LogFlow(("remR3TimersRun:\n"));
1893 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1894 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1895 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1896 TMR3TimerQueuesDo(env->pVM);
1897 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1898 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1899}
1900
1901
1902/**
1903 * Record trap occurrence
1904 *
1905 * @returns VBox status code
1906 * @param env Pointer to the CPU environment.
1907 * @param uTrap Trap nr
1908 * @param uErrorCode Error code
1909 * @param pvNextEIP Next EIP
1910 */
1911int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1912{
1913 PVM pVM = env->pVM;
1914#ifdef VBOX_WITH_STATISTICS
1915 static STAMCOUNTER s_aStatTrap[255];
1916 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1917#endif
1918
1919#ifdef VBOX_WITH_STATISTICS
1920 if (uTrap < 255)
1921 {
1922 if (!s_aRegisters[uTrap])
1923 {
1924 char szStatName[64];
1925 s_aRegisters[uTrap] = true;
1926 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1927 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1928 }
1929 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1930 }
1931#endif
1932 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1933 if( uTrap < 0x20
1934 && (env->cr[0] & X86_CR0_PE)
1935 && !(env->eflags & X86_EFL_VM))
1936 {
1937#ifdef DEBUG
1938 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1939#endif
1940 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1941 {
1942 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1943 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1944 return VERR_REM_TOO_MANY_TRAPS;
1945 }
1946 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1947 pVM->rem.s.cPendingExceptions = 1;
1948 pVM->rem.s.uPendingException = uTrap;
1949 pVM->rem.s.uPendingExcptEIP = env->eip;
1950 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1951 }
1952 else
1953 {
1954 pVM->rem.s.cPendingExceptions = 0;
1955 pVM->rem.s.uPendingException = uTrap;
1956 pVM->rem.s.uPendingExcptEIP = env->eip;
1957 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1958 }
1959 return VINF_SUCCESS;
1960}
1961
1962
/**
1964 * Clear current active trap
1965 *
1966 * @param pVM VM Handle.
1967 */
1968void remR3TrapClear(PVM pVM)
1969{
1970 pVM->rem.s.cPendingExceptions = 0;
1971 pVM->rem.s.uPendingException = 0;
1972 pVM->rem.s.uPendingExcptEIP = 0;
1973 pVM->rem.s.uPendingExcptCR2 = 0;
1974}
1975
1976
/**
1978 * Record previous call instruction addresses
1979 *
1980 * @param env Pointer to the CPU environment.
1981 */
void remR3RecordCall(CPUState *env)
{
    /* Forward the current EIP to CSAM so it can record the call site. */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1986
1987
1988/**
1989 * Syncs the internal REM state with the VM.
1990 *
 * This must be called before REMR3Run() is invoked whenever the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
1994 *
1995 * @returns VBox status code.
1996 *
1997 * @param pVM VM Handle.
1998 * @param pVCpu VMCPU Handle.
1999 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
2003 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX]  = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX]  = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX]  = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX]  = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP]  = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP]  = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI]  = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI]  = pCtx->rdi;
    pVM->rem.s.Env.regs[8]      = pCtx->r8;
    pVM->rem.s.Env.regs[9]      = pCtx->r9;
    pVM->rem.s.Env.regs[10]     = pCtx->r10;
    pVM->rem.s.Env.regs[11]     = pCtx->r11;
    pVM->rem.s.Env.regs[12]     = pCtx->r12;
    pVM->rem.s.Env.regs[13]     = pCtx->r13;
    pVM->rem.s.Env.regs[14]     = pCtx->r14;
    pVM->rem.s.Env.regs[15]     = pCtx->r15;

    pVM->rem.s.Env.eip          = pCtx->rip;

    pVM->rem.s.Env.eflags       = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX]  = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX]  = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX]  = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX]  = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP]  = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP]  = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI]  = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI]  = pCtx->edi;
    pVM->rem.s.Env.eip          = pCtx->eip;

    pVM->rem.s.Env.eflags       = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2]        = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags      &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?
     */
    if (pVM->rem.s.cInvalidatedPages)
    {
        RTUINT i;   /* NOTE(review): shadows the outer 'i'; harmless but confusing. */

        pVM->rem.s.fIgnoreInvlPg = true;
        for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
        {
            Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
            tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
        }
        pVM->rem.s.fIgnoreInvlPg = false;
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer         = pCtx->msrEFER;
    pVM->rem.s.Env.star         = pCtx->msrSTAR;
    pVM->rem.s.Env.pat          = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar        = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar        = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask        = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
        )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base     = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit    = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base     = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit    = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs  = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base     = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit    = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags    = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base     = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit    = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags    = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags   &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int     = 0;
            pVM->rem.s.Env.exception_next_eip   = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and set the default to 2 byte, checking
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int     = 1;
            pVM->rem.s.Env.exception_next_eip   = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        switch (u8TrapNo)
        {
            case 0x0e:
                pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                break;

            case 0x11: case 0x08:
            default:
                pVM->rem.s.Env.error_code = 0;
                break;
        }

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2457
2458
2459/**
 * Syncs back changes in the REM state to the VM state.
2461 *
2462 * This must be called after invoking REMR3Run().
2463 * Calling it several times in a row is not permitted.
2464 *
2465 * @returns VBox status code.
2466 *
2467 * @param pVM VM Handle.
2468 * @param pVCpu VMCPU Handle.
2469 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A toggled CR4.VME requires resyncing the TSS's interrupt redirection bitmap. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* Raise the SELM/TRPM sync FFs only when GDT/IDT bases actually moved. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* 0xF0FF keeps only the defined attribute bits when converting QEMU's
       descriptor-flags dword back to an attribute word. */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Restore the busy bit the state-in sync cleared (see REMR3State). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're not longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (  pVM->rem.s.Env.segs[R_SS].newselector
                       | pVM->rem.s.Env.segs[R_GS].newselector
                       | pVM->rem.s.Env.segs[R_FS].newselector
                       | pVM->rem.s.Env.segs[R_ES].newselector
                       | pVM->rem.s.Env.segs[R_DS].newselector
                       | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2718
2719
2720/**
2721 * This is called by the disassembler when it wants to update the cpu state
2722 * before for instance doing a register dump.
2723 */
2724static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2725{
2726 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2727 unsigned i;
2728
2729 Assert(pVM->rem.s.fInREM);
2730
2731 /*
2732 * Copy back the registers.
2733 * This is done in the order they are declared in the CPUMCTX structure.
2734 */
2735
2736 /** @todo FOP */
2737 /** @todo FPUIP */
2738 /** @todo CS */
2739 /** @todo FPUDP */
2740 /** @todo DS */
2741 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2742 pCtx->fpu.MXCSR = 0;
2743 pCtx->fpu.MXCSR_MASK = 0;
2744
2745 /** @todo check if FPU/XMM was actually used in the recompiler */
2746 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2747//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2748
2749#ifdef TARGET_X86_64
2750 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2751 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2752 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2753 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2754 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2755 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2756 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2757 pCtx->r8 = pVM->rem.s.Env.regs[8];
2758 pCtx->r9 = pVM->rem.s.Env.regs[9];
2759 pCtx->r10 = pVM->rem.s.Env.regs[10];
2760 pCtx->r11 = pVM->rem.s.Env.regs[11];
2761 pCtx->r12 = pVM->rem.s.Env.regs[12];
2762 pCtx->r13 = pVM->rem.s.Env.regs[13];
2763 pCtx->r14 = pVM->rem.s.Env.regs[14];
2764 pCtx->r15 = pVM->rem.s.Env.regs[15];
2765
2766 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2767#else
2768 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2769 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2770 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2771 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2772 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2773 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2774 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2775
2776 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2777#endif
2778
2779 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2780
2781 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2782 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2783 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2784 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2785 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2786
2787#ifdef TARGET_X86_64
2788 pCtx->rip = pVM->rem.s.Env.eip;
2789 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2790#else
2791 pCtx->eip = pVM->rem.s.Env.eip;
2792 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2793#endif
2794
2795 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2796 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2797 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2798 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2799 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2800 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2801
2802 for (i = 0; i < 8; i++)
2803 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2804
2805 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2806 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2807 {
2808 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2809 STAM_COUNTER_INC(&gStatREMGDTChange);
2810 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2811 }
2812
2813 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2814 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2815 {
2816 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2817 STAM_COUNTER_INC(&gStatREMIDTChange);
2818 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2819 }
2820
2821 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2822 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2823 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2824 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2825 {
2826 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2827 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2828 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2829 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2830 STAM_COUNTER_INC(&gStatREMLDTRChange);
2831 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2832 }
2833
2834 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2835 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2836 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2837 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2838 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2839 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2840 : 0) )
2841 {
2842 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2843 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2844 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2845 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2846 pCtx->tr = pVM->rem.s.Env.tr.selector;
2847 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2848 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2849 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2850 if (pCtx->trHid.Attr.u)
2851 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2852 STAM_COUNTER_INC(&gStatREMTRChange);
2853 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2854 }
2855
2856 /** @todo These values could still be out of sync! */
2857 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2858 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2859 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2860 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2861
2862 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2863 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2864 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2865
2866 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2867 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2868 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2869
2870 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2871 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2872 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2873
2874 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2875 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2876 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2877
2878 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2879 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2880 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2881
2882 /* Sysenter MSR */
2883 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2884 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2885 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2886
2887 /* System MSRs. */
2888 pCtx->msrEFER = pVM->rem.s.Env.efer;
2889 pCtx->msrSTAR = pVM->rem.s.Env.star;
2890 pCtx->msrPAT = pVM->rem.s.Env.pat;
2891#ifdef TARGET_X86_64
2892 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2893 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2894 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2895 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2896#endif
2897
2898}
2899
2900
2901/**
2902 * Update the VMM state information if we're currently in REM.
2903 *
2904 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2905 * we're currently executing in REM and the VMM state is invalid. This method will of
2906 * course check that we're executing in REM before syncing any data over to the VMM.
2907 *
2908 * @param pVM The VM handle.
2909 * @param pVCpu The VMCPU handle.
2910 */
2911REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2912{
2913 if (pVM->rem.s.fInREM)
2914 remR3StateUpdate(pVM, pVCpu);
2915}
2916
2917
2918#undef LOG_GROUP
2919#define LOG_GROUP LOG_GROUP_REM
2920
2921
/**
 * Notify the recompiler about Address Gate 20 state change.
 *
 * This notification is required since A20 gate changes are
 * initialized from a device driver and the VM might just as
 * well be in REM mode as in RAW mode.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle (currently unused by this function).
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bump cIgnoreAll around the QEMU call — same pattern as the other
       notification handlers in this file. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2943
2944
/**
 * Replays the handler notification changes.
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * Drains the lockless pending-notification list (LIFO as queued), reverses it
 * to FIFO order, dispatches each record to the matching remR3NotifyHandler*
 * worker, and returns each record to the shared free list as it is processed.
 *
 * @param   pVM         VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;   /* Records seen; cross-checked during processing. */
#endif

        /* Lockless purging of pending notifications. Atomically detach the
           whole list; producers may start a fresh one concurrently. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch to the worker matching the queued notification kind. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (CAS push: other EMTs may pop free entries concurrently.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3068
3069
/**
 * Notify REM about changed code page.
 *
 * No-op unless VBOX_REM_PROT ECT_PAGES_FROM_SMC is defined; always returns
 * VINF_SUCCESS either way.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pvCodePage  Code page address (guest virtual).
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
3108
3109
3110/**
3111 * Notification about a successful MMR3PhysRegister() call.
3112 *
3113 * @param pVM VM handle.
3114 * @param GCPhys The physical address the RAM.
3115 * @param cb Size of the memory.
3116 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3117 */
3118REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3119{
3120 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3121 VM_ASSERT_EMT(pVM);
3122
3123 /*
3124 * Validate input - we trust the caller.
3125 */
3126 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3127 Assert(cb);
3128 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3129 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3130
3131 /*
3132 * Base ram? Update GCPhysLastRam.
3133 */
3134 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3135 {
3136 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3137 {
3138 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3139 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3140 }
3141 }
3142
3143 /*
3144 * Register the ram.
3145 */
3146 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3147
3148 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3149 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3150 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3151
3152 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3153}
3154
3155
/**
 * Notification about a successful MMR3PhysRomRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address of the ROM.
 * @param   cb          The size of the ROM.
 * @param   pvCopy      Pointer to the ROM copy.  (Unused by this
 *                      implementation; kept for interface compatibility.)
 * @param   fShadow     Whether it's currently writable shadow ROM or normal readonly ROM.
 *                      This function will be called when ever the protection of the
 *                      shadow ROM changes (at reset and end of POST).
 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Register the rom.
     * Writable shadow ROM is registered as plain RAM; readonly gets IO_MEM_ROM.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3190
3191
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address.
 * @param   cb          The size of the range.
 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassign the memory (mark the range IO_MEM_UNASSIGNED in QEMU).
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3222
3223
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 * (Internal worker; may also be invoked via the replay queue.)
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *
 * @remark  MMR3PhysRomRegister assumes that this function will not apply the
 *          Handler memory type to memory which has no HC handler.
 */
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));

    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /* MMIO ranges get the MMIO memory type; other handlers only get the
       handler memory type when there is an HC callback (see @remark above). */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3257
3258/**
3259 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3260 *
3261 * @param pVM VM Handle.
3262 * @param enmType Handler type.
3263 * @param GCPhys Handler range address.
3264 * @param cb Size of the handler range.
3265 * @param fHasHCHandler Set if the handler has a HC callback function.
3266 *
3267 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3268 * Handler memory type to memory which has no HC handler.
3269 */
3270REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3271{
3272 REMR3ReplayHandlerNotifications(pVM);
3273
3274 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3275}
3276
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 * (Internal worker; may also be invoked via the replay queue.)
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Ranges not restored as RAM must lie above the RAM area. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3318
3319/**
3320 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3321 *
3322 * @param pVM VM Handle.
3323 * @param enmType Handler type.
3324 * @param GCPhys Handler range address.
3325 * @param cb Size of the handler range.
3326 * @param fHasHCHandler Set if the handler has a HC callback function.
3327 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3328 */
3329REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3330{
3331 REMR3ReplayHandlerNotifications(pVM);
3332 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3333}
3334
3335
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 * (Internal worker; may also be invoked via the replay queue.)
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.  Must not be PGMPHYSHANDLERTYPE_MMIO.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *                          No-op when clear.
 * @param   fRestoreAsRAM   Whether to restore the old range as normal RAM or
 *                          as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    if (fHasHCHandler)
    {
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

        /*
         * Reset the old page.
         */
        PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
        PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }
}
3383
3384/**
3385 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3386 *
3387 * @param pVM VM Handle.
3388 * @param enmType Handler type.
3389 * @param GCPhysOld Old handler range address.
3390 * @param GCPhysNew New handler range address.
3391 * @param cb Size of the handler range.
3392 * @param fHasHCHandler Set if the handler has a HC callback function.
3393 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3394 */
3395REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3396{
3397 REMR3ReplayHandlerNotifications(pVM);
3398
3399 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3400}
3401
3402/**
3403 * Checks if we're handling access to this page or not.
3404 *
3405 * @returns true if we're trapping access.
3406 * @returns false if we aren't.
3407 * @param pVM The VM handle.
3408 * @param GCPhys The physical address.
3409 *
3410 * @remark This function will only work correctly in VBOX_STRICT builds!
3411 */
3412REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3413{
3414#ifdef VBOX_STRICT
3415 unsigned long off;
3416 REMR3ReplayHandlerNotifications(pVM);
3417
3418 off = get_phys_page_offset(GCPhys);
3419 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3420 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3421 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3422#else
3423 return false;
3424#endif
3425}
3426
3427
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.  Does not return at
 *          all on the MMIO/unknown-type path (cpu_abort / AssertFatalFailed).
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry (memory type bits + page address).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Fatal: dump the handler/MMIO/phys state to the release log, then abort. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3466
/**
 * Read guest RAM and ROM.
 *
 * @param   SrcGCPhys       The source address (guest physical).
 * @param   pvDst           The destination address.
 * @param   cb              Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    /* NOTE(review): here the log runs before STAM_PROFILE_ADV_STOP, while the
       sized read helpers below log after it — harmless, but inconsistent. */
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3484
3485
3486/**
3487 * Read guest RAM and ROM, unsigned 8-bit.
3488 *
3489 * @param SrcGCPhys The source address (guest physical).
3490 */
3491RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3492{
3493 uint8_t val;
3494 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3495 VBOX_CHECK_ADDR(SrcGCPhys);
3496 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3497 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3498#ifdef VBOX_DEBUG_PHYS
3499 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3500#endif
3501 return val;
3502}
3503
3504
3505/**
3506 * Read guest RAM and ROM, signed 8-bit.
3507 *
3508 * @param SrcGCPhys The source address (guest physical).
3509 */
3510RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3511{
3512 int8_t val;
3513 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3514 VBOX_CHECK_ADDR(SrcGCPhys);
3515 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3516 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3517#ifdef VBOX_DEBUG_PHYS
3518 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3519#endif
3520 return val;
3521}
3522
3523
3524/**
3525 * Read guest RAM and ROM, unsigned 16-bit.
3526 *
3527 * @param SrcGCPhys The source address (guest physical).
3528 */
3529RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3530{
3531 uint16_t val;
3532 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3533 VBOX_CHECK_ADDR(SrcGCPhys);
3534 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3535 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3536#ifdef VBOX_DEBUG_PHYS
3537 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3538#endif
3539 return val;
3540}
3541
3542
3543/**
3544 * Read guest RAM and ROM, signed 16-bit.
3545 *
3546 * @param SrcGCPhys The source address (guest physical).
3547 */
3548RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3549{
3550 int16_t val;
3551 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3552 VBOX_CHECK_ADDR(SrcGCPhys);
3553 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3554 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3555#ifdef VBOX_DEBUG_PHYS
3556 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3557#endif
3558 return val;
3559}
3560
3561
3562/**
3563 * Read guest RAM and ROM, unsigned 32-bit.
3564 *
3565 * @param SrcGCPhys The source address (guest physical).
3566 */
3567RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3568{
3569 uint32_t val;
3570 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3571 VBOX_CHECK_ADDR(SrcGCPhys);
3572 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3573 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3574#ifdef VBOX_DEBUG_PHYS
3575 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3576#endif
3577 return val;
3578}
3579
3580
3581/**
3582 * Read guest RAM and ROM, signed 32-bit.
3583 *
3584 * @param SrcGCPhys The source address (guest physical).
3585 */
3586RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3587{
3588 int32_t val;
3589 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3590 VBOX_CHECK_ADDR(SrcGCPhys);
3591 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3592 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3593#ifdef VBOX_DEBUG_PHYS
3594 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3595#endif
3596 return val;
3597}
3598
3599
3600/**
3601 * Read guest RAM and ROM, unsigned 64-bit.
3602 *
3603 * @param SrcGCPhys The source address (guest physical).
3604 */
3605uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3606{
3607 uint64_t val;
3608 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3609 VBOX_CHECK_ADDR(SrcGCPhys);
3610 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3611 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3612#ifdef VBOX_DEBUG_PHYS
3613 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3614#endif
3615 return val;
3616}
3617
3618
3619/**
3620 * Read guest RAM and ROM, signed 64-bit.
3621 *
3622 * @param SrcGCPhys The source address (guest physical).
3623 */
3624int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3625{
3626 int64_t val;
3627 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3628 VBOX_CHECK_ADDR(SrcGCPhys);
3629 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3630 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3631#ifdef VBOX_DEBUG_PHYS
3632 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3633#endif
3634 return val;
3635}
3636
3637
/**
 * Write guest RAM.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   pvSrc           The source address.
 * @param   cb              Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3655
3656
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3673
3674
/**
 * Write guest RAM, unsigned 16-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3691
3692
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3709
3710
3711/**
3712 * Write guest RAM, unsigned 64-bit.
3713 *
3714 * @param DstGCPhys The destination address (guest physical).
3715 * @param val Value
3716 */
3717void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3718{
3719 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3720 VBOX_CHECK_ADDR(DstGCPhys);
3721 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3722 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3723#ifdef VBOX_DEBUG_PHYS
3724 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3725#endif
3726}
3727
3728#undef LOG_GROUP
3729#define LOG_GROUP LOG_GROUP_REM_MMIO
3730
3731/** Read MMIO memory. */
3732static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3733{
3734 uint32_t u32 = 0;
3735 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3736 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3737 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3738 return u32;
3739}
3740
3741/** Read MMIO memory. */
3742static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3743{
3744 uint32_t u32 = 0;
3745 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3746 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3747 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3748 return u32;
3749}
3750
3751/** Read MMIO memory. */
3752static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3753{
3754 uint32_t u32 = 0;
3755 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3756 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3757 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3758 return u32;
3759}
3760
3761/** Write to MMIO memory. */
3762static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3763{
3764 int rc;
3765 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3766 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3767 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3768}
3769
3770/** Write to MMIO memory. */
3771static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3772{
3773 int rc;
3774 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3775 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3776 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3777}
3778
3779/** Write to MMIO memory. */
3780static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3781{
3782 int rc;
3783 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3784 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3785 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3786}
3787
3788
3789#undef LOG_GROUP
3790#define LOG_GROUP LOG_GROUP_REM_HANDLER
3791
/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */

/** Reads one byte via the access-handler path.
 *  The PGMPhysRead status is deliberately ignored (best-effort LFB access). */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}

/** Reads one 16-bit word via the access-handler path; PGMPhysRead status ignored. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}

/** Reads one 32-bit word via the access-handler path; PGMPhysRead status ignored. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}

/** Writes one byte via the access-handler path.
 *  NOTE(review): passes &u32 but writes only sizeof(uint8_t) bytes, i.e. the first
 *  byte of the uint32_t — this picks the least significant byte only on a
 *  little-endian host; confirm that assumption holds for all supported hosts. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}

/** Writes one 16-bit word via the access-handler path.
 *  NOTE(review): same little-endian low-half assumption as remR3HandlerWriteU8. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}

/** Writes one 32-bit word via the access-handler path; PGMPhysWrite status ignored. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3835
3836/* -+- disassembly -+- */
3837
3838#undef LOG_GROUP
3839#define LOG_GROUP LOG_GROUP_REM_DISAS
3840
3841
/**
 * Enables or disables singled stepped disassembly.
 *
 * Worker for REMR3DisasEnableStepping; must run on the EMT.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM         VM handle.
 * @param   fEnable     To enable set this flag, to disable clear it.
 */
static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Toggle the single-step flag in the recompiler CPU state. */
    if (fEnable)
        pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
    else
        pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    /* Optionally mirror the setting into QEMU's own single-step machinery. */
    cpu_single_step(&pVM->rem.s.Env, fEnable);
#endif
    return VINF_SUCCESS;
}
3863
3864
3865/**
3866 * Enables or disables singled stepped disassembly.
3867 *
3868 * @returns VBox status code.
3869 * @param pVM VM handle.
3870 * @param fEnable To enable set this flag, to disable clear it.
3871 */
3872REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3873{
3874 int rc;
3875
3876 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3877 if (VM_IS_EMT(pVM))
3878 return remR3DisasEnableStepping(pVM, fEnable);
3879
3880 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3881 AssertRC(rc);
3882 return rc;
3883}
3884
3885
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument it reports the current setting; with one boolean argument
 * it switches single-stepped disassembly on or off via REMR3DisasEnableStepping.
 *
 * @returns VBox status code (from the DBGC helpers / REMR3DisasEnableStepping).
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Debugger console callback table.
 * @param   pVM         VM handle.
 * @param   paArgs      Argument array (0 or 1 entries).
 * @param   cArgs       Number of arguments.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    int rc;

    if (cArgs == 0)
        /*
         * Print the current status.
         */
        rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
                              pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
    else
    {
        /*
         * Convert the argument and change the mode.
         */
        bool fEnable;
        rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
        if (RT_SUCCESS(rc))
        {
            rc = REMR3DisasEnableStepping(pVM, fEnable);
            if (RT_SUCCESS(rc))
                rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
            else
                rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
        }
        else
            rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
    }
    return rc;
}
#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3921
3922
3923/**
3924 * Disassembles one instruction and prints it to the log.
3925 *
3926 * @returns Success indicator.
3927 * @param env Pointer to the recompiler CPU structure.
3928 * @param f32BitCode Indicates that whether or not the code should
3929 * be disassembled as 16 or 32 bit. If -1 the CS
3930 * selector will be inspected.
3931 * @param pszPrefix
3932 */
3933bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3934{
3935 PVM pVM = env->pVM;
3936 const bool fLog = LogIsEnabled();
3937 const bool fLog2 = LogIs2Enabled();
3938 int rc = VINF_SUCCESS;
3939
3940 /*
3941 * Don't bother if there ain't any log output to do.
3942 */
3943 if (!fLog && !fLog2)
3944 return true;
3945
3946 /*
3947 * Update the state so DBGF reads the correct register values.
3948 */
3949 remR3StateUpdate(pVM, env->pVCpu);
3950
3951 /*
3952 * Log registers if requested.
3953 */
3954 if (fLog2)
3955 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3956
3957 /*
3958 * Disassemble to log.
3959 */
3960 if (fLog)
3961 {
3962 PVMCPU pVCpu = VMMGetCpu(pVM);
3963 char szBuf[256];
3964 szBuf[0] = '\0';
3965 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3966 pVCpu->idCpu,
3967 0, /* Sel */
3968 0, /* GCPtr */
3969 DBGF_DISAS_FLAGS_CURRENT_GUEST
3970 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3971 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3972 szBuf,
3973 sizeof(szBuf),
3974 NULL);
3975 if (RT_FAILURE(rc))
3976 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3977 if (pszPrefix && *pszPrefix)
3978 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3979 else
3980 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3981 }
3982
3983 return RT_SUCCESS(rc);
3984}
3985
3986
/**
 * Disassemble recompiled code.
 *
 * Only produces output when level-2 logging is enabled (unless
 * DEBUG_TMP_LOGGING routes it to the given file instead).
 *
 * @param   phFile      Ignored, logfile usually.
 * @param   pvCode      Pointer to the code block.
 * @param   cb          Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
        /* Disassemble in the host's native mode - this is host code, not guest code. */
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                /* On failure skip a single byte and retry (except on AMD64, see below). */
                DISAS_PRINTF("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
4034
4035
4036/**
4037 * Disassemble guest code.
4038 *
4039 * @param phFileIgnored Ignored, logfile usually.
4040 * @param uCode The guest address of the code to disassemble. (flat?)
4041 * @param cb Number of bytes to disassemble.
4042 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4043 */
4044void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4045{
4046#ifdef DEBUG_TMP_LOGGING
4047# define DISAS_PRINTF(x...) fprintf(phFile, x)
4048#else
4049# define DISAS_PRINTF(x...) RTLogPrintf(x)
4050 if (LogIs2Enabled())
4051#endif
4052 {
4053 PVM pVM = cpu_single_env->pVM;
4054 PVMCPU pVCpu = cpu_single_env->pVCpu;
4055 RTSEL cs;
4056 RTGCUINTPTR eip;
4057
4058 Assert(pVCpu);
4059
4060 /*
4061 * Update the state so DBGF reads the correct register values (flags).
4062 */
4063 remR3StateUpdate(pVM, pVCpu);
4064
4065 /*
4066 * Do the disassembling.
4067 */
4068 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4069 cs = cpu_single_env->segs[R_CS].selector;
4070 eip = uCode - cpu_single_env->segs[R_CS].base;
4071 for (;;)
4072 {
4073 char szBuf[256];
4074 uint32_t cbInstr;
4075 int rc = DBGFR3DisasInstrEx(pVM,
4076 pVCpu->idCpu,
4077 cs,
4078 eip,
4079 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4080 szBuf, sizeof(szBuf),
4081 &cbInstr);
4082 if (RT_SUCCESS(rc))
4083 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
4084 else
4085 {
4086 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4087 cbInstr = 1;
4088 }
4089
4090 /* next */
4091 if (cb <= cbInstr)
4092 break;
4093 cb -= cbInstr;
4094 uCode += cbInstr;
4095 eip += cbInstr;
4096 }
4097 }
4098#undef DISAS_PRINTF
4099}
4100
4101
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer, so the result is
 *          overwritten by the next call and the function is not reentrant.
 *          Note that the formatted name includes a trailing newline.
 * @param   orig_addr   The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    PVM pVM = cpu_single_env->pVM;
    RTGCINTPTR off = 0;
    RTDBGSYMBOL Sym;
    DBGFADDRESS Addr;

    int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
    if (RT_SUCCESS(rc))
    {
        static char szSym[sizeof(Sym.szName) + 48];
        /* Append the signed offset from the symbol start, if any.
           NOTE(review): 'off' is RTGCINTPTR but formatted with %x - verify the
           width matches on 64-bit guest-context builds. */
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
4129
4130
4131#undef LOG_GROUP
4132#define LOG_GROUP LOG_GROUP_REM
4133
4134
4135/* -+- FF notifications -+- */
4136
4137
/**
 * Notification about a pending interrupt.
 *
 * Asserts that no other interrupt is already pending before recording it.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @param   u8Interrupt     The interrupt vector to record as pending.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
4151
/**
 * Queries the currently pending interrupt (does not clear it).
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    return pVM->rem.s.u32PendingInterrupt;
}
4164
/**
 * Notification about the interrupt FF being set.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        /* Interrupt the recompiler so it notices the pending hardware interrupt. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_HARD);
    }
}
4182
4183
/**
 * Notification about the interrupt FF being cleared.
 * (The original header said "being set" - copy/paste; the body resets the
 * hard interrupt request.)
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
4197
4198
/**
 * Notification about pending timer(s).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpuDst        The target cpu for this notification.
 *                          TM will not broadcast pending timer events, but use
 *                          a dedicated EMT for them. So, only interrupt REM
 *                          execution if the given CPU is executing in REM.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
{
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        /* Only poke the recompiler when it is actually running the target VCPU. */
        if (pVM->rem.s.Env.pVCpu == pVCpuDst)
        {
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
            ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_TIMER);
        }
        else
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
    }
    else
        LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
}
4228
4229
/**
 * Notification about pending DMA transfers.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Interrupt the recompiler so the DMA work gets serviced. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_DMA);
    }
}
4245
4246
/**
 * Notification about pending queue items.
 * (The original header said "pending timer(s)" - copy/paste; this forces a
 * REM exit so the queues get processed.)
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
4262
4263
/**
 * Notification about pending FF set by an external thread.
 *
 * Forces the recompiler to exit so the force-action flags get serviced.
 *
 * @param   pVM             VM handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
4279
4280
4281#ifdef VBOX_WITH_STATISTICS
4282void remR3ProfileStart(int statcode)
4283{
4284 STAMPROFILEADV *pStat;
4285 switch(statcode)
4286 {
4287 case STATS_EMULATE_SINGLE_INSTR:
4288 pStat = &gStatExecuteSingleInstr;
4289 break;
4290 case STATS_QEMU_COMPILATION:
4291 pStat = &gStatCompilationQEmu;
4292 break;
4293 case STATS_QEMU_RUN_EMULATED_CODE:
4294 pStat = &gStatRunCodeQEmu;
4295 break;
4296 case STATS_QEMU_TOTAL:
4297 pStat = &gStatTotalTimeQEmu;
4298 break;
4299 case STATS_QEMU_RUN_TIMERS:
4300 pStat = &gStatTimers;
4301 break;
4302 case STATS_TLB_LOOKUP:
4303 pStat= &gStatTBLookup;
4304 break;
4305 case STATS_IRQ_HANDLING:
4306 pStat= &gStatIRQ;
4307 break;
4308 case STATS_RAW_CHECK:
4309 pStat = &gStatRawCheck;
4310 break;
4311
4312 default:
4313 AssertMsgFailed(("unknown stat %d\n", statcode));
4314 return;
4315 }
4316 STAM_PROFILE_ADV_START(pStat, a);
4317}
4318
4319
4320void remR3ProfileStop(int statcode)
4321{
4322 STAMPROFILEADV *pStat;
4323 switch(statcode)
4324 {
4325 case STATS_EMULATE_SINGLE_INSTR:
4326 pStat = &gStatExecuteSingleInstr;
4327 break;
4328 case STATS_QEMU_COMPILATION:
4329 pStat = &gStatCompilationQEmu;
4330 break;
4331 case STATS_QEMU_RUN_EMULATED_CODE:
4332 pStat = &gStatRunCodeQEmu;
4333 break;
4334 case STATS_QEMU_TOTAL:
4335 pStat = &gStatTotalTimeQEmu;
4336 break;
4337 case STATS_QEMU_RUN_TIMERS:
4338 pStat = &gStatTimers;
4339 break;
4340 case STATS_TLB_LOOKUP:
4341 pStat= &gStatTBLookup;
4342 break;
4343 case STATS_IRQ_HANDLING:
4344 pStat= &gStatIRQ;
4345 break;
4346 case STATS_RAW_CHECK:
4347 pStat = &gStatRawCheck;
4348 break;
4349 default:
4350 AssertMsgFailed(("unknown stat %d\n", statcode));
4351 return;
4352 }
4353 STAM_PROFILE_ADV_STOP(pStat, a);
4354}
4355#endif
4356
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code and interrupts the recompiler so the REM loop
 * picks it up and returns it to the caller.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4371
4372
4373/* -+- timers -+- */
4374
/** QEMU callback: reads the virtual TSC for the current VCPU via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4380
4381
4382/* -+- interrupts -+- */
4383
/** QEMU callback: asserts the FERR line by raising ISA IRQ 13. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4389
/**
 * QEMU callback: fetches the next interrupt vector from the (A)PIC.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the interrupt previously recorded by REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD raised while more interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4423
4424
4425/* -+- local apic -+- */
4426
4427#if 0 /* CPUMSetGuestMsr does this now. */
4428void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4429{
4430 int rc = PDMApicSetBase(env->pVM, val);
4431 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4432}
4433#endif
4434
4435uint64_t cpu_get_apic_base(CPUX86State *env)
4436{
4437 uint64_t u64;
4438 int rc = PDMApicGetBase(env->pVM, &u64);
4439 if (RT_SUCCESS(rc))
4440 {
4441 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4442 return u64;
4443 }
4444 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4445 return 0;
4446}
4447
/** QEMU callback: writes the task priority register via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4453
4454uint8_t cpu_get_apic_tpr(CPUX86State *env)
4455{
4456 uint8_t u8;
4457 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4458 if (RT_SUCCESS(rc))
4459 {
4460 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4461 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4462 }
4463 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4464 return 0;
4465}
4466
/**
 * Read an MSR.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env     The cpu state.
 * @param   idMsr   The MSR to read.
 * @param   puValue Where to return the value.
 */
int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
{
    Assert(env->pVCpu);
    /* Map VINF_SUCCESS to 0; any other status becomes -1 (#GP(0) in the caller). */
    return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
}
4481
/**
 * Write to an MSR.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env     The cpu state.
 * @param   idMsr   The MSR to write.
 * @param   uValue  The value to write.
 */
int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
{
    Assert(env->pVCpu);
    /* Map VINF_SUCCESS to 0; any other status becomes -1 (#GP(0) in the caller). */
    return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
}
4496
4497/* -+- I/O Ports -+- */
4498
4499#undef LOG_GROUP
4500#define LOG_GROUP LOG_GROUP_REM_IOPORT
4501
4502void cpu_outb(CPUState *env, int addr, int val)
4503{
4504 int rc;
4505
4506 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4507 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4508
4509 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4510 if (RT_LIKELY(rc == VINF_SUCCESS))
4511 return;
4512 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4513 {
4514 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4515 remR3RaiseRC(env->pVM, rc);
4516 return;
4517 }
4518 remAbort(rc, __FUNCTION__);
4519}
4520
4521void cpu_outw(CPUState *env, int addr, int val)
4522{
4523 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4524 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4525 if (RT_LIKELY(rc == VINF_SUCCESS))
4526 return;
4527 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4528 {
4529 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4530 remR3RaiseRC(env->pVM, rc);
4531 return;
4532 }
4533 remAbort(rc, __FUNCTION__);
4534}
4535
4536void cpu_outl(CPUState *env, int addr, int val)
4537{
4538 int rc;
4539 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4540 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4541 if (RT_LIKELY(rc == VINF_SUCCESS))
4542 return;
4543 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4544 {
4545 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4546 remR3RaiseRC(env->pVM, rc);
4547 return;
4548 }
4549 remAbort(rc, __FUNCTION__);
4550}
4551
4552int cpu_inb(CPUState *env, int addr)
4553{
4554 uint32_t u32 = 0;
4555 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4556 if (RT_LIKELY(rc == VINF_SUCCESS))
4557 {
4558 if (/*addr != 0x61 && */addr != 0x71)
4559 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4560 return (int)u32;
4561 }
4562 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4563 {
4564 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4565 remR3RaiseRC(env->pVM, rc);
4566 return (int)u32;
4567 }
4568 remAbort(rc, __FUNCTION__);
4569 return 0xff;
4570}
4571
4572int cpu_inw(CPUState *env, int addr)
4573{
4574 uint32_t u32 = 0;
4575 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4576 if (RT_LIKELY(rc == VINF_SUCCESS))
4577 {
4578 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4579 return (int)u32;
4580 }
4581 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4582 {
4583 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4584 remR3RaiseRC(env->pVM, rc);
4585 return (int)u32;
4586 }
4587 remAbort(rc, __FUNCTION__);
4588 return 0xffff;
4589}
4590
4591int cpu_inl(CPUState *env, int addr)
4592{
4593 uint32_t u32 = 0;
4594 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4595 if (RT_LIKELY(rc == VINF_SUCCESS))
4596 {
4597//if (addr==0x01f0 && u32 == 0x6b6d)
4598// loglevel = ~0;
4599 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4600 return (int)u32;
4601 }
4602 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4603 {
4604 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4605 remR3RaiseRC(env->pVM, rc);
4606 return (int)u32;
4607 }
4608 remAbort(rc, __FUNCTION__);
4609 return 0xffffffff;
4610}
4611
4612#undef LOG_GROUP
4613#define LOG_GROUP LOG_GROUP_REM
4614
4615
4616/* -+- helpers and misc other interfaces -+- */
4617
/**
 * Perform the CPUID instruction.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   idx         The CPUID leaf (eax).
 * @param   idxSub      The CPUID sub-leaf (ecx) where applicable.
 *                      Currently ignored - CPUMGetGuestCpuId is called
 *                      with the leaf only.
 * @param   pEAX        Where to store eax.
 * @param   pEBX        Where to store ebx.
 * @param   pECX        Where to store ecx.
 * @param   pEDX        Where to store edx.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
                   uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
{
    NOREF(idxSub);
    CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
}
4635
4636
4637#if 0 /* not used */
4638/**
4639 * Interface for qemu hardware to report back fatal errors.
4640 */
4641void hw_error(const char *pszFormat, ...)
4642{
4643 /*
4644 * Bitch about it.
4645 */
4646 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4647 * this in my Odin32 tree at home! */
4648 va_list args;
4649 va_start(args, pszFormat);
4650 RTLogPrintf("fatal error in virtual hardware:");
4651 RTLogPrintfV(pszFormat, args);
4652 va_end(args);
4653 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4654
4655 /*
4656 * If we're in REM context we'll sync back the state before 'jumping' to
4657 * the EMs failure handling.
4658 */
4659 PVM pVM = cpu_single_env->pVM;
4660 if (pVM->rem.s.fInREM)
4661 REMR3StateBack(pVM);
4662 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4663 AssertMsgFailed(("EMR3FatalError returned!\n"));
4664}
4665#endif
4666
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Formats the message, logs it, syncs the REM state back (if inside REM) and
 * hands control to EM's fatal error handling. Does not return.
 *
 * @param   env         The recompiler CPU state.
 * @param   pszFormat   printf-style format string.
 * @param   ...         Format arguments.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM     pVM;
    PVMCPU  pVCpu;
    char    szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Count the '%' specifiers and pull that many pointer-sized args off the
       va_list so the message can be re-formatted with RTStrPrintf below. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM   = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4727
4728
/**
 * Aborts the VM.
 *
 * Logs, syncs the REM state back (if inside REM) and hands control to EM's
 * fatal error handling. Does not return.
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM     pVM;
    PVMCPU  pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM   = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4759
4760
4761/**
4762 * Dumps a linux system call.
4763 * @param pVCpu VMCPU handle.
4764 */
4765void remR3DumpLnxSyscall(PVMCPU pVCpu)
4766{
4767 static const char *apsz[] =
4768 {
4769 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4770 "sys_exit",
4771 "sys_fork",
4772 "sys_read",
4773 "sys_write",
4774 "sys_open", /* 5 */
4775 "sys_close",
4776 "sys_waitpid",
4777 "sys_creat",
4778 "sys_link",
4779 "sys_unlink", /* 10 */
4780 "sys_execve",
4781 "sys_chdir",
4782 "sys_time",
4783 "sys_mknod",
4784 "sys_chmod", /* 15 */
4785 "sys_lchown16",
4786 "sys_ni_syscall", /* old break syscall holder */
4787 "sys_stat",
4788 "sys_lseek",
4789 "sys_getpid", /* 20 */
4790 "sys_mount",
4791 "sys_oldumount",
4792 "sys_setuid16",
4793 "sys_getuid16",
4794 "sys_stime", /* 25 */
4795 "sys_ptrace",
4796 "sys_alarm",
4797 "sys_fstat",
4798 "sys_pause",
4799 "sys_utime", /* 30 */
4800 "sys_ni_syscall", /* old stty syscall holder */
4801 "sys_ni_syscall", /* old gtty syscall holder */
4802 "sys_access",
4803 "sys_nice",
4804 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4805 "sys_sync",
4806 "sys_kill",
4807 "sys_rename",
4808 "sys_mkdir",
4809 "sys_rmdir", /* 40 */
4810 "sys_dup",
4811 "sys_pipe",
4812 "sys_times",
4813 "sys_ni_syscall", /* old prof syscall holder */
4814 "sys_brk", /* 45 */
4815 "sys_setgid16",
4816 "sys_getgid16",
4817 "sys_signal",
4818 "sys_geteuid16",
4819 "sys_getegid16", /* 50 */
4820 "sys_acct",
4821 "sys_umount", /* recycled never used phys() */
4822 "sys_ni_syscall", /* old lock syscall holder */
4823 "sys_ioctl",
4824 "sys_fcntl", /* 55 */
4825 "sys_ni_syscall", /* old mpx syscall holder */
4826 "sys_setpgid",
4827 "sys_ni_syscall", /* old ulimit syscall holder */
4828 "sys_olduname",
4829 "sys_umask", /* 60 */
4830 "sys_chroot",
4831 "sys_ustat",
4832 "sys_dup2",
4833 "sys_getppid",
4834 "sys_getpgrp", /* 65 */
4835 "sys_setsid",
4836 "sys_sigaction",
4837 "sys_sgetmask",
4838 "sys_ssetmask",
4839 "sys_setreuid16", /* 70 */
4840 "sys_setregid16",
4841 "sys_sigsuspend",
4842 "sys_sigpending",
4843 "sys_sethostname",
4844 "sys_setrlimit", /* 75 */
4845 "sys_old_getrlimit",
4846 "sys_getrusage",
4847 "sys_gettimeofday",
4848 "sys_settimeofday",
4849 "sys_getgroups16", /* 80 */
4850 "sys_setgroups16",
4851 "old_select",
4852 "sys_symlink",
4853 "sys_lstat",
4854 "sys_readlink", /* 85 */
4855 "sys_uselib",
4856 "sys_swapon",
4857 "sys_reboot",
4858 "old_readdir",
4859 "old_mmap", /* 90 */
4860 "sys_munmap",
4861 "sys_truncate",
4862 "sys_ftruncate",
4863 "sys_fchmod",
4864 "sys_fchown16", /* 95 */
4865 "sys_getpriority",
4866 "sys_setpriority",
4867 "sys_ni_syscall", /* old profil syscall holder */
4868 "sys_statfs",
4869 "sys_fstatfs", /* 100 */
4870 "sys_ioperm",
4871 "sys_socketcall",
4872 "sys_syslog",
4873 "sys_setitimer",
4874 "sys_getitimer", /* 105 */
4875 "sys_newstat",
4876 "sys_newlstat",
4877 "sys_newfstat",
4878 "sys_uname",
4879 "sys_iopl", /* 110 */
4880 "sys_vhangup",
4881 "sys_ni_syscall", /* old "idle" system call */
4882 "sys_vm86old",
4883 "sys_wait4",
4884 "sys_swapoff", /* 115 */
4885 "sys_sysinfo",
4886 "sys_ipc",
4887 "sys_fsync",
4888 "sys_sigreturn",
4889 "sys_clone", /* 120 */
4890 "sys_setdomainname",
4891 "sys_newuname",
4892 "sys_modify_ldt",
4893 "sys_adjtimex",
4894 "sys_mprotect", /* 125 */
4895 "sys_sigprocmask",
4896 "sys_ni_syscall", /* old "create_module" */
4897 "sys_init_module",
4898 "sys_delete_module",
4899 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4900 "sys_quotactl",
4901 "sys_getpgid",
4902 "sys_fchdir",
4903 "sys_bdflush",
4904 "sys_sysfs", /* 135 */
4905 "sys_personality",
4906 "sys_ni_syscall", /* reserved for afs_syscall */
4907 "sys_setfsuid16",
4908 "sys_setfsgid16",
4909 "sys_llseek", /* 140 */
4910 "sys_getdents",
4911 "sys_select",
4912 "sys_flock",
4913 "sys_msync",
4914 "sys_readv", /* 145 */
4915 "sys_writev",
4916 "sys_getsid",
4917 "sys_fdatasync",
4918 "sys_sysctl",
4919 "sys_mlock", /* 150 */
4920 "sys_munlock",
4921 "sys_mlockall",
4922 "sys_munlockall",
4923 "sys_sched_setparam",
4924 "sys_sched_getparam", /* 155 */
4925 "sys_sched_setscheduler",
4926 "sys_sched_getscheduler",
4927 "sys_sched_yield",
4928 "sys_sched_get_priority_max",
4929 "sys_sched_get_priority_min", /* 160 */
4930 "sys_sched_rr_get_interval",
4931 "sys_nanosleep",
4932 "sys_mremap",
4933 "sys_setresuid16",
4934 "sys_getresuid16", /* 165 */
4935 "sys_vm86",
4936 "sys_ni_syscall", /* Old sys_query_module */
4937 "sys_poll",
4938 "sys_nfsservctl",
4939 "sys_setresgid16", /* 170 */
4940 "sys_getresgid16",
4941 "sys_prctl",
4942 "sys_rt_sigreturn",
4943 "sys_rt_sigaction",
4944 "sys_rt_sigprocmask", /* 175 */
4945 "sys_rt_sigpending",
4946 "sys_rt_sigtimedwait",
4947 "sys_rt_sigqueueinfo",
4948 "sys_rt_sigsuspend",
4949 "sys_pread64", /* 180 */
4950 "sys_pwrite64",
4951 "sys_chown16",
4952 "sys_getcwd",
4953 "sys_capget",
4954 "sys_capset", /* 185 */
4955 "sys_sigaltstack",
4956 "sys_sendfile",
4957 "sys_ni_syscall", /* reserved for streams1 */
4958 "sys_ni_syscall", /* reserved for streams2 */
4959 "sys_vfork", /* 190 */
4960 "sys_getrlimit",
4961 "sys_mmap2",
4962 "sys_truncate64",
4963 "sys_ftruncate64",
4964 "sys_stat64", /* 195 */
4965 "sys_lstat64",
4966 "sys_fstat64",
4967 "sys_lchown",
4968 "sys_getuid",
4969 "sys_getgid", /* 200 */
4970 "sys_geteuid",
4971 "sys_getegid",
4972 "sys_setreuid",
4973 "sys_setregid",
4974 "sys_getgroups", /* 205 */
4975 "sys_setgroups",
4976 "sys_fchown",
4977 "sys_setresuid",
4978 "sys_getresuid",
4979 "sys_setresgid", /* 210 */
4980 "sys_getresgid",
4981 "sys_chown",
4982 "sys_setuid",
4983 "sys_setgid",
4984 "sys_setfsuid", /* 215 */
4985 "sys_setfsgid",
4986 "sys_pivot_root",
4987 "sys_mincore",
4988 "sys_madvise",
4989 "sys_getdents64", /* 220 */
4990 "sys_fcntl64",
4991 "sys_ni_syscall", /* reserved for TUX */
4992 "sys_ni_syscall",
4993 "sys_gettid",
4994 "sys_readahead", /* 225 */
4995 "sys_setxattr",
4996 "sys_lsetxattr",
4997 "sys_fsetxattr",
4998 "sys_getxattr",
4999 "sys_lgetxattr", /* 230 */
5000 "sys_fgetxattr",
5001 "sys_listxattr",
5002 "sys_llistxattr",
5003 "sys_flistxattr",
5004 "sys_removexattr", /* 235 */
5005 "sys_lremovexattr",
5006 "sys_fremovexattr",
5007 "sys_tkill",
5008 "sys_sendfile64",
5009 "sys_futex", /* 240 */
5010 "sys_sched_setaffinity",
5011 "sys_sched_getaffinity",
5012 "sys_set_thread_area",
5013 "sys_get_thread_area",
5014 "sys_io_setup", /* 245 */
5015 "sys_io_destroy",
5016 "sys_io_getevents",
5017 "sys_io_submit",
5018 "sys_io_cancel",
5019 "sys_fadvise64", /* 250 */
5020 "sys_ni_syscall",
5021 "sys_exit_group",
5022 "sys_lookup_dcookie",
5023 "sys_epoll_create",
5024 "sys_epoll_ctl", /* 255 */
5025 "sys_epoll_wait",
5026 "sys_remap_file_pages",
5027 "sys_set_tid_address",
5028 "sys_timer_create",
5029 "sys_timer_settime", /* 260 */
5030 "sys_timer_gettime",
5031 "sys_timer_getoverrun",
5032 "sys_timer_delete",
5033 "sys_clock_settime",
5034 "sys_clock_gettime", /* 265 */
5035 "sys_clock_getres",
5036 "sys_clock_nanosleep",
5037 "sys_statfs64",
5038 "sys_fstatfs64",
5039 "sys_tgkill", /* 270 */
5040 "sys_utimes",
5041 "sys_fadvise64_64",
5042 "sys_ni_syscall" /* sys_vserver */
5043 };
5044
5045 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5046 switch (uEAX)
5047 {
5048 default:
5049 if (uEAX < RT_ELEMENTS(apsz))
5050 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5051 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5052 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5053 else
5054 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5055 break;
5056
5057 }
5058}
5059
5060
5061/**
5062 * Dumps an OpenBSD system call.
5063 * @param pVCpu VMCPU handle.
5064 */
5065void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5066{
5067 static const char *apsz[] =
5068 {
5069 "SYS_syscall", //0
5070 "SYS_exit", //1
5071 "SYS_fork", //2
5072 "SYS_read", //3
5073 "SYS_write", //4
5074 "SYS_open", //5
5075 "SYS_close", //6
5076 "SYS_wait4", //7
5077 "SYS_8",
5078 "SYS_link", //9
5079 "SYS_unlink", //10
5080 "SYS_11",
5081 "SYS_chdir", //12
5082 "SYS_fchdir", //13
5083 "SYS_mknod", //14
5084 "SYS_chmod", //15
5085 "SYS_chown", //16
5086 "SYS_break", //17
5087 "SYS_18",
5088 "SYS_19",
5089 "SYS_getpid", //20
5090 "SYS_mount", //21
5091 "SYS_unmount", //22
5092 "SYS_setuid", //23
5093 "SYS_getuid", //24
5094 "SYS_geteuid", //25
5095 "SYS_ptrace", //26
5096 "SYS_recvmsg", //27
5097 "SYS_sendmsg", //28
5098 "SYS_recvfrom", //29
5099 "SYS_accept", //30
5100 "SYS_getpeername", //31
5101 "SYS_getsockname", //32
5102 "SYS_access", //33
5103 "SYS_chflags", //34
5104 "SYS_fchflags", //35
5105 "SYS_sync", //36
5106 "SYS_kill", //37
5107 "SYS_38",
5108 "SYS_getppid", //39
5109 "SYS_40",
5110 "SYS_dup", //41
5111 "SYS_opipe", //42
5112 "SYS_getegid", //43
5113 "SYS_profil", //44
5114 "SYS_ktrace", //45
5115 "SYS_sigaction", //46
5116 "SYS_getgid", //47
5117 "SYS_sigprocmask", //48
5118 "SYS_getlogin", //49
5119 "SYS_setlogin", //50
5120 "SYS_acct", //51
5121 "SYS_sigpending", //52
5122 "SYS_osigaltstack", //53
5123 "SYS_ioctl", //54
5124 "SYS_reboot", //55
5125 "SYS_revoke", //56
5126 "SYS_symlink", //57
5127 "SYS_readlink", //58
5128 "SYS_execve", //59
5129 "SYS_umask", //60
5130 "SYS_chroot", //61
5131 "SYS_62",
5132 "SYS_63",
5133 "SYS_64",
5134 "SYS_65",
5135 "SYS_vfork", //66
5136 "SYS_67",
5137 "SYS_68",
5138 "SYS_sbrk", //69
5139 "SYS_sstk", //70
5140 "SYS_61",
5141 "SYS_vadvise", //72
5142 "SYS_munmap", //73
5143 "SYS_mprotect", //74
5144 "SYS_madvise", //75
5145 "SYS_76",
5146 "SYS_77",
5147 "SYS_mincore", //78
5148 "SYS_getgroups", //79
5149 "SYS_setgroups", //80
5150 "SYS_getpgrp", //81
5151 "SYS_setpgid", //82
5152 "SYS_setitimer", //83
5153 "SYS_84",
5154 "SYS_85",
5155 "SYS_getitimer", //86
5156 "SYS_87",
5157 "SYS_88",
5158 "SYS_89",
5159 "SYS_dup2", //90
5160 "SYS_91",
5161 "SYS_fcntl", //92
5162 "SYS_select", //93
5163 "SYS_94",
5164 "SYS_fsync", //95
5165 "SYS_setpriority", //96
5166 "SYS_socket", //97
5167 "SYS_connect", //98
5168 "SYS_99",
5169 "SYS_getpriority", //100
5170 "SYS_101",
5171 "SYS_102",
5172 "SYS_sigreturn", //103
5173 "SYS_bind", //104
5174 "SYS_setsockopt", //105
5175 "SYS_listen", //106
5176 "SYS_107",
5177 "SYS_108",
5178 "SYS_109",
5179 "SYS_110",
5180 "SYS_sigsuspend", //111
5181 "SYS_112",
5182 "SYS_113",
5183 "SYS_114",
5184 "SYS_115",
5185 "SYS_gettimeofday", //116
5186 "SYS_getrusage", //117
5187 "SYS_getsockopt", //118
5188 "SYS_119",
5189 "SYS_readv", //120
5190 "SYS_writev", //121
5191 "SYS_settimeofday", //122
5192 "SYS_fchown", //123
5193 "SYS_fchmod", //124
5194 "SYS_125",
5195 "SYS_setreuid", //126
5196 "SYS_setregid", //127
5197 "SYS_rename", //128
5198 "SYS_129",
5199 "SYS_130",
5200 "SYS_flock", //131
5201 "SYS_mkfifo", //132
5202 "SYS_sendto", //133
5203 "SYS_shutdown", //134
5204 "SYS_socketpair", //135
5205 "SYS_mkdir", //136
5206 "SYS_rmdir", //137
5207 "SYS_utimes", //138
5208 "SYS_139",
5209 "SYS_adjtime", //140
5210 "SYS_141",
5211 "SYS_142",
5212 "SYS_143",
5213 "SYS_144",
5214 "SYS_145",
5215 "SYS_146",
5216 "SYS_setsid", //147
5217 "SYS_quotactl", //148
5218 "SYS_149",
5219 "SYS_150",
5220 "SYS_151",
5221 "SYS_152",
5222 "SYS_153",
5223 "SYS_154",
5224 "SYS_nfssvc", //155
5225 "SYS_156",
5226 "SYS_157",
5227 "SYS_158",
5228 "SYS_159",
5229 "SYS_160",
5230 "SYS_getfh", //161
5231 "SYS_162",
5232 "SYS_163",
5233 "SYS_164",
5234 "SYS_sysarch", //165
5235 "SYS_166",
5236 "SYS_167",
5237 "SYS_168",
5238 "SYS_169",
5239 "SYS_170",
5240 "SYS_171",
5241 "SYS_172",
5242 "SYS_pread", //173
5243 "SYS_pwrite", //174
5244 "SYS_175",
5245 "SYS_176",
5246 "SYS_177",
5247 "SYS_178",
5248 "SYS_179",
5249 "SYS_180",
5250 "SYS_setgid", //181
5251 "SYS_setegid", //182
5252 "SYS_seteuid", //183
5253 "SYS_lfs_bmapv", //184
5254 "SYS_lfs_markv", //185
5255 "SYS_lfs_segclean", //186
5256 "SYS_lfs_segwait", //187
5257 "SYS_188",
5258 "SYS_189",
5259 "SYS_190",
5260 "SYS_pathconf", //191
5261 "SYS_fpathconf", //192
5262 "SYS_swapctl", //193
5263 "SYS_getrlimit", //194
5264 "SYS_setrlimit", //195
5265 "SYS_getdirentries", //196
5266 "SYS_mmap", //197
5267 "SYS___syscall", //198
5268 "SYS_lseek", //199
5269 "SYS_truncate", //200
5270 "SYS_ftruncate", //201
5271 "SYS___sysctl", //202
5272 "SYS_mlock", //203
5273 "SYS_munlock", //204
5274 "SYS_205",
5275 "SYS_futimes", //206
5276 "SYS_getpgid", //207
5277 "SYS_xfspioctl", //208
5278 "SYS_209",
5279 "SYS_210",
5280 "SYS_211",
5281 "SYS_212",
5282 "SYS_213",
5283 "SYS_214",
5284 "SYS_215",
5285 "SYS_216",
5286 "SYS_217",
5287 "SYS_218",
5288 "SYS_219",
5289 "SYS_220",
5290 "SYS_semget", //221
5291 "SYS_222",
5292 "SYS_223",
5293 "SYS_224",
5294 "SYS_msgget", //225
5295 "SYS_msgsnd", //226
5296 "SYS_msgrcv", //227
5297 "SYS_shmat", //228
5298 "SYS_229",
5299 "SYS_shmdt", //230
5300 "SYS_231",
5301 "SYS_clock_gettime", //232
5302 "SYS_clock_settime", //233
5303 "SYS_clock_getres", //234
5304 "SYS_235",
5305 "SYS_236",
5306 "SYS_237",
5307 "SYS_238",
5308 "SYS_239",
5309 "SYS_nanosleep", //240
5310 "SYS_241",
5311 "SYS_242",
5312 "SYS_243",
5313 "SYS_244",
5314 "SYS_245",
5315 "SYS_246",
5316 "SYS_247",
5317 "SYS_248",
5318 "SYS_249",
5319 "SYS_minherit", //250
5320 "SYS_rfork", //251
5321 "SYS_poll", //252
5322 "SYS_issetugid", //253
5323 "SYS_lchown", //254
5324 "SYS_getsid", //255
5325 "SYS_msync", //256
5326 "SYS_257",
5327 "SYS_258",
5328 "SYS_259",
5329 "SYS_getfsstat", //260
5330 "SYS_statfs", //261
5331 "SYS_fstatfs", //262
5332 "SYS_pipe", //263
5333 "SYS_fhopen", //264
5334 "SYS_265",
5335 "SYS_fhstatfs", //266
5336 "SYS_preadv", //267
5337 "SYS_pwritev", //268
5338 "SYS_kqueue", //269
5339 "SYS_kevent", //270
5340 "SYS_mlockall", //271
5341 "SYS_munlockall", //272
5342 "SYS_getpeereid", //273
5343 "SYS_274",
5344 "SYS_275",
5345 "SYS_276",
5346 "SYS_277",
5347 "SYS_278",
5348 "SYS_279",
5349 "SYS_280",
5350 "SYS_getresuid", //281
5351 "SYS_setresuid", //282
5352 "SYS_getresgid", //283
5353 "SYS_setresgid", //284
5354 "SYS_285",
5355 "SYS_mquery", //286
5356 "SYS_closefrom", //287
5357 "SYS_sigaltstack", //288
5358 "SYS_shmget", //289
5359 "SYS_semop", //290
5360 "SYS_stat", //291
5361 "SYS_fstat", //292
5362 "SYS_lstat", //293
5363 "SYS_fhstat", //294
5364 "SYS___semctl", //295
5365 "SYS_shmctl", //296
5366 "SYS_msgctl", //297
5367 "SYS_MAXSYSCALL", //298
5368 //299
5369 //300
5370 };
5371 uint32_t uEAX;
5372 if (!LogIsEnabled())
5373 return;
5374 uEAX = CPUMGetGuestEAX(pVCpu);
5375 switch (uEAX)
5376 {
5377 default:
5378 if (uEAX < RT_ELEMENTS(apsz))
5379 {
5380 uint32_t au32Args[8] = {0};
5381 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5382 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5383 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5384 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5385 }
5386 else
5387 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5388 break;
5389 }
5390}
5391
5392
5393#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5394/**
5395 * The Dll main entry point (stub).
5396 */
5397bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5398{
5399 return true;
5400}
5401
/**
 * Fallback memcpy for no-CRT builds: copies @a size bytes from @a src to
 * @a dst one byte at a time (regions must not overlap, per the standard
 * memcpy contract).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (not written to).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* keep the const qualifier; the old code silently dropped it */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5409
5410#endif
5411
/**
 * SMM (System Management Mode) state-change hook required by the QEMU core.
 * Intentionally a no-op: this recompiler build takes no action on SMM
 * transitions. (NOTE(review): presumably invoked by QEMU's CPU emulation
 * when SMM state changes -- confirm against the QEMU side.)
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette