VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 36056

Last change on this file since 36056 was 36056, checked in by vboxsync, 14 years ago

.remstep hacking.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 178.6 KB
Line 
1/* $Id: VBoxRecompiler.c 36056 2011-02-22 18:19:03Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "vl.h"
24#include "osdep.h"
25#include "exec-all.h"
26#include "config.h"
27#include "cpu-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
75/** Copy 80-bit fpu register at pSrc to pDst.
76 * This is probably faster than *calling* memcpy.
77 */
78#define REM_COPY_FPU_REG(pDst, pSrc) \
79 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81/** How remR3RunLoggingStep operates. */
82#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
83
84
85/*******************************************************************************
86* Internal Functions *
87*******************************************************************************/
88static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
91static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
108static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
109static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
110
111/*******************************************************************************
112* Global Variables *
113*******************************************************************************/
114
115/** @todo Move stats to REM::s some rainy day we have nothing to do. */
116#ifdef VBOX_WITH_STATISTICS
117static STAMPROFILEADV gStatExecuteSingleInstr;
118static STAMPROFILEADV gStatCompilationQEmu;
119static STAMPROFILEADV gStatRunCodeQEmu;
120static STAMPROFILEADV gStatTotalTimeQEmu;
121static STAMPROFILEADV gStatTimers;
122static STAMPROFILEADV gStatTBLookup;
123static STAMPROFILEADV gStatIRQ;
124static STAMPROFILEADV gStatRawCheck;
125static STAMPROFILEADV gStatMemRead;
126static STAMPROFILEADV gStatMemWrite;
127static STAMPROFILE gStatGCPhys2HCVirt;
128static STAMPROFILE gStatHCVirt2GCPhys;
129static STAMCOUNTER gStatCpuGetTSC;
130static STAMCOUNTER gStatRefuseTFInhibit;
131static STAMCOUNTER gStatRefuseVM86;
132static STAMCOUNTER gStatRefusePaging;
133static STAMCOUNTER gStatRefusePAE;
134static STAMCOUNTER gStatRefuseIOPLNot0;
135static STAMCOUNTER gStatRefuseIF0;
136static STAMCOUNTER gStatRefuseCode16;
137static STAMCOUNTER gStatRefuseWP0;
138static STAMCOUNTER gStatRefuseRing1or2;
139static STAMCOUNTER gStatRefuseCanExecute;
140static STAMCOUNTER gStatREMGDTChange;
141static STAMCOUNTER gStatREMIDTChange;
142static STAMCOUNTER gStatREMLDTRChange;
143static STAMCOUNTER gStatREMTRChange;
144static STAMCOUNTER gStatSelOutOfSync[6];
145static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
146static STAMCOUNTER gStatFlushTBs;
147#endif
148/* in exec.c */
149extern uint32_t tlb_flush_count;
150extern uint32_t tb_flush_count;
151extern uint32_t tb_phys_invalidate_count;
152
153/*
154 * Global stuff.
155 */
156
/** MMIO read callbacks, indexed by access size (0=byte, 1=word, 2=dword). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};
164
/** MMIO write callbacks, indexed by access size (0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};
172
/** Access handler read callbacks, indexed by access size (0=byte, 1=word, 2=dword). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};
180
/** Access handler write callbacks, indexed by access size (0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
188
189
190#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
191/*
192 * Debugger commands.
193 */
194static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
195
/** '.remstep' argument descriptors (single optional on/off number). */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags, pszName,    pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,      "on/off",   "Boolean value/mnemonic indicating the new state." },
};
202
/** Debugger command descriptors registered by REMR3Init ('.remstep'). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
219#endif
220
221/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
222uint8_t *code_gen_prologue;
223
224
225/*******************************************************************************
226* Internal Functions *
227*******************************************************************************/
228void remAbort(int rc, const char *pszTip);
229extern int testmath(void);
230
231/* Put them here to avoid unused variable warning. */
232AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
233#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
234//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
235/* Why did this have to be identical?? */
236AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
237#else
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#endif
240
241
242/**
243 * Initializes the REM.
244 *
245 * @returns VBox status code.
246 * @param pVM The VM to operate on.
247 */
248REMR3DECL(int) REMR3Init(PVM pVM)
249{
250 PREMHANDLERNOTIFICATION pCur;
251 uint32_t u32Dummy;
252 int rc;
253 unsigned i;
254
255#ifdef VBOX_ENABLE_VBOXREM64
256 LogRel(("Using 64-bit aware REM\n"));
257#endif
258
259 /*
260 * Assert sanity.
261 */
262 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
263 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
264 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
265#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
266 Assert(!testmath());
267#endif
268
269 /*
270 * Init some internal data members.
271 */
272 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
273 pVM->rem.s.Env.pVM = pVM;
274#ifdef CPU_RAW_MODE_INIT
275 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
276#endif
277
278 /*
279 * Initialize the REM critical section.
280 *
281 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
282 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
283 * deadlocks. (mostly pgm vs rem locking)
284 */
285 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
286 AssertRCReturn(rc, rc);
287
288 /* ctx. */
289 pVM->rem.s.pCtx = NULL; /* set when executing code. */
290 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
291
292 /* ignore all notifications */
293 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
294
295 code_gen_prologue = RTMemExecAlloc(_1K);
296 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
297
298 cpu_exec_init_all(0);
299
300 /*
301 * Init the recompiler.
302 */
303 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
304 {
305 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
306 return VERR_GENERAL_FAILURE;
307 }
308 PVMCPU pVCpu = VMMGetCpu(pVM);
309 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
310 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
311
312 /* allocate code buffer for single instruction emulation. */
313 pVM->rem.s.Env.cbCodeBuffer = 4096;
314 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
315 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
316
317 /* finally, set the cpu_single_env global. */
318 cpu_single_env = &pVM->rem.s.Env;
319
320 /* Nothing is pending by default */
321 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
322
323 /*
324 * Register ram types.
325 */
326 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
327 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
328 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
329 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
330 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
331
332 /* stop ignoring. */
333 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
334
335 /*
336 * Register the saved state data unit.
337 */
338 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
339 NULL, NULL, NULL,
340 NULL, remR3Save, NULL,
341 NULL, remR3Load, NULL);
342 if (RT_FAILURE(rc))
343 return rc;
344
345#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
346 /*
347 * Debugger commands.
348 */
349 static bool fRegisteredCmds = false;
350 if (!fRegisteredCmds)
351 {
352 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
353 if (RT_SUCCESS(rc))
354 fRegisteredCmds = true;
355 }
356#endif
357
358#ifdef VBOX_WITH_STATISTICS
359 /*
360 * Statistics.
361 */
362 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
363 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
364 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
365 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
366 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
367 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
368 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
369 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
370 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
371 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
372 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
373 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
374
375 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
376
377 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
378 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
379 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
380 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
381 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
382 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
383 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
384 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
385 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
386 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
387 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
388
389 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
390 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
391 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
392 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
393
394 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
395 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
396 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
397 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
398 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
399 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
400
401 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
402 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
403 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
405 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
406 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
407
408 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
409#endif /* VBOX_WITH_STATISTICS */
410
411 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
412 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
413 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
414
415
416#ifdef DEBUG_ALL_LOGGING
417 loglevel = ~0;
418# ifdef DEBUG_TMP_LOGGING
419 logfile = fopen("/tmp/vbox-qemu.log", "w");
420# endif
421#endif
422
423 /*
424 * Init the handler notification lists.
425 */
426 pVM->rem.s.idxPendingList = UINT32_MAX;
427 pVM->rem.s.idxFreeList = 0;
428
429 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
430 {
431 pCur = &pVM->rem.s.aHandlerNotifications[i];
432 pCur->idxNext = i + 1;
433 pCur->idxSelf = i;
434 }
435 pCur->idxNext = UINT32_MAX; /* the last record. */
436
437 return rc;
438}
439
440
441/**
442 * Finalizes the REM initialization.
443 *
444 * This is called after all components, devices and drivers has
445 * been initialized. Its main purpose it to finish the RAM related
446 * initialization.
447 *
448 * @returns VBox status code.
449 *
450 * @param pVM The VM handle.
451 */
452REMR3DECL(int) REMR3InitFinalize(PVM pVM)
453{
454 int rc;
455
456 /*
457 * Ram size & dirty bit map.
458 */
459 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
460 pVM->rem.s.fGCPhysLastRamFixed = true;
461#ifdef RT_STRICT
462 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
463#else
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
465#endif
466 return rc;
467}
468
469
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map has one byte per guest page.  When fGuarded is set the map
 * is page-allocated with an inaccessible tail so out-of-bounds writes fault
 * immediately instead of corrupting memory.
 *
 * @returns VBox status code.
 * @param   pVM      The VM handle.
 * @param   fGuarded Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* RAM size is the last RAM address + 1; the assertion catches wrap-around. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty byte per page; the assertion verifies cb was page aligned. */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke access to the tail pages so stray writes past the map fault. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base so the map ends flush against the guard region. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
524
525
526/**
527 * Terminates the REM.
528 *
529 * Termination means cleaning up and freeing all resources,
530 * the VM it self is at this point powered off or suspended.
531 *
532 * @returns VBox status code.
533 * @param pVM The VM to operate on.
534 */
535REMR3DECL(int) REMR3Term(PVM pVM)
536{
537#ifdef VBOX_WITH_STATISTICS
538 /*
539 * Statistics.
540 */
541 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
542 STAM_DEREG(pVM, &gStatCompilationQEmu);
543 STAM_DEREG(pVM, &gStatRunCodeQEmu);
544 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
545 STAM_DEREG(pVM, &gStatTimers);
546 STAM_DEREG(pVM, &gStatTBLookup);
547 STAM_DEREG(pVM, &gStatIRQ);
548 STAM_DEREG(pVM, &gStatRawCheck);
549 STAM_DEREG(pVM, &gStatMemRead);
550 STAM_DEREG(pVM, &gStatMemWrite);
551 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
552 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
553
554 STAM_DEREG(pVM, &gStatCpuGetTSC);
555
556 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
557 STAM_DEREG(pVM, &gStatRefuseVM86);
558 STAM_DEREG(pVM, &gStatRefusePaging);
559 STAM_DEREG(pVM, &gStatRefusePAE);
560 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
561 STAM_DEREG(pVM, &gStatRefuseIF0);
562 STAM_DEREG(pVM, &gStatRefuseCode16);
563 STAM_DEREG(pVM, &gStatRefuseWP0);
564 STAM_DEREG(pVM, &gStatRefuseRing1or2);
565 STAM_DEREG(pVM, &gStatRefuseCanExecute);
566 STAM_DEREG(pVM, &gStatFlushTBs);
567
568 STAM_DEREG(pVM, &gStatREMGDTChange);
569 STAM_DEREG(pVM, &gStatREMLDTRChange);
570 STAM_DEREG(pVM, &gStatREMIDTChange);
571 STAM_DEREG(pVM, &gStatREMTRChange);
572
573 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
574 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
575 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
576 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
577 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
578 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
579
580 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
581 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
582 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
583 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
584 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
585 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
586
587 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
588#endif /* VBOX_WITH_STATISTICS */
589
590 STAM_REL_DEREG(pVM, &tb_flush_count);
591 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
592 STAM_REL_DEREG(pVM, &tlb_flush_count);
593
594 return VINF_SUCCESS;
595}
596
597
598/**
599 * The VM is being reset.
600 *
601 * For the REM component this means to call the cpu_reset() and
602 * reinitialize some state variables.
603 *
604 * @param pVM VM handle.
605 */
606REMR3DECL(void) REMR3Reset(PVM pVM)
607{
608 /*
609 * Reset the REM cpu.
610 */
611 Assert(pVM->rem.s.cIgnoreAll == 0);
612 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
613 cpu_reset(&pVM->rem.s.Env);
614 pVM->rem.s.cInvalidatedPages = 0;
615 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
616 Assert(pVM->rem.s.cIgnoreAll == 0);
617
618 /* Clear raw ring 0 init state */
619 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
620
621 /* Flush the TBs the next time we execute code here. */
622 pVM->rem.s.fFlushTBs = true;
623}
624
625
626/**
627 * Execute state save operation.
628 *
629 * @returns VBox status code.
630 * @param pVM VM Handle.
631 * @param pSSM SSM operation handle.
632 */
633static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
634{
635 PREM pRem = &pVM->rem.s;
636
637 /*
638 * Save the required CPU Env bits.
639 * (Not much because we're never in REM when doing the save.)
640 */
641 LogFlow(("remR3Save:\n"));
642 Assert(!pRem->fInREM);
643 SSMR3PutU32(pSSM, pRem->Env.hflags);
644 SSMR3PutU32(pSSM, ~0); /* separator */
645
646 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
647 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
648 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
649
650 return SSMR3PutU32(pSSM, ~0); /* terminator */
651}
652
653
654/**
655 * Execute state load operation.
656 *
657 * @returns VBox status code.
658 * @param pVM VM Handle.
659 * @param pSSM SSM operation handle.
660 * @param uVersion Data layout version.
661 * @param uPass The data pass.
662 */
663static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
664{
665 uint32_t u32Dummy;
666 uint32_t fRawRing0 = false;
667 uint32_t u32Sep;
668 uint32_t i;
669 int rc;
670 PREM pRem;
671
672 LogFlow(("remR3Load:\n"));
673 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
674
675 /*
676 * Validate version.
677 */
678 if ( uVersion != REM_SAVED_STATE_VERSION
679 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
680 {
681 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
682 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
683 }
684
685 /*
686 * Do a reset to be on the safe side...
687 */
688 REMR3Reset(pVM);
689
690 /*
691 * Ignore all ignorable notifications.
692 * (Not doing this will cause serious trouble.)
693 */
694 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
695
696 /*
697 * Load the required CPU Env bits.
698 * (Not much because we're never in REM when doing the save.)
699 */
700 pRem = &pVM->rem.s;
701 Assert(!pRem->fInREM);
702 SSMR3GetU32(pSSM, &pRem->Env.hflags);
703 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
704 {
705 /* Redundant REM CPU state has to be loaded, but can be ignored. */
706 CPUX86State_Ver16 temp;
707 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
708 }
709
710 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
711 if (RT_FAILURE(rc))
712 return rc;
713 if (u32Sep != ~0U)
714 {
715 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
716 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
717 }
718
719 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
720 SSMR3GetUInt(pSSM, &fRawRing0);
721 if (fRawRing0)
722 pRem->Env.state |= CPU_RAW_RING0;
723
724 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
725 {
726 /*
727 * Load the REM stuff.
728 */
729 /** @todo r=bird: We should just drop all these items, restoring doesn't make
730 * sense. */
731 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
732 if (RT_FAILURE(rc))
733 return rc;
734 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
735 {
736 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
737 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
738 }
739 for (i = 0; i < pRem->cInvalidatedPages; i++)
740 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
741 }
742
743 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
744 if (RT_FAILURE(rc))
745 return rc;
746
747 /* check the terminator. */
748 rc = SSMR3GetU32(pSSM, &u32Sep);
749 if (RT_FAILURE(rc))
750 return rc;
751 if (u32Sep != ~0U)
752 {
753 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
754 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
755 }
756
757 /*
758 * Get the CPUID features.
759 */
760 PVMCPU pVCpu = VMMGetCpu(pVM);
761 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
762 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
763
764 /*
765 * Sync the Load Flush the TLB
766 */
767 tlb_flush(&pRem->Env, 1);
768
769 /*
770 * Stop ignoring ignorable notifications.
771 */
772 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
773
774 /*
775 * Sync the whole CPU state when executing code in the recompiler.
776 */
777 for (i = 0; i < pVM->cCpus; i++)
778 {
779 PVMCPU pVCpu = &pVM->aCpus[i];
780 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
781 }
782 return VINF_SUCCESS;
783}
784
785
786
787#undef LOG_GROUP
788#define LOG_GROUP LOG_GROUP_REM_RUN
789
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 * @retval  VINF_EM_DBG_STEPPED when the instruction was stepped (EXCP_DEBUG).
 * @retval  VINF_EM_HALT on EXCP_HLT / EXCP_HALTED.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    /* Save the current interrupt request flags so they can be restored below. */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    /* cpu_breakpoint_remove returns 0 when a breakpoint was present and removed. */
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Successful step: pulse the timers (resume+suspend) so the clock advances. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED: rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM status was raised via remR3RaiseRC(); consume and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Reinstall the breakpoint we removed before stepping. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
874
875
876/**
877 * Set a breakpoint using the REM facilities.
878 *
879 * @returns VBox status code.
880 * @param pVM The VM handle.
881 * @param Address The breakpoint address.
882 * @thread The emulation thread.
883 */
884REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
885{
886 VM_ASSERT_EMT(pVM);
887 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
888 {
889 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
890 return VINF_SUCCESS;
891 }
892 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
893 return VERR_REM_NO_MORE_BP_SLOTS;
894}
895
896
897/**
898 * Clears a breakpoint set by REMR3BreakpointSet().
899 *
900 * @returns VBox status code.
901 * @param pVM The VM handle.
902 * @param Address The breakpoint address.
903 * @thread The emulation thread.
904 */
905REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
906{
907 VM_ASSERT_EMT(pVM);
908 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
909 {
910 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
911 return VINF_SUCCESS;
912 }
913 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
914 return VERR_REM_BP_NOT_FOUND;
915}
916
917
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;   /* restore the caller's TB flushing preference */
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt request flags so they can be restored after execution. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the breakpoint list for the current PC. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;   /* consume the pending status */
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1070
1071
1072/**
1073 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1074 *
1075 * @returns VBox status code.
1076 *
1077 * @param pVM The VM handle.
1078 * @param pVCpu The Virtual CPU handle.
1079 */
1080static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1081{
1082 int rc;
1083
1084 Assert(pVM->rem.s.fInREM);
1085#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1086 cpu_single_step(&pVM->rem.s.Env, 1);
1087#else
1088 Assert(!pVM->rem.s.Env.singlestep_enabled);
1089#endif
1090
1091 /*
1092 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1093 */
1094 for (;;)
1095 {
1096 char szBuf[256];
1097
1098 /*
1099 * Log the current registers state and instruction.
1100 */
1101 remR3StateUpdate(pVM, pVCpu);
1102 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1103 szBuf[0] = '\0';
1104 rc = DBGFR3DisasInstrEx(pVM,
1105 pVCpu->idCpu,
1106 0, /* Sel */
1107 0, /* GCPtr */
1108 DBGF_DISAS_FLAGS_CURRENT_GUEST
1109 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1110 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1111 szBuf,
1112 sizeof(szBuf),
1113 NULL);
1114 if (RT_FAILURE(rc))
1115 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1116 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1117
1118 /*
1119 * Execute the instruction.
1120 */
1121 TMNotifyStartOfExecution(pVCpu);
1122
1123 if ( pVM->rem.s.Env.exception_index < 0
1124 || pVM->rem.s.Env.exception_index > 256)
1125 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1126
1127#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1128 pVM->rem.s.Env.interrupt_request = 0;
1129#else
1130 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1131#endif
1132 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1133 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1134 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1135 pVM->rem.s.Env.interrupt_request,
1136 pVM->rem.s.Env.halted,
1137 pVM->rem.s.Env.exception_index
1138 );
1139
1140 rc = cpu_exec(&pVM->rem.s.Env);
1141
1142 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1143 pVM->rem.s.Env.interrupt_request,
1144 pVM->rem.s.Env.halted,
1145 pVM->rem.s.Env.exception_index
1146 );
1147
1148 TMNotifyEndOfExecution(pVCpu);
1149
1150 switch (rc)
1151 {
1152#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1153 /*
1154 * The normal exit.
1155 */
1156 case EXCP_SINGLE_INSTR:
1157 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1158 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1159 continue;
1160 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1161 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1162 rc = VINF_SUCCESS;
1163 break;
1164
1165#else
1166 /*
1167 * The normal exit, check for breakpoints at PC just to be sure.
1168 */
1169#endif
1170 case EXCP_DEBUG:
1171 rc = VINF_EM_DBG_STEPPED;
1172 if (pVM->rem.s.Env.nb_breakpoints > 0)
1173 {
1174 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1175 int iBP;
1176 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1177 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1178 {
1179 rc = VINF_EM_DBG_BREAKPOINT;
1180 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC);
1181 break;
1182 }
1183 }
1184#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1185 if (rc == VINF_EM_DBG_STEPPED)
1186 {
1187 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1188 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1189 continue;
1190
1191 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1192 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1193 rc = VINF_SUCCESS;
1194 }
1195#endif
1196 break;
1197
1198 /*
1199 * If we take a trap or start servicing a pending interrupt, we might end up here.
1200 * (Timer thread or some other thread wishing EMT's attention.)
1201 */
1202 case EXCP_INTERRUPT:
1203 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1204 rc = VINF_SUCCESS;
1205 break;
1206
1207 /*
1208 * hlt instruction.
1209 */
1210 case EXCP_HLT:
1211 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1212 rc = VINF_EM_HALT;
1213 break;
1214
1215 /*
1216 * The VM has halted.
1217 */
1218 case EXCP_HALTED:
1219 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1220 rc = VINF_EM_HALT;
1221 break;
1222
1223 /*
1224 * Switch to RAW-mode.
1225 */
1226 case EXCP_EXECUTE_RAW:
1227 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1228 rc = VINF_EM_RESCHEDULE_RAW;
1229 break;
1230
1231 /*
1232 * Switch to hardware accelerated RAW-mode.
1233 */
1234 case EXCP_EXECUTE_HWACC:
1235 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1236 rc = VINF_EM_RESCHEDULE_HWACC;
1237 break;
1238
1239 /*
1240 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1241 */
1242 case EXCP_RC:
1243 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1244 rc = pVM->rem.s.rc;
1245 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1246 break;
1247
1248 /*
1249 * Figure out the rest when they arrive....
1250 */
1251 default:
1252 AssertMsgFailed(("rc=%d\n", rc));
1253 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1254 rc = VINF_EM_RESCHEDULE;
1255 break;
1256 }
1257 break;
1258 }
1259
1260#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1261// cpu_single_step(&pVM->rem.s.Env, 0);
1262#else
1263 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1264#endif
1265 return rc;
1266}
1267
1268
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* When single-step logging is requested, use the dedicated slow path. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Translate the QEMU exit code into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the breakpoint list for the current PC. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;   /* consume the pending status */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1400
1401
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields HWACCMR3CanExecuteGuest inspects are filled in;
         * the rest of Ctx is left uninitialized on purpose.
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;   /* QEMU keeps the attributes shifted left by 8 */

        Ctx.ldtr = env->ldt.selector;
        Ctx.ldtrHid.u64Base = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires paged protected mode. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        /* PAE is only acceptable when the CPU profile reports the feature. */
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring 3 code. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring 0 (or 1/2, which are refused below). */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always executed in raw mode. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1646
1647
1648/**
1649 * Fetches a code byte.
1650 *
1651 * @returns Success indicator (bool) for ease of use.
1652 * @param env The CPU environment structure.
1653 * @param GCPtrInstr Where to fetch code.
1654 * @param pu8Byte Where to store the byte on success
1655 */
1656bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1657{
1658 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1659 if (RT_SUCCESS(rc))
1660 return true;
1661 return false;
1662}
1663
1664
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (virtual interrupt redirection bitmaps). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Fall back to a full CR3 sync on the next opportunity. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1713
1714
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address into a host pointer, encoding status
 * in the two low bits of the returned pointer: 1 = inaccessible/unassigned,
 * bit 2 set = writes must be caught (handler page).
 *
 * NOTE(review): the fWritable parameter is not forwarded; the call below
 * hard-codes fWritable=true — confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;   /* low bit 0 set: page cannot be mapped */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);   /* bit 1 set: catch writes */
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1737
1738
1739/**
1740 * Called from tlb_protect_code in order to write monitor a code page.
1741 *
1742 * @param env Pointer to the CPU environment.
1743 * @param GCPtr Code page to monitor
1744 */
1745void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1746{
1747#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1748 Assert(env->pVM->rem.s.fInREM);
1749 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1750 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1751 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1752 && !(env->eflags & VM_MASK) /* no V86 mode */
1753 && !HWACCMIsEnabled(env->pVM))
1754 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1755#endif
1756}
1757
1758
1759/**
1760 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1761 *
1762 * @param env Pointer to the CPU environment.
1763 * @param GCPtr Code page to monitor
1764 */
1765void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1766{
1767 Assert(env->pVM->rem.s.fInREM);
1768#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1769 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1770 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1771 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1772 && !(env->eflags & VM_MASK) /* no V86 mode */
1773 && !HWACCMIsEnabled(env->pVM))
1774 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1775#endif
1776}
1777
1778
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     */
    /* Without global pages (CR4.PGE) every flush is effectively global. */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (virtual interrupt redirection bitmaps). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1823
1824
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (virtual interrupt redirection bitmaps). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    efer = 0;   /* EFER doesn't exist on 32-bit targets */
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* An EM status: propagate it to the recompiler loop via remR3RaiseRC. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1875
1876
/**
 * Called from compiled code to run dma.
 *
 * Temporarily suspends the emulated-code profiling sample while PDM
 * processes pending DMA work.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1888
1889
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Switches the profiling sample from emulated-code to timer work while
 * TM processes its timer queues.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1905
1906
/**
 * Record trap occurrence
 *
 * Tracks pending exceptions so repeated faults at the same EIP/CR2 can be
 * detected and reported as VERR_REM_TOO_MANY_TRAPS instead of looping.
 *
 * @returns VBox status code
 * @retval  VERR_REM_TOO_MANY_TRAPS when the same trap has repeated more than 512 times.
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-trap counters (traps 0..254). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only track CPU exceptions (< 0x20) raised in protected, non-V86 mode. */
    if(   uTrap < 0x20
       && (env->cr[0] & X86_CR0_PE)
       && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap repeating at the same spot more than 512 times -> give up. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Different trap or location: restart the repeat counter. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not a tracked exception: record it but reset the repeat counter. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1966
1967
1968/*
1969 * Clear current active trap
1970 *
1971 * @param pVM VM Handle.
1972 */
1973void remR3TrapClear(PVM pVM)
1974{
1975 pVM->rem.s.cPendingExceptions = 0;
1976 pVM->rem.s.uPendingException = 0;
1977 pVM->rem.s.uPendingExcptEIP = 0;
1978 pVM->rem.s.uPendingExcptCR2 = 0;
1979}
1980
1981
1982/*
1983 * Record previous call instruction addresses
1984 *
1985 * @param env Pointer to the CPU environment.
1986 */
1987void remR3RecordCall(CPUState *env)
1988{
1989 CSAMR3RecordCallAddress(env->pVM, env->eip);
1990}
1991
1992
1993/**
1994 * Syncs the internal REM state with the VM.
1995 *
1996 * This must be called before REMR3Run() is invoked whenever when the REM
1997 * state is not up to date. Calling it several times in a row is not
1998 * permitted.
1999 *
2000 * @returns VBox status code.
2001 *
2002 * @param pVM VM Handle.
2003 * @param pVCpu VMCPU Handle.
2004 *
2005 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2006 * no do this since the majority of the callers don't want any unnecessary of events
2007 * pending that would immediately interrupt execution.
2008 */
2009REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2010{
2011 register const CPUMCTX *pCtx;
2012 register unsigned fFlags;
2013 bool fHiddenSelRegsValid;
2014 unsigned i;
2015 TRPMEVENT enmType;
2016 uint8_t u8TrapNo;
2017 uint32_t uCpl;
2018 int rc;
2019
2020 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2021 Log2(("REMR3State:\n"));
2022
2023 pVM->rem.s.Env.pVCpu = pVCpu;
2024 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2025 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
2026
2027 Assert(!pVM->rem.s.fInREM);
2028 pVM->rem.s.fInStateSync = true;
2029
2030 /*
2031 * If we have to flush TBs, do that immediately.
2032 */
2033 if (pVM->rem.s.fFlushTBs)
2034 {
2035 STAM_COUNTER_INC(&gStatFlushTBs);
2036 tb_flush(&pVM->rem.s.Env);
2037 pVM->rem.s.fFlushTBs = false;
2038 }
2039
2040 /*
2041 * Copy the registers which require no special handling.
2042 */
2043#ifdef TARGET_X86_64
2044 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2045 Assert(R_EAX == 0);
2046 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2047 Assert(R_ECX == 1);
2048 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2049 Assert(R_EDX == 2);
2050 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2051 Assert(R_EBX == 3);
2052 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2053 Assert(R_ESP == 4);
2054 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2055 Assert(R_EBP == 5);
2056 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2057 Assert(R_ESI == 6);
2058 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2059 Assert(R_EDI == 7);
2060 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2061 pVM->rem.s.Env.regs[8] = pCtx->r8;
2062 pVM->rem.s.Env.regs[9] = pCtx->r9;
2063 pVM->rem.s.Env.regs[10] = pCtx->r10;
2064 pVM->rem.s.Env.regs[11] = pCtx->r11;
2065 pVM->rem.s.Env.regs[12] = pCtx->r12;
2066 pVM->rem.s.Env.regs[13] = pCtx->r13;
2067 pVM->rem.s.Env.regs[14] = pCtx->r14;
2068 pVM->rem.s.Env.regs[15] = pCtx->r15;
2069
2070 pVM->rem.s.Env.eip = pCtx->rip;
2071
2072 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2073#else
2074 Assert(R_EAX == 0);
2075 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2076 Assert(R_ECX == 1);
2077 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2078 Assert(R_EDX == 2);
2079 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2080 Assert(R_EBX == 3);
2081 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2082 Assert(R_ESP == 4);
2083 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2084 Assert(R_EBP == 5);
2085 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2086 Assert(R_ESI == 6);
2087 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2088 Assert(R_EDI == 7);
2089 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2090 pVM->rem.s.Env.eip = pCtx->eip;
2091
2092 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2093#endif
2094
2095 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2096
2097 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2098 for (i=0;i<8;i++)
2099 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2100
2101 /*
2102 * Clear the halted hidden flag (the interrupt waking up the CPU can
2103 * have been dispatched in raw mode).
2104 */
2105 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2106
2107 /*
2108 * Replay invlpg?
2109 */
2110 if (pVM->rem.s.cInvalidatedPages)
2111 {
2112 RTUINT i;
2113
2114 pVM->rem.s.fIgnoreInvlPg = true;
2115 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2116 {
2117 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2118 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2119 }
2120 pVM->rem.s.fIgnoreInvlPg = false;
2121 pVM->rem.s.cInvalidatedPages = 0;
2122 }
2123
2124 /* Replay notification changes. */
2125 REMR3ReplayHandlerNotifications(pVM);
2126
2127 /* Update MSRs; before CRx registers! */
2128 pVM->rem.s.Env.efer = pCtx->msrEFER;
2129 pVM->rem.s.Env.star = pCtx->msrSTAR;
2130 pVM->rem.s.Env.pat = pCtx->msrPAT;
2131#ifdef TARGET_X86_64
2132 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2133 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2134 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2135 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2136
2137 /* Update the internal long mode activate flag according to the new EFER value. */
2138 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2139 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2140 else
2141 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2142#endif
2143
2144 /*
2145 * Registers which are rarely changed and require special handling / order when changed.
2146 */
2147 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2148 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2149 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2150 | CPUM_CHANGED_CR4
2151 | CPUM_CHANGED_CR0
2152 | CPUM_CHANGED_CR3
2153 | CPUM_CHANGED_GDTR
2154 | CPUM_CHANGED_IDTR
2155 | CPUM_CHANGED_SYSENTER_MSR
2156 | CPUM_CHANGED_LDTR
2157 | CPUM_CHANGED_CPUID
2158 | CPUM_CHANGED_FPU_REM
2159 )
2160 )
2161 {
2162 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2163 {
2164 pVM->rem.s.fIgnoreCR3Load = true;
2165 tlb_flush(&pVM->rem.s.Env, true);
2166 pVM->rem.s.fIgnoreCR3Load = false;
2167 }
2168
2169 /* CR4 before CR0! */
2170 if (fFlags & CPUM_CHANGED_CR4)
2171 {
2172 pVM->rem.s.fIgnoreCR3Load = true;
2173 pVM->rem.s.fIgnoreCpuMode = true;
2174 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2175 pVM->rem.s.fIgnoreCpuMode = false;
2176 pVM->rem.s.fIgnoreCR3Load = false;
2177 }
2178
2179 if (fFlags & CPUM_CHANGED_CR0)
2180 {
2181 pVM->rem.s.fIgnoreCR3Load = true;
2182 pVM->rem.s.fIgnoreCpuMode = true;
2183 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2184 pVM->rem.s.fIgnoreCpuMode = false;
2185 pVM->rem.s.fIgnoreCR3Load = false;
2186 }
2187
2188 if (fFlags & CPUM_CHANGED_CR3)
2189 {
2190 pVM->rem.s.fIgnoreCR3Load = true;
2191 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2192 pVM->rem.s.fIgnoreCR3Load = false;
2193 }
2194
2195 if (fFlags & CPUM_CHANGED_GDTR)
2196 {
2197 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2198 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2199 }
2200
2201 if (fFlags & CPUM_CHANGED_IDTR)
2202 {
2203 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2204 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2205 }
2206
2207 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2208 {
2209 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2210 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2211 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2212 }
2213
2214 if (fFlags & CPUM_CHANGED_LDTR)
2215 {
2216 if (fHiddenSelRegsValid)
2217 {
2218 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
2219 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
2220 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2221 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2222 }
2223 else
2224 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2225 }
2226
2227 if (fFlags & CPUM_CHANGED_CPUID)
2228 {
2229 uint32_t u32Dummy;
2230
2231 /*
2232 * Get the CPUID features.
2233 */
2234 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2235 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2236 }
2237
2238 /* Sync FPU state after CR4, CPUID and EFER (!). */
2239 if (fFlags & CPUM_CHANGED_FPU_REM)
2240 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2241 }
2242
2243 /*
2244 * Sync TR unconditionally to make life simpler.
2245 */
2246 pVM->rem.s.Env.tr.selector = pCtx->tr;
2247 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2248 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2249 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2250 /* Note! do_interrupt will fault if the busy flag is still set... */
2251 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2252
2253 /*
2254 * Update selector registers.
2255 * This must be done *after* we've synced gdt, ldt and crX registers
2256 * since we're reading the GDT/LDT om sync_seg. This will happen with
2257 * saved state which takes a quick dip into rawmode for instance.
2258 */
2259 /*
2260 * Stack; Note first check this one as the CPL might have changed. The
2261 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2262 */
2263
2264 if (fHiddenSelRegsValid)
2265 {
2266 /* The hidden selector registers are valid in the CPU context. */
2267 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2268
2269 /* Set current CPL */
2270 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2271
2272 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2273 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2274 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2275 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2276 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2277 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2278 }
2279 else
2280 {
2281 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2282 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2283 {
2284 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2285
2286 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2287 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2288#ifdef VBOX_WITH_STATISTICS
2289 if (pVM->rem.s.Env.segs[R_SS].newselector)
2290 {
2291 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2292 }
2293#endif
2294 }
2295 else
2296 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2297
2298 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2299 {
2300 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2301 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2302#ifdef VBOX_WITH_STATISTICS
2303 if (pVM->rem.s.Env.segs[R_ES].newselector)
2304 {
2305 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2306 }
2307#endif
2308 }
2309 else
2310 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2311
2312 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2313 {
2314 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2315 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2316#ifdef VBOX_WITH_STATISTICS
2317 if (pVM->rem.s.Env.segs[R_CS].newselector)
2318 {
2319 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2320 }
2321#endif
2322 }
2323 else
2324 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2325
2326 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2327 {
2328 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2329 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2330#ifdef VBOX_WITH_STATISTICS
2331 if (pVM->rem.s.Env.segs[R_DS].newselector)
2332 {
2333 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2334 }
2335#endif
2336 }
2337 else
2338 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2339
2340 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2341 * be the same but not the base/limit. */
2342 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2343 {
2344 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2345 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2346#ifdef VBOX_WITH_STATISTICS
2347 if (pVM->rem.s.Env.segs[R_FS].newselector)
2348 {
2349 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2350 }
2351#endif
2352 }
2353 else
2354 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2355
2356 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2357 {
2358 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2359 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2360#ifdef VBOX_WITH_STATISTICS
2361 if (pVM->rem.s.Env.segs[R_GS].newselector)
2362 {
2363 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2364 }
2365#endif
2366 }
2367 else
2368 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2369 }
2370
2371 /*
2372 * Check for traps.
2373 */
2374 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2375 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2376 if (RT_SUCCESS(rc))
2377 {
2378#ifdef DEBUG
2379 if (u8TrapNo == 0x80)
2380 {
2381 remR3DumpLnxSyscall(pVCpu);
2382 remR3DumpOBsdSyscall(pVCpu);
2383 }
2384#endif
2385
2386 pVM->rem.s.Env.exception_index = u8TrapNo;
2387 if (enmType != TRPM_SOFTWARE_INT)
2388 {
2389 pVM->rem.s.Env.exception_is_int = 0;
2390 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2391 }
2392 else
2393 {
2394 /*
2395 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2396 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2397 * for int03 and into.
2398 */
2399 pVM->rem.s.Env.exception_is_int = 1;
2400 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2401 /* int 3 may be generated by one-byte 0xcc */
2402 if (u8TrapNo == 3)
2403 {
2404 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2405 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2406 }
2407 /* int 4 may be generated by one-byte 0xce */
2408 else if (u8TrapNo == 4)
2409 {
2410 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2411 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2412 }
2413 }
2414
2415 /* get error code and cr2 if needed. */
2416 switch (u8TrapNo)
2417 {
2418 case 0x0e:
2419 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2420 /* fallthru */
2421 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2422 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2423 break;
2424
2425 case 0x11: case 0x08:
2426 default:
2427 pVM->rem.s.Env.error_code = 0;
2428 break;
2429 }
2430
2431 /*
2432 * We can now reset the active trap since the recompiler is gonna have a go at it.
2433 */
2434 rc = TRPMResetTrap(pVCpu);
2435 AssertRC(rc);
2436 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2437 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2438 }
2439
2440 /*
2441 * Clear old interrupt request flags; Check for pending hardware interrupts.
2442 * (See @remark for why we don't check for other FFs.)
2443 */
2444 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2445 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2446 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2447 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2448
2449 /*
2450 * We're now in REM mode.
2451 */
2452 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2453 pVM->rem.s.fInREM = true;
2454 pVM->rem.s.fInStateSync = false;
2455 pVM->rem.s.cCanExecuteRaw = 0;
2456 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2457 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2458 return VINF_SUCCESS;
2459}
2460
2461
2462/**
2463 * Syncs back changes in the REM state to the the VM state.
2464 *
2465 * This must be called after invoking REMR3Run().
2466 * Calling it several times in a row is not permitted.
2467 *
2468 * @returns VBox status code.
2469 *
2470 * @param pVM VM Handle.
2471 * @param pVCpu VMCPU Handle.
2472 */
2473REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2474{
2475 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2476 Assert(pCtx);
2477 unsigned i;
2478
2479 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2480 Log2(("REMR3StateBack:\n"));
2481 Assert(pVM->rem.s.fInREM);
2482
2483 /*
2484 * Copy back the registers.
2485 * This is done in the order they are declared in the CPUMCTX structure.
2486 */
2487
2488 /** @todo FOP */
2489 /** @todo FPUIP */
2490 /** @todo CS */
2491 /** @todo FPUDP */
2492 /** @todo DS */
2493
2494 /** @todo check if FPU/XMM was actually used in the recompiler */
2495 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2496//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2497
2498#ifdef TARGET_X86_64
2499 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2500 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2501 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2502 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2503 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2504 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2505 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2506 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2507 pCtx->r8 = pVM->rem.s.Env.regs[8];
2508 pCtx->r9 = pVM->rem.s.Env.regs[9];
2509 pCtx->r10 = pVM->rem.s.Env.regs[10];
2510 pCtx->r11 = pVM->rem.s.Env.regs[11];
2511 pCtx->r12 = pVM->rem.s.Env.regs[12];
2512 pCtx->r13 = pVM->rem.s.Env.regs[13];
2513 pCtx->r14 = pVM->rem.s.Env.regs[14];
2514 pCtx->r15 = pVM->rem.s.Env.regs[15];
2515
2516 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2517
2518#else
2519 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2520 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2521 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2522 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2523 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2524 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2525 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2526
2527 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2528#endif
2529
2530 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2531
2532#ifdef VBOX_WITH_STATISTICS
2533 if (pVM->rem.s.Env.segs[R_SS].newselector)
2534 {
2535 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2536 }
2537 if (pVM->rem.s.Env.segs[R_GS].newselector)
2538 {
2539 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2540 }
2541 if (pVM->rem.s.Env.segs[R_FS].newselector)
2542 {
2543 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2544 }
2545 if (pVM->rem.s.Env.segs[R_ES].newselector)
2546 {
2547 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2548 }
2549 if (pVM->rem.s.Env.segs[R_DS].newselector)
2550 {
2551 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2552 }
2553 if (pVM->rem.s.Env.segs[R_CS].newselector)
2554 {
2555 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2556 }
2557#endif
2558 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2559 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2560 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2561 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2562 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2563
2564#ifdef TARGET_X86_64
2565 pCtx->rip = pVM->rem.s.Env.eip;
2566 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2567#else
2568 pCtx->eip = pVM->rem.s.Env.eip;
2569 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2570#endif
2571
2572 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2573 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2574 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2575 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2576 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2577 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2578
2579 for (i = 0; i < 8; i++)
2580 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2581
2582 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2583 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2584 {
2585 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2586 STAM_COUNTER_INC(&gStatREMGDTChange);
2587 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2588 }
2589
2590 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2591 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2592 {
2593 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2594 STAM_COUNTER_INC(&gStatREMIDTChange);
2595 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2596 }
2597
2598 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2599 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2600 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2601 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2602 {
2603 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2604 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2605 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2606 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2607 STAM_COUNTER_INC(&gStatREMLDTRChange);
2608 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2609 }
2610
2611 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2612 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2613 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2614 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2615 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2616 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2617 : 0) )
2618 {
2619 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2620 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2621 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2622 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2623 pCtx->tr = pVM->rem.s.Env.tr.selector;
2624 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2625 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2626 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2627 if (pCtx->trHid.Attr.u)
2628 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2629 STAM_COUNTER_INC(&gStatREMTRChange);
2630 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2631 }
2632
2633 /** @todo These values could still be out of sync! */
2634 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2635 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2636 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2637 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2638
2639 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2640 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2641 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2642
2643 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2644 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2645 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2646
2647 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2648 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2649 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2650
2651 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2652 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2653 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2654
2655 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2656 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2657 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2658
2659 /* Sysenter MSR */
2660 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2661 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2662 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2663
2664 /* System MSRs. */
2665 pCtx->msrEFER = pVM->rem.s.Env.efer;
2666 pCtx->msrSTAR = pVM->rem.s.Env.star;
2667 pCtx->msrPAT = pVM->rem.s.Env.pat;
2668#ifdef TARGET_X86_64
2669 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2670 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2671 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2672 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2673#endif
2674
2675 remR3TrapClear(pVM);
2676
2677 /*
2678 * Check for traps.
2679 */
2680 if ( pVM->rem.s.Env.exception_index >= 0
2681 && pVM->rem.s.Env.exception_index < 256)
2682 {
2683 int rc;
2684
2685 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2686 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2687 AssertRC(rc);
2688 switch (pVM->rem.s.Env.exception_index)
2689 {
2690 case 0x0e:
2691 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2692 /* fallthru */
2693 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2694 case 0x11: case 0x08: /* 0 */
2695 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2696 break;
2697 }
2698
2699 }
2700
2701 /*
2702 * We're not longer in REM mode.
2703 */
2704 CPUMR3RemLeave(pVCpu,
2705 HWACCMIsEnabled(pVM)
2706 || ( pVM->rem.s.Env.segs[R_SS].newselector
2707 | pVM->rem.s.Env.segs[R_GS].newselector
2708 | pVM->rem.s.Env.segs[R_FS].newselector
2709 | pVM->rem.s.Env.segs[R_ES].newselector
2710 | pVM->rem.s.Env.segs[R_DS].newselector
2711 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2712 );
2713 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2714 pVM->rem.s.fInREM = false;
2715 pVM->rem.s.pCtx = NULL;
2716 pVM->rem.s.Env.pVCpu = NULL;
2717 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2718 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2719 return VINF_SUCCESS;
2720}
2721
2722
2723/**
2724 * This is called by the disassembler when it wants to update the cpu state
2725 * before for instance doing a register dump.
2726 */
2727static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2728{
2729 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2730 unsigned i;
2731
2732 Assert(pVM->rem.s.fInREM);
2733
2734 /*
2735 * Copy back the registers.
2736 * This is done in the order they are declared in the CPUMCTX structure.
2737 */
2738
2739 /** @todo FOP */
2740 /** @todo FPUIP */
2741 /** @todo CS */
2742 /** @todo FPUDP */
2743 /** @todo DS */
2744 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2745 pCtx->fpu.MXCSR = 0;
2746 pCtx->fpu.MXCSR_MASK = 0;
2747
2748 /** @todo check if FPU/XMM was actually used in the recompiler */
2749 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2750//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2751
2752#ifdef TARGET_X86_64
2753 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2754 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2755 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2756 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2757 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2758 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2759 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2760 pCtx->r8 = pVM->rem.s.Env.regs[8];
2761 pCtx->r9 = pVM->rem.s.Env.regs[9];
2762 pCtx->r10 = pVM->rem.s.Env.regs[10];
2763 pCtx->r11 = pVM->rem.s.Env.regs[11];
2764 pCtx->r12 = pVM->rem.s.Env.regs[12];
2765 pCtx->r13 = pVM->rem.s.Env.regs[13];
2766 pCtx->r14 = pVM->rem.s.Env.regs[14];
2767 pCtx->r15 = pVM->rem.s.Env.regs[15];
2768
2769 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2770#else
2771 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2772 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2773 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2774 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2775 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2776 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2777 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2778
2779 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2780#endif
2781
2782 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2783
2784 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2785 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2786 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2787 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2788 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2789
2790#ifdef TARGET_X86_64
2791 pCtx->rip = pVM->rem.s.Env.eip;
2792 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2793#else
2794 pCtx->eip = pVM->rem.s.Env.eip;
2795 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2796#endif
2797
2798 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2799 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2800 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2801 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2802 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2803 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2804
2805 for (i = 0; i < 8; i++)
2806 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2807
2808 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2809 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2810 {
2811 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2812 STAM_COUNTER_INC(&gStatREMGDTChange);
2813 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2814 }
2815
2816 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2817 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2818 {
2819 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2820 STAM_COUNTER_INC(&gStatREMIDTChange);
2821 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2822 }
2823
2824 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2825 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2826 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2827 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2828 {
2829 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2830 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2831 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2832 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2833 STAM_COUNTER_INC(&gStatREMLDTRChange);
2834 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2835 }
2836
2837 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2838 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2839 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2840 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2841 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2842 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2843 : 0) )
2844 {
2845 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2846 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2847 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2848 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2849 pCtx->tr = pVM->rem.s.Env.tr.selector;
2850 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2851 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2852 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2853 if (pCtx->trHid.Attr.u)
2854 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2855 STAM_COUNTER_INC(&gStatREMTRChange);
2856 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2857 }
2858
2859 /** @todo These values could still be out of sync! */
2860 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2861 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2862 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2863 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2864
2865 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2866 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2867 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2868
2869 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2870 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2871 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2872
2873 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2874 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2875 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2876
2877 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2878 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2879 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2880
2881 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2882 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2883 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2884
2885 /* Sysenter MSR */
2886 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2887 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2888 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2889
2890 /* System MSRs. */
2891 pCtx->msrEFER = pVM->rem.s.Env.efer;
2892 pCtx->msrSTAR = pVM->rem.s.Env.star;
2893 pCtx->msrPAT = pVM->rem.s.Env.pat;
2894#ifdef TARGET_X86_64
2895 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2896 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2897 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2898 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2899#endif
2900
2901}
2902
2903
2904/**
2905 * Update the VMM state information if we're currently in REM.
2906 *
2907 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2908 * we're currently executing in REM and the VMM state is invalid. This method will of
2909 * course check that we're executing in REM before syncing any data over to the VMM.
2910 *
2911 * @param pVM The VM handle.
2912 * @param pVCpu The VMCPU handle.
2913 */
2914REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2915{
2916 if (pVM->rem.s.fInREM)
2917 remR3StateUpdate(pVM, pVCpu);
2918}
2919
2920
2921#undef LOG_GROUP
2922#define LOG_GROUP LOG_GROUP_REM
2923
2924
/**
 * Notify the recompiler about Address Gate 20 state change.
 *
 * This notification is required since A20 gate changes are
 * initialized from a device driver and the VM might just as
 * well be in REM mode as in RAW mode.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Raise cIgnoreAll while poking the emulated CPU state — the same guard
       the memory registration notifications below use to keep REM callbacks
       from being re-entered. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2946
2947
/**
 * Replays the handler notification changes
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * Other EMTs queue notification records lock-free on
 * pVM->rem.s.idxPendingList; this function drains that list and applies each
 * record through the remR3NotifyHandlerPhysical* workers, returning the
 * records to the free list as it goes.
 *
 * @param   pVM         VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;      /* record count, sanity checked in strict builds */
#endif

        /* Lockless purging of pending notifications: detach the entire
           pending list in a single atomic exchange. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         * (Records are pushed onto the list head, so the detached list is
         * newest-first.)
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch to the worker matching the record kind. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (CAS loop: other EMTs may be popping entries concurrently.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3071
3072
/**
 * Notify REM about changed code page.
 *
 * @returns VBox status code (always VINF_SUCCESS; a failed page lookup
 *          simply skips the flush).
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pvCodePage  Code page address
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
3111
3112
3113/**
3114 * Notification about a successful MMR3PhysRegister() call.
3115 *
3116 * @param pVM VM handle.
3117 * @param GCPhys The physical address the RAM.
3118 * @param cb Size of the memory.
3119 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3120 */
3121REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3122{
3123 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3124 VM_ASSERT_EMT(pVM);
3125
3126 /*
3127 * Validate input - we trust the caller.
3128 */
3129 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3130 Assert(cb);
3131 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3132 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3133
3134 /*
3135 * Base ram? Update GCPhysLastRam.
3136 */
3137 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3138 {
3139 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3140 {
3141 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3142 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3143 }
3144 }
3145
3146 /*
3147 * Register the ram.
3148 */
3149 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3150
3151 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3152 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3153 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3154
3155 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3156}
3157
3158
3159/**
3160 * Notification about a successful MMR3PhysRomRegister() call.
3161 *
3162 * @param pVM VM handle.
3163 * @param GCPhys The physical address of the ROM.
3164 * @param cb The size of the ROM.
3165 * @param pvCopy Pointer to the ROM copy.
3166 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3167 * This function will be called when ever the protection of the
3168 * shadow ROM changes (at reset and end of POST).
3169 */
3170REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3171{
3172 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3173 VM_ASSERT_EMT(pVM);
3174
3175 /*
3176 * Validate input - we trust the caller.
3177 */
3178 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3179 Assert(cb);
3180 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3181
3182 /*
3183 * Register the rom.
3184 */
3185 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3186
3187 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3188 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
3189 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3190
3191 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3192}
3193
3194
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address.
 * @param   cb          The size of the range.
 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory: map the range as unassigned so accesses fault
     * into the recompiler's unassigned-memory handling.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3225
3226
3227/**
3228 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3229 *
3230 * @param pVM VM Handle.
3231 * @param enmType Handler type.
3232 * @param GCPhys Handler range address.
3233 * @param cb Size of the handler range.
3234 * @param fHasHCHandler Set if the handler has a HC callback function.
3235 *
3236 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3237 * Handler memory type to memory which has no HC handler.
3238 */
3239static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3240{
3241 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3242 enmType, GCPhys, cb, fHasHCHandler));
3243
3244 VM_ASSERT_EMT(pVM);
3245 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3246 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3247
3248
3249 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3250
3251 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3252 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3253 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3254 else if (fHasHCHandler)
3255 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3256 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3257
3258 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3259}
3260
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * Public entry point: replays any queued handler notifications first so the
 * new registration is applied in order, then calls the worker directly.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *
 * @remark  MMR3PhysRomRegister assumes that this function will not apply the
 *          Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
}
3279
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Range lies above guest RAM, so unassigned is the right mapping. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            /* Map the range straight back as ordinary RAM. */
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3321
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * Public entry point: flushes the queued notifications first, then invokes
 * the deregistration worker.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    REMR3ReplayHandlerNotifications(pVM);
    remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
}
3337
3338
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* MMIO ranges are never moved this way. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    if (fHasHCHandler)
    {
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

        /*
         * Reset the old page.
         */
        PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
        PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }
}
3386
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * Public entry point: flushes the queued notifications first, then invokes
 * the modify worker.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
}
3404
3405/**
3406 * Checks if we're handling access to this page or not.
3407 *
3408 * @returns true if we're trapping access.
3409 * @returns false if we aren't.
3410 * @param pVM The VM handle.
3411 * @param GCPhys The physical address.
3412 *
3413 * @remark This function will only work correctly in VBOX_STRICT builds!
3414 */
3415REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3416{
3417#ifdef VBOX_STRICT
3418 unsigned long off;
3419 REMR3ReplayHandlerNotifications(pVM);
3420
3421 off = get_phys_page_offset(GCPhys);
3422 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3423 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3424 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3425#else
3426 return false;
3427#endif
3428}
3429
3430
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr (only for monitored
 *          code pages; the MMIO path aborts and does not return).
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry for the page (memory type in the
 *                      low bits, physical addend in the page-aligned bits).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState*       env,
                                             target_ulong    addr,
                                             CPUTLBEntry*    pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored code page: executing from MMIO/unknown memory is fatal.
       Dump handler/MMIO/phys info to the release log before aborting. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3469
/**
 * Read guest RAM and ROM.
 *
 * @param   SrcGCPhys       The source address (guest physical).
 * @param   pvDst           The destination address.
 * @param   cb              Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    /* cpu_single_env holds the CPU state of the EMT currently in REM. */
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3487
3488
3489/**
3490 * Read guest RAM and ROM, unsigned 8-bit.
3491 *
3492 * @param SrcGCPhys The source address (guest physical).
3493 */
3494RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3495{
3496 uint8_t val;
3497 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3498 VBOX_CHECK_ADDR(SrcGCPhys);
3499 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3500 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3501#ifdef VBOX_DEBUG_PHYS
3502 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3503#endif
3504 return val;
3505}
3506
3507
3508/**
3509 * Read guest RAM and ROM, signed 8-bit.
3510 *
3511 * @param SrcGCPhys The source address (guest physical).
3512 */
3513RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3514{
3515 int8_t val;
3516 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3517 VBOX_CHECK_ADDR(SrcGCPhys);
3518 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3519 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3520#ifdef VBOX_DEBUG_PHYS
3521 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3522#endif
3523 return val;
3524}
3525
3526
3527/**
3528 * Read guest RAM and ROM, unsigned 16-bit.
3529 *
3530 * @param SrcGCPhys The source address (guest physical).
3531 */
3532RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3533{
3534 uint16_t val;
3535 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3536 VBOX_CHECK_ADDR(SrcGCPhys);
3537 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3538 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3539#ifdef VBOX_DEBUG_PHYS
3540 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3541#endif
3542 return val;
3543}
3544
3545
3546/**
3547 * Read guest RAM and ROM, signed 16-bit.
3548 *
3549 * @param SrcGCPhys The source address (guest physical).
3550 */
3551RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3552{
3553 int16_t val;
3554 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3555 VBOX_CHECK_ADDR(SrcGCPhys);
3556 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3557 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3558#ifdef VBOX_DEBUG_PHYS
3559 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3560#endif
3561 return val;
3562}
3563
3564
3565/**
3566 * Read guest RAM and ROM, unsigned 32-bit.
3567 *
3568 * @param SrcGCPhys The source address (guest physical).
3569 */
3570RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3571{
3572 uint32_t val;
3573 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3574 VBOX_CHECK_ADDR(SrcGCPhys);
3575 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3576 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3577#ifdef VBOX_DEBUG_PHYS
3578 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3579#endif
3580 return val;
3581}
3582
3583
3584/**
3585 * Read guest RAM and ROM, signed 32-bit.
3586 *
3587 * @param SrcGCPhys The source address (guest physical).
3588 */
3589RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3590{
3591 int32_t val;
3592 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3593 VBOX_CHECK_ADDR(SrcGCPhys);
3594 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3595 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3596#ifdef VBOX_DEBUG_PHYS
3597 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3598#endif
3599 return val;
3600}
3601
3602
3603/**
3604 * Read guest RAM and ROM, unsigned 64-bit.
3605 *
3606 * @param SrcGCPhys The source address (guest physical).
3607 */
3608uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3609{
3610 uint64_t val;
3611 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3612 VBOX_CHECK_ADDR(SrcGCPhys);
3613 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3614 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3615#ifdef VBOX_DEBUG_PHYS
3616 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3617#endif
3618 return val;
3619}
3620
3621
3622/**
3623 * Read guest RAM and ROM, signed 64-bit.
3624 *
3625 * @param SrcGCPhys The source address (guest physical).
3626 */
3627int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3628{
3629 int64_t val;
3630 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3631 VBOX_CHECK_ADDR(SrcGCPhys);
3632 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3633 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3634#ifdef VBOX_DEBUG_PHYS
3635 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3636#endif
3637 return val;
3638}
3639
3640
/**
 * Write guest RAM.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   pvSrc           The source address.
 * @param   cb              Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    /* cpu_single_env holds the CPU state of the EMT currently in REM. */
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3658
3659
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value to write.
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3676
3677
/**
 * Write guest RAM, unsigned 16-bit.
 * (Header fixed: it previously said "unsigned 8-bit" — a copy-paste slip.)
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value to write.
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3694
3695
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value to write.
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3712
3713
3714/**
3715 * Write guest RAM, unsigned 64-bit.
3716 *
3717 * @param DstGCPhys The destination address (guest physical).
3718 * @param val Value
3719 */
3720void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3721{
3722 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3723 VBOX_CHECK_ADDR(DstGCPhys);
3724 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3725 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3726#ifdef VBOX_DEBUG_PHYS
3727 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3728#endif
3729}
3730
3731#undef LOG_GROUP
3732#define LOG_GROUP LOG_GROUP_REM_MMIO
3733
3734/** Read MMIO memory. */
3735static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3736{
3737 uint32_t u32 = 0;
3738 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3739 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3740 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3741 return u32;
3742}
3743
3744/** Read MMIO memory. */
3745static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3746{
3747 uint32_t u32 = 0;
3748 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3749 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3750 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3751 return u32;
3752}
3753
3754/** Read MMIO memory. */
3755static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3756{
3757 uint32_t u32 = 0;
3758 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3759 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3760 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3761 return u32;
3762}
3763
3764/** Write to MMIO memory. */
3765static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3766{
3767 int rc;
3768 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3769 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3770 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3771}
3772
3773/** Write to MMIO memory. */
3774static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3775{
3776 int rc;
3777 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3778 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3779 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3780}
3781
3782/** Write to MMIO memory. */
3783static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3784{
3785 int rc;
3786 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3787 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3788 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3789}
3790
3791
3792#undef LOG_GROUP
3793#define LOG_GROUP LOG_GROUP_REM_HANDLER
3794
3795/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3796
3797static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3798{
3799 uint8_t u8;
3800 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3801 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3802 return u8;
3803}
3804
3805static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3806{
3807 uint16_t u16;
3808 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3809 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3810 return u16;
3811}
3812
3813static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3814{
3815 uint32_t u32;
3816 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3817 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3818 return u32;
3819}
3820
3821static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3822{
3823 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3824 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3825}
3826
3827static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3828{
3829 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3830 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3831}
3832
3833static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3834{
3835 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3836 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3837}
3838
3839/* -+- disassembly -+- */
3840
3841#undef LOG_GROUP
3842#define LOG_GROUP LOG_GROUP_REM_DISAS
3843
3844
3845/**
3846 * Enables or disables singled stepped disassembly.
3847 *
3848 * @returns VBox status code.
3849 * @param pVM VM handle.
3850 * @param fEnable To enable set this flag, to disable clear it.
3851 */
3852static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3853{
3854 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3855 VM_ASSERT_EMT(pVM);
3856
3857 if (fEnable)
3858 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3859 else
3860 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3861#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3862 cpu_single_step(&pVM->rem.s.Env, fEnable);
3863#endif
3864 return VINF_SUCCESS;
3865}
3866
3867
3868/**
3869 * Enables or disables singled stepped disassembly.
3870 *
3871 * @returns VBox status code.
3872 * @param pVM VM handle.
3873 * @param fEnable To enable set this flag, to disable clear it.
3874 */
3875REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3876{
3877 int rc;
3878
3879 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3880 if (VM_IS_EMT(pVM))
3881 return remR3DisasEnableStepping(pVM, fEnable);
3882
3883 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3884 AssertRC(rc);
3885 return rc;
3886}
3887
3888
3889#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3890/**
3891 * External Debugger Command: .remstep [on|off|1|0]
3892 */
3893static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3894{
3895 int rc;
3896
3897 if (cArgs == 0)
3898 /*
3899 * Print the current status.
3900 */
3901 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3902 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3903 else
3904 {
3905 /*
3906 * Convert the argument and change the mode.
3907 */
3908 bool fEnable;
3909 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3910 if (RT_SUCCESS(rc))
3911 {
3912 rc = REMR3DisasEnableStepping(pVM, fEnable);
3913 if (RT_SUCCESS(rc))
3914 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3915 else
3916 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3917 }
3918 else
3919 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3920 }
3921 return rc;
3922}
3923#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3924
3925
3926/**
3927 * Disassembles one instruction and prints it to the log.
3928 *
3929 * @returns Success indicator.
3930 * @param env Pointer to the recompiler CPU structure.
3931 * @param f32BitCode Indicates that whether or not the code should
3932 * be disassembled as 16 or 32 bit. If -1 the CS
3933 * selector will be inspected.
3934 * @param pszPrefix
3935 */
3936bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3937{
3938 PVM pVM = env->pVM;
3939 const bool fLog = LogIsEnabled();
3940 const bool fLog2 = LogIs2Enabled();
3941 int rc = VINF_SUCCESS;
3942
3943 /*
3944 * Don't bother if there ain't any log output to do.
3945 */
3946 if (!fLog && !fLog2)
3947 return true;
3948
3949 /*
3950 * Update the state so DBGF reads the correct register values.
3951 */
3952 remR3StateUpdate(pVM, env->pVCpu);
3953
3954 /*
3955 * Log registers if requested.
3956 */
3957 if (fLog2)
3958 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3959
3960 /*
3961 * Disassemble to log.
3962 */
3963 if (fLog)
3964 {
3965 PVMCPU pVCpu = VMMGetCpu(pVM);
3966 char szBuf[256];
3967 szBuf[0] = '\0';
3968 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3969 pVCpu->idCpu,
3970 0, /* Sel */
3971 0, /* GCPtr */
3972 DBGF_DISAS_FLAGS_CURRENT_GUEST
3973 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3974 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3975 szBuf,
3976 sizeof(szBuf),
3977 NULL);
3978 if (RT_FAILURE(rc))
3979 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3980 if (pszPrefix && *pszPrefix)
3981 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3982 else
3983 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3984 }
3985
3986 return RT_SUCCESS(rc);
3987}
3988
3989
3990/**
3991 * Disassemble recompiled code.
3992 *
3993 * @param phFileIgnored Ignored, logfile usually.
3994 * @param pvCode Pointer to the code block.
3995 * @param cb Size of the code block.
3996 */
3997void disas(FILE *phFile, void *pvCode, unsigned long cb)
3998{
3999#ifdef DEBUG_TMP_LOGGING
4000# define DISAS_PRINTF(x...) fprintf(phFile, x)
4001#else
4002# define DISAS_PRINTF(x...) RTLogPrintf(x)
4003 if (LogIs2Enabled())
4004#endif
4005 {
4006 unsigned off = 0;
4007 char szOutput[256];
4008 DISCPUSTATE Cpu;
4009
4010 memset(&Cpu, 0, sizeof(Cpu));
4011#ifdef RT_ARCH_X86
4012 Cpu.mode = CPUMODE_32BIT;
4013#else
4014 Cpu.mode = CPUMODE_64BIT;
4015#endif
4016
4017 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4018 while (off < cb)
4019 {
4020 uint32_t cbInstr;
4021 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4022 DISAS_PRINTF("%s", szOutput);
4023 else
4024 {
4025 DISAS_PRINTF("disas error\n");
4026 cbInstr = 1;
4027#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4028 break;
4029#endif
4030 }
4031 off += cbInstr;
4032 }
4033 }
4034
4035#undef DISAS_PRINTF
4036}
4037
4038
4039/**
4040 * Disassemble guest code.
4041 *
4042 * @param phFileIgnored Ignored, logfile usually.
4043 * @param uCode The guest address of the code to disassemble. (flat?)
4044 * @param cb Number of bytes to disassemble.
4045 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4046 */
4047void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4048{
4049#ifdef DEBUG_TMP_LOGGING
4050# define DISAS_PRINTF(x...) fprintf(phFile, x)
4051#else
4052# define DISAS_PRINTF(x...) RTLogPrintf(x)
4053 if (LogIs2Enabled())
4054#endif
4055 {
4056 PVM pVM = cpu_single_env->pVM;
4057 PVMCPU pVCpu = cpu_single_env->pVCpu;
4058 RTSEL cs;
4059 RTGCUINTPTR eip;
4060
4061 Assert(pVCpu);
4062
4063 /*
4064 * Update the state so DBGF reads the correct register values (flags).
4065 */
4066 remR3StateUpdate(pVM, pVCpu);
4067
4068 /*
4069 * Do the disassembling.
4070 */
4071 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4072 cs = cpu_single_env->segs[R_CS].selector;
4073 eip = uCode - cpu_single_env->segs[R_CS].base;
4074 for (;;)
4075 {
4076 char szBuf[256];
4077 uint32_t cbInstr;
4078 int rc = DBGFR3DisasInstrEx(pVM,
4079 pVCpu->idCpu,
4080 cs,
4081 eip,
4082 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4083 szBuf, sizeof(szBuf),
4084 &cbInstr);
4085 if (RT_SUCCESS(rc))
4086 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
4087 else
4088 {
4089 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4090 cbInstr = 1;
4091 }
4092
4093 /* next */
4094 if (cb <= cbInstr)
4095 break;
4096 cb -= cbInstr;
4097 uCode += cbInstr;
4098 eip += cbInstr;
4099 }
4100 }
4101#undef DISAS_PRINTF
4102}
4103
4104
4105/**
4106 * Looks up a guest symbol.
4107 *
4108 * @returns Pointer to symbol name. This is a static buffer.
4109 * @param orig_addr The address in question.
4110 */
4111const char *lookup_symbol(target_ulong orig_addr)
4112{
4113 PVM pVM = cpu_single_env->pVM;
4114 RTGCINTPTR off = 0;
4115 RTDBGSYMBOL Sym;
4116 DBGFADDRESS Addr;
4117
4118 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4119 if (RT_SUCCESS(rc))
4120 {
4121 static char szSym[sizeof(Sym.szName) + 48];
4122 if (!off)
4123 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4124 else if (off > 0)
4125 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4126 else
4127 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4128 return szSym;
4129 }
4130 return "<N/A>";
4131}
4132
4133
4134#undef LOG_GROUP
4135#define LOG_GROUP LOG_GROUP_REM
4136
4137
4138/* -+- FF notifications -+- */
4139
4140
4141/**
4142 * Notification about a pending interrupt.
4143 *
4144 * @param pVM VM Handle.
4145 * @param pVCpu VMCPU Handle.
4146 * @param u8Interrupt Interrupt
4147 * @thread The emulation thread.
4148 */
4149REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4150{
4151 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4152 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4153}
4154
4155/**
4156 * Notification about a pending interrupt.
4157 *
4158 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4159 * @param pVM VM Handle.
4160 * @param pVCpu VMCPU Handle.
4161 * @thread The emulation thread.
4162 */
4163REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4164{
4165 return pVM->rem.s.u32PendingInterrupt;
4166}
4167
4168/**
4169 * Notification about the interrupt FF being set.
4170 *
4171 * @param pVM VM Handle.
4172 * @param pVCpu VMCPU Handle.
4173 * @thread The emulation thread.
4174 */
4175REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4176{
4177 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4178 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4179 if (pVM->rem.s.fInREM)
4180 {
4181 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4182 CPU_INTERRUPT_EXTERNAL_HARD);
4183 }
4184}
4185
4186
4187/**
4188 * Notification about the interrupt FF being set.
4189 *
4190 * @param pVM VM Handle.
4191 * @param pVCpu VMCPU Handle.
4192 * @thread Any.
4193 */
4194REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4195{
4196 LogFlow(("REMR3NotifyInterruptClear:\n"));
4197 if (pVM->rem.s.fInREM)
4198 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4199}
4200
4201
4202/**
4203 * Notification about pending timer(s).
4204 *
4205 * @param pVM VM Handle.
4206 * @param pVCpuDst The target cpu for this notification.
4207 * TM will not broadcast pending timer events, but use
4208 * a dedicated EMT for them. So, only interrupt REM
4209 * execution if the given CPU is executing in REM.
4210 * @thread Any.
4211 */
4212REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4213{
4214#ifndef DEBUG_bird
4215 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4216#endif
4217 if (pVM->rem.s.fInREM)
4218 {
4219 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4220 {
4221 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4222 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4223 CPU_INTERRUPT_EXTERNAL_TIMER);
4224 }
4225 else
4226 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4227 }
4228 else
4229 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4230}
4231
4232
4233/**
4234 * Notification about pending DMA transfers.
4235 *
4236 * @param pVM VM Handle.
4237 * @thread Any.
4238 */
4239REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4240{
4241 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4242 if (pVM->rem.s.fInREM)
4243 {
4244 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4245 CPU_INTERRUPT_EXTERNAL_DMA);
4246 }
4247}
4248
4249
4250/**
4251 * Notification about pending timer(s).
4252 *
4253 * @param pVM VM Handle.
4254 * @thread Any.
4255 */
4256REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4257{
4258 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4259 if (pVM->rem.s.fInREM)
4260 {
4261 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4262 CPU_INTERRUPT_EXTERNAL_EXIT);
4263 }
4264}
4265
4266
4267/**
4268 * Notification about pending FF set by an external thread.
4269 *
4270 * @param pVM VM handle.
4271 * @thread Any.
4272 */
4273REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4274{
4275 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4276 if (pVM->rem.s.fInREM)
4277 {
4278 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4279 CPU_INTERRUPT_EXTERNAL_EXIT);
4280 }
4281}
4282
4283
4284#ifdef VBOX_WITH_STATISTICS
4285void remR3ProfileStart(int statcode)
4286{
4287 STAMPROFILEADV *pStat;
4288 switch(statcode)
4289 {
4290 case STATS_EMULATE_SINGLE_INSTR:
4291 pStat = &gStatExecuteSingleInstr;
4292 break;
4293 case STATS_QEMU_COMPILATION:
4294 pStat = &gStatCompilationQEmu;
4295 break;
4296 case STATS_QEMU_RUN_EMULATED_CODE:
4297 pStat = &gStatRunCodeQEmu;
4298 break;
4299 case STATS_QEMU_TOTAL:
4300 pStat = &gStatTotalTimeQEmu;
4301 break;
4302 case STATS_QEMU_RUN_TIMERS:
4303 pStat = &gStatTimers;
4304 break;
4305 case STATS_TLB_LOOKUP:
4306 pStat= &gStatTBLookup;
4307 break;
4308 case STATS_IRQ_HANDLING:
4309 pStat= &gStatIRQ;
4310 break;
4311 case STATS_RAW_CHECK:
4312 pStat = &gStatRawCheck;
4313 break;
4314
4315 default:
4316 AssertMsgFailed(("unknown stat %d\n", statcode));
4317 return;
4318 }
4319 STAM_PROFILE_ADV_START(pStat, a);
4320}
4321
4322
4323void remR3ProfileStop(int statcode)
4324{
4325 STAMPROFILEADV *pStat;
4326 switch(statcode)
4327 {
4328 case STATS_EMULATE_SINGLE_INSTR:
4329 pStat = &gStatExecuteSingleInstr;
4330 break;
4331 case STATS_QEMU_COMPILATION:
4332 pStat = &gStatCompilationQEmu;
4333 break;
4334 case STATS_QEMU_RUN_EMULATED_CODE:
4335 pStat = &gStatRunCodeQEmu;
4336 break;
4337 case STATS_QEMU_TOTAL:
4338 pStat = &gStatTotalTimeQEmu;
4339 break;
4340 case STATS_QEMU_RUN_TIMERS:
4341 pStat = &gStatTimers;
4342 break;
4343 case STATS_TLB_LOOKUP:
4344 pStat= &gStatTBLookup;
4345 break;
4346 case STATS_IRQ_HANDLING:
4347 pStat= &gStatIRQ;
4348 break;
4349 case STATS_RAW_CHECK:
4350 pStat = &gStatRawCheck;
4351 break;
4352 default:
4353 AssertMsgFailed(("unknown stat %d\n", statcode));
4354 return;
4355 }
4356 STAM_PROFILE_ADV_STOP(pStat, a);
4357}
4358#endif
4359
4360/**
4361 * Raise an RC, force rem exit.
4362 *
4363 * @param pVM VM handle.
4364 * @param rc The rc.
4365 */
4366void remR3RaiseRC(PVM pVM, int rc)
4367{
4368 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4369 Assert(pVM->rem.s.fInREM);
4370 VM_ASSERT_EMT(pVM);
4371 pVM->rem.s.rc = rc;
4372 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4373}
4374
4375
4376/* -+- timers -+- */
4377
4378uint64_t cpu_get_tsc(CPUX86State *env)
4379{
4380 STAM_COUNTER_INC(&gStatCpuGetTSC);
4381 return TMCpuTickGet(env->pVCpu);
4382}
4383
4384
4385/* -+- interrupts -+- */
4386
4387void cpu_set_ferr(CPUX86State *env)
4388{
4389 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4390 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4391}
4392
/**
 * Gets the next hardware interrupt vector for the recompiler to service.
 *
 * @returns The interrupt vector, or -1 if none could be retrieved.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the interrupt recorded by REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    /* NOTE(review): u8Interrupt is logged even when PDMGetInterrupt failed,
       in which case it may be uninitialized - harmless, logging only. */
    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts pending? Keep the hard interrupt request flag up. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4426
4427
4428/* -+- local apic -+- */
4429
4430#if 0 /* CPUMSetGuestMsr does this now. */
4431void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4432{
4433 int rc = PDMApicSetBase(env->pVM, val);
4434 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4435}
4436#endif
4437
4438uint64_t cpu_get_apic_base(CPUX86State *env)
4439{
4440 uint64_t u64;
4441 int rc = PDMApicGetBase(env->pVM, &u64);
4442 if (RT_SUCCESS(rc))
4443 {
4444 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4445 return u64;
4446 }
4447 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4448 return 0;
4449}
4450
4451void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4452{
4453 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4454 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4455}
4456
4457uint8_t cpu_get_apic_tpr(CPUX86State *env)
4458{
4459 uint8_t u8;
4460 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4461 if (RT_SUCCESS(rc))
4462 {
4463 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4464 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4465 }
4466 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4467 return 0;
4468}
4469
4470/**
4471 * Read an MSR.
4472 *
4473 * @retval 0 success.
4474 * @retval -1 failure, raise \#GP(0).
4475 * @param env The cpu state.
4476 * @param idMsr The MSR to read.
4477 * @param puValue Where to return the value.
4478 */
4479int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4480{
4481 Assert(env->pVCpu);
4482 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4483}
4484
4485/**
4486 * Write to an MSR.
4487 *
4488 * @retval 0 success.
4489 * @retval -1 failure, raise \#GP(0).
4490 * @param env The cpu state.
4491 * @param idMsr The MSR to read.
4492 * @param puValue Where to return the value.
4493 */
4494int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4495{
4496 Assert(env->pVCpu);
4497 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4498}
4499
4500/* -+- I/O Ports -+- */
4501
4502#undef LOG_GROUP
4503#define LOG_GROUP LOG_GROUP_REM_IOPORT
4504
4505void cpu_outb(CPUState *env, int addr, int val)
4506{
4507 int rc;
4508
4509 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4510 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4511
4512 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4513 if (RT_LIKELY(rc == VINF_SUCCESS))
4514 return;
4515 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4516 {
4517 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4518 remR3RaiseRC(env->pVM, rc);
4519 return;
4520 }
4521 remAbort(rc, __FUNCTION__);
4522}
4523
4524void cpu_outw(CPUState *env, int addr, int val)
4525{
4526 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4527 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4528 if (RT_LIKELY(rc == VINF_SUCCESS))
4529 return;
4530 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4531 {
4532 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4533 remR3RaiseRC(env->pVM, rc);
4534 return;
4535 }
4536 remAbort(rc, __FUNCTION__);
4537}
4538
4539void cpu_outl(CPUState *env, int addr, int val)
4540{
4541 int rc;
4542 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4543 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4544 if (RT_LIKELY(rc == VINF_SUCCESS))
4545 return;
4546 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4547 {
4548 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4549 remR3RaiseRC(env->pVM, rc);
4550 return;
4551 }
4552 remAbort(rc, __FUNCTION__);
4553}
4554
4555int cpu_inb(CPUState *env, int addr)
4556{
4557 uint32_t u32 = 0;
4558 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4559 if (RT_LIKELY(rc == VINF_SUCCESS))
4560 {
4561 if (/*addr != 0x61 && */addr != 0x71)
4562 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4563 return (int)u32;
4564 }
4565 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4566 {
4567 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4568 remR3RaiseRC(env->pVM, rc);
4569 return (int)u32;
4570 }
4571 remAbort(rc, __FUNCTION__);
4572 return 0xff;
4573}
4574
4575int cpu_inw(CPUState *env, int addr)
4576{
4577 uint32_t u32 = 0;
4578 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4579 if (RT_LIKELY(rc == VINF_SUCCESS))
4580 {
4581 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4582 return (int)u32;
4583 }
4584 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4585 {
4586 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4587 remR3RaiseRC(env->pVM, rc);
4588 return (int)u32;
4589 }
4590 remAbort(rc, __FUNCTION__);
4591 return 0xffff;
4592}
4593
4594int cpu_inl(CPUState *env, int addr)
4595{
4596 uint32_t u32 = 0;
4597 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4598 if (RT_LIKELY(rc == VINF_SUCCESS))
4599 {
4600//if (addr==0x01f0 && u32 == 0x6b6d)
4601// loglevel = ~0;
4602 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4603 return (int)u32;
4604 }
4605 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4606 {
4607 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4608 remR3RaiseRC(env->pVM, rc);
4609 return (int)u32;
4610 }
4611 remAbort(rc, __FUNCTION__);
4612 return 0xffffffff;
4613}
4614
4615#undef LOG_GROUP
4616#define LOG_GROUP LOG_GROUP_REM
4617
4618
4619/* -+- helpers and misc other interfaces -+- */
4620
4621/**
4622 * Perform the CPUID instruction.
4623 *
4624 * ASMCpuId cannot be invoked from some source files where this is used because of global
4625 * register allocations.
4626 *
4627 * @param env Pointer to the recompiler CPU structure.
4628 * @param uOperator CPUID operation (eax).
4629 * @param pvEAX Where to store eax.
4630 * @param pvEBX Where to store ebx.
4631 * @param pvECX Where to store ecx.
4632 * @param pvEDX Where to store edx.
4633 */
4634void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4635{
4636 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4637}
4638
4639
4640#if 0 /* not used */
4641/**
4642 * Interface for qemu hardware to report back fatal errors.
4643 */
4644void hw_error(const char *pszFormat, ...)
4645{
4646 /*
4647 * Bitch about it.
4648 */
4649 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4650 * this in my Odin32 tree at home! */
4651 va_list args;
4652 va_start(args, pszFormat);
4653 RTLogPrintf("fatal error in virtual hardware:");
4654 RTLogPrintfV(pszFormat, args);
4655 va_end(args);
4656 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4657
4658 /*
4659 * If we're in REM context we'll sync back the state before 'jumping' to
4660 * the EMs failure handling.
4661 */
4662 PVM pVM = cpu_single_env->pVM;
4663 if (pVM->rem.s.fInREM)
4664 REMR3StateBack(pVM);
4665 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4666 AssertMsgFailed(("EMR3FatalError returned!\n"));
4667}
4668#endif
4669
4670/**
4671 * Interface for the qemu cpu to report unhandled situation
4672 * raising a fatal VM error.
4673 */
4674void cpu_abort(CPUState *env, const char *pszFormat, ...)
4675{
4676 va_list va;
4677 PVM pVM;
4678 PVMCPU pVCpu;
4679 char szMsg[256];
4680
4681 /*
4682 * Bitch about it.
4683 */
4684 RTLogFlags(NULL, "nodisabled nobuffered");
4685 RTLogFlush(NULL);
4686
4687 va_start(va, pszFormat);
4688#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4689 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4690 unsigned cArgs = 0;
4691 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4692 const char *psz = strchr(pszFormat, '%');
4693 while (psz && cArgs < 6)
4694 {
4695 auArgs[cArgs++] = va_arg(va, uintptr_t);
4696 psz = strchr(psz + 1, '%');
4697 }
4698 switch (cArgs)
4699 {
4700 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4701 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4702 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4703 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4704 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4705 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4706 default:
4707 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4708 }
4709#else
4710 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4711#endif
4712 va_end(va);
4713
4714 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4715 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4716
4717 /*
4718 * If we're in REM context we'll sync back the state before 'jumping' to
4719 * the EMs failure handling.
4720 */
4721 pVM = cpu_single_env->pVM;
4722 pVCpu = cpu_single_env->pVCpu;
4723 Assert(pVCpu);
4724
4725 if (pVM->rem.s.fInREM)
4726 REMR3StateBack(pVM, pVCpu);
4727 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4728 AssertMsgFailed(("EMR3FatalError returned!\n"));
4729}
4730
4731
4732/**
4733 * Aborts the VM.
4734 *
4735 * @param rc VBox error code.
4736 * @param pszTip Hint about why/when this happened.
4737 */
4738void remAbort(int rc, const char *pszTip)
4739{
4740 PVM pVM;
4741 PVMCPU pVCpu;
4742
4743 /*
4744 * Bitch about it.
4745 */
4746 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4747 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4748
4749 /*
4750 * Jump back to where we entered the recompiler.
4751 */
4752 pVM = cpu_single_env->pVM;
4753 pVCpu = cpu_single_env->pVCpu;
4754 Assert(pVCpu);
4755
4756 if (pVM->rem.s.fInREM)
4757 REMR3StateBack(pVM, pVCpu);
4758
4759 EMR3FatalError(pVCpu, rc);
4760 AssertMsgFailed(("EMR3FatalError returned!\n"));
4761}
4762
4763
4764/**
4765 * Dumps a linux system call.
4766 * @param pVCpu VMCPU handle.
4767 */
4768void remR3DumpLnxSyscall(PVMCPU pVCpu)
4769{
4770 static const char *apsz[] =
4771 {
4772 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4773 "sys_exit",
4774 "sys_fork",
4775 "sys_read",
4776 "sys_write",
4777 "sys_open", /* 5 */
4778 "sys_close",
4779 "sys_waitpid",
4780 "sys_creat",
4781 "sys_link",
4782 "sys_unlink", /* 10 */
4783 "sys_execve",
4784 "sys_chdir",
4785 "sys_time",
4786 "sys_mknod",
4787 "sys_chmod", /* 15 */
4788 "sys_lchown16",
4789 "sys_ni_syscall", /* old break syscall holder */
4790 "sys_stat",
4791 "sys_lseek",
4792 "sys_getpid", /* 20 */
4793 "sys_mount",
4794 "sys_oldumount",
4795 "sys_setuid16",
4796 "sys_getuid16",
4797 "sys_stime", /* 25 */
4798 "sys_ptrace",
4799 "sys_alarm",
4800 "sys_fstat",
4801 "sys_pause",
4802 "sys_utime", /* 30 */
4803 "sys_ni_syscall", /* old stty syscall holder */
4804 "sys_ni_syscall", /* old gtty syscall holder */
4805 "sys_access",
4806 "sys_nice",
4807 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4808 "sys_sync",
4809 "sys_kill",
4810 "sys_rename",
4811 "sys_mkdir",
4812 "sys_rmdir", /* 40 */
4813 "sys_dup",
4814 "sys_pipe",
4815 "sys_times",
4816 "sys_ni_syscall", /* old prof syscall holder */
4817 "sys_brk", /* 45 */
4818 "sys_setgid16",
4819 "sys_getgid16",
4820 "sys_signal",
4821 "sys_geteuid16",
4822 "sys_getegid16", /* 50 */
4823 "sys_acct",
4824 "sys_umount", /* recycled never used phys() */
4825 "sys_ni_syscall", /* old lock syscall holder */
4826 "sys_ioctl",
4827 "sys_fcntl", /* 55 */
4828 "sys_ni_syscall", /* old mpx syscall holder */
4829 "sys_setpgid",
4830 "sys_ni_syscall", /* old ulimit syscall holder */
4831 "sys_olduname",
4832 "sys_umask", /* 60 */
4833 "sys_chroot",
4834 "sys_ustat",
4835 "sys_dup2",
4836 "sys_getppid",
4837 "sys_getpgrp", /* 65 */
4838 "sys_setsid",
4839 "sys_sigaction",
4840 "sys_sgetmask",
4841 "sys_ssetmask",
4842 "sys_setreuid16", /* 70 */
4843 "sys_setregid16",
4844 "sys_sigsuspend",
4845 "sys_sigpending",
4846 "sys_sethostname",
4847 "sys_setrlimit", /* 75 */
4848 "sys_old_getrlimit",
4849 "sys_getrusage",
4850 "sys_gettimeofday",
4851 "sys_settimeofday",
4852 "sys_getgroups16", /* 80 */
4853 "sys_setgroups16",
4854 "old_select",
4855 "sys_symlink",
4856 "sys_lstat",
4857 "sys_readlink", /* 85 */
4858 "sys_uselib",
4859 "sys_swapon",
4860 "sys_reboot",
4861 "old_readdir",
4862 "old_mmap", /* 90 */
4863 "sys_munmap",
4864 "sys_truncate",
4865 "sys_ftruncate",
4866 "sys_fchmod",
4867 "sys_fchown16", /* 95 */
4868 "sys_getpriority",
4869 "sys_setpriority",
4870 "sys_ni_syscall", /* old profil syscall holder */
4871 "sys_statfs",
4872 "sys_fstatfs", /* 100 */
4873 "sys_ioperm",
4874 "sys_socketcall",
4875 "sys_syslog",
4876 "sys_setitimer",
4877 "sys_getitimer", /* 105 */
4878 "sys_newstat",
4879 "sys_newlstat",
4880 "sys_newfstat",
4881 "sys_uname",
4882 "sys_iopl", /* 110 */
4883 "sys_vhangup",
4884 "sys_ni_syscall", /* old "idle" system call */
4885 "sys_vm86old",
4886 "sys_wait4",
4887 "sys_swapoff", /* 115 */
4888 "sys_sysinfo",
4889 "sys_ipc",
4890 "sys_fsync",
4891 "sys_sigreturn",
4892 "sys_clone", /* 120 */
4893 "sys_setdomainname",
4894 "sys_newuname",
4895 "sys_modify_ldt",
4896 "sys_adjtimex",
4897 "sys_mprotect", /* 125 */
4898 "sys_sigprocmask",
4899 "sys_ni_syscall", /* old "create_module" */
4900 "sys_init_module",
4901 "sys_delete_module",
4902 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4903 "sys_quotactl",
4904 "sys_getpgid",
4905 "sys_fchdir",
4906 "sys_bdflush",
4907 "sys_sysfs", /* 135 */
4908 "sys_personality",
4909 "sys_ni_syscall", /* reserved for afs_syscall */
4910 "sys_setfsuid16",
4911 "sys_setfsgid16",
4912 "sys_llseek", /* 140 */
4913 "sys_getdents",
4914 "sys_select",
4915 "sys_flock",
4916 "sys_msync",
4917 "sys_readv", /* 145 */
4918 "sys_writev",
4919 "sys_getsid",
4920 "sys_fdatasync",
4921 "sys_sysctl",
4922 "sys_mlock", /* 150 */
4923 "sys_munlock",
4924 "sys_mlockall",
4925 "sys_munlockall",
4926 "sys_sched_setparam",
4927 "sys_sched_getparam", /* 155 */
4928 "sys_sched_setscheduler",
4929 "sys_sched_getscheduler",
4930 "sys_sched_yield",
4931 "sys_sched_get_priority_max",
4932 "sys_sched_get_priority_min", /* 160 */
4933 "sys_sched_rr_get_interval",
4934 "sys_nanosleep",
4935 "sys_mremap",
4936 "sys_setresuid16",
4937 "sys_getresuid16", /* 165 */
4938 "sys_vm86",
4939 "sys_ni_syscall", /* Old sys_query_module */
4940 "sys_poll",
4941 "sys_nfsservctl",
4942 "sys_setresgid16", /* 170 */
4943 "sys_getresgid16",
4944 "sys_prctl",
4945 "sys_rt_sigreturn",
4946 "sys_rt_sigaction",
4947 "sys_rt_sigprocmask", /* 175 */
4948 "sys_rt_sigpending",
4949 "sys_rt_sigtimedwait",
4950 "sys_rt_sigqueueinfo",
4951 "sys_rt_sigsuspend",
4952 "sys_pread64", /* 180 */
4953 "sys_pwrite64",
4954 "sys_chown16",
4955 "sys_getcwd",
4956 "sys_capget",
4957 "sys_capset", /* 185 */
4958 "sys_sigaltstack",
4959 "sys_sendfile",
4960 "sys_ni_syscall", /* reserved for streams1 */
4961 "sys_ni_syscall", /* reserved for streams2 */
4962 "sys_vfork", /* 190 */
4963 "sys_getrlimit",
4964 "sys_mmap2",
4965 "sys_truncate64",
4966 "sys_ftruncate64",
4967 "sys_stat64", /* 195 */
4968 "sys_lstat64",
4969 "sys_fstat64",
4970 "sys_lchown",
4971 "sys_getuid",
4972 "sys_getgid", /* 200 */
4973 "sys_geteuid",
4974 "sys_getegid",
4975 "sys_setreuid",
4976 "sys_setregid",
4977 "sys_getgroups", /* 205 */
4978 "sys_setgroups",
4979 "sys_fchown",
4980 "sys_setresuid",
4981 "sys_getresuid",
4982 "sys_setresgid", /* 210 */
4983 "sys_getresgid",
4984 "sys_chown",
4985 "sys_setuid",
4986 "sys_setgid",
4987 "sys_setfsuid", /* 215 */
4988 "sys_setfsgid",
4989 "sys_pivot_root",
4990 "sys_mincore",
4991 "sys_madvise",
4992 "sys_getdents64", /* 220 */
4993 "sys_fcntl64",
4994 "sys_ni_syscall", /* reserved for TUX */
4995 "sys_ni_syscall",
4996 "sys_gettid",
4997 "sys_readahead", /* 225 */
4998 "sys_setxattr",
4999 "sys_lsetxattr",
5000 "sys_fsetxattr",
5001 "sys_getxattr",
5002 "sys_lgetxattr", /* 230 */
5003 "sys_fgetxattr",
5004 "sys_listxattr",
5005 "sys_llistxattr",
5006 "sys_flistxattr",
5007 "sys_removexattr", /* 235 */
5008 "sys_lremovexattr",
5009 "sys_fremovexattr",
5010 "sys_tkill",
5011 "sys_sendfile64",
5012 "sys_futex", /* 240 */
5013 "sys_sched_setaffinity",
5014 "sys_sched_getaffinity",
5015 "sys_set_thread_area",
5016 "sys_get_thread_area",
5017 "sys_io_setup", /* 245 */
5018 "sys_io_destroy",
5019 "sys_io_getevents",
5020 "sys_io_submit",
5021 "sys_io_cancel",
5022 "sys_fadvise64", /* 250 */
5023 "sys_ni_syscall",
5024 "sys_exit_group",
5025 "sys_lookup_dcookie",
5026 "sys_epoll_create",
5027 "sys_epoll_ctl", /* 255 */
5028 "sys_epoll_wait",
5029 "sys_remap_file_pages",
5030 "sys_set_tid_address",
5031 "sys_timer_create",
5032 "sys_timer_settime", /* 260 */
5033 "sys_timer_gettime",
5034 "sys_timer_getoverrun",
5035 "sys_timer_delete",
5036 "sys_clock_settime",
5037 "sys_clock_gettime", /* 265 */
5038 "sys_clock_getres",
5039 "sys_clock_nanosleep",
5040 "sys_statfs64",
5041 "sys_fstatfs64",
5042 "sys_tgkill", /* 270 */
5043 "sys_utimes",
5044 "sys_fadvise64_64",
5045 "sys_ni_syscall" /* sys_vserver */
5046 };
5047
5048 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5049 switch (uEAX)
5050 {
5051 default:
5052 if (uEAX < RT_ELEMENTS(apsz))
5053 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5054 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5055 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5056 else
5057 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5058 break;
5059
5060 }
5061}
5062
5063
5064/**
5065 * Dumps an OpenBSD system call.
5066 * @param pVCpu VMCPU handle.
5067 */
5068void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5069{
5070 static const char *apsz[] =
5071 {
5072 "SYS_syscall", //0
5073 "SYS_exit", //1
5074 "SYS_fork", //2
5075 "SYS_read", //3
5076 "SYS_write", //4
5077 "SYS_open", //5
5078 "SYS_close", //6
5079 "SYS_wait4", //7
5080 "SYS_8",
5081 "SYS_link", //9
5082 "SYS_unlink", //10
5083 "SYS_11",
5084 "SYS_chdir", //12
5085 "SYS_fchdir", //13
5086 "SYS_mknod", //14
5087 "SYS_chmod", //15
5088 "SYS_chown", //16
5089 "SYS_break", //17
5090 "SYS_18",
5091 "SYS_19",
5092 "SYS_getpid", //20
5093 "SYS_mount", //21
5094 "SYS_unmount", //22
5095 "SYS_setuid", //23
5096 "SYS_getuid", //24
5097 "SYS_geteuid", //25
5098 "SYS_ptrace", //26
5099 "SYS_recvmsg", //27
5100 "SYS_sendmsg", //28
5101 "SYS_recvfrom", //29
5102 "SYS_accept", //30
5103 "SYS_getpeername", //31
5104 "SYS_getsockname", //32
5105 "SYS_access", //33
5106 "SYS_chflags", //34
5107 "SYS_fchflags", //35
5108 "SYS_sync", //36
5109 "SYS_kill", //37
5110 "SYS_38",
5111 "SYS_getppid", //39
5112 "SYS_40",
5113 "SYS_dup", //41
5114 "SYS_opipe", //42
5115 "SYS_getegid", //43
5116 "SYS_profil", //44
5117 "SYS_ktrace", //45
5118 "SYS_sigaction", //46
5119 "SYS_getgid", //47
5120 "SYS_sigprocmask", //48
5121 "SYS_getlogin", //49
5122 "SYS_setlogin", //50
5123 "SYS_acct", //51
5124 "SYS_sigpending", //52
5125 "SYS_osigaltstack", //53
5126 "SYS_ioctl", //54
5127 "SYS_reboot", //55
5128 "SYS_revoke", //56
5129 "SYS_symlink", //57
5130 "SYS_readlink", //58
5131 "SYS_execve", //59
5132 "SYS_umask", //60
5133 "SYS_chroot", //61
5134 "SYS_62",
5135 "SYS_63",
5136 "SYS_64",
5137 "SYS_65",
5138 "SYS_vfork", //66
5139 "SYS_67",
5140 "SYS_68",
5141 "SYS_sbrk", //69
5142 "SYS_sstk", //70
5143 "SYS_61",
5144 "SYS_vadvise", //72
5145 "SYS_munmap", //73
5146 "SYS_mprotect", //74
5147 "SYS_madvise", //75
5148 "SYS_76",
5149 "SYS_77",
5150 "SYS_mincore", //78
5151 "SYS_getgroups", //79
5152 "SYS_setgroups", //80
5153 "SYS_getpgrp", //81
5154 "SYS_setpgid", //82
5155 "SYS_setitimer", //83
5156 "SYS_84",
5157 "SYS_85",
5158 "SYS_getitimer", //86
5159 "SYS_87",
5160 "SYS_88",
5161 "SYS_89",
5162 "SYS_dup2", //90
5163 "SYS_91",
5164 "SYS_fcntl", //92
5165 "SYS_select", //93
5166 "SYS_94",
5167 "SYS_fsync", //95
5168 "SYS_setpriority", //96
5169 "SYS_socket", //97
5170 "SYS_connect", //98
5171 "SYS_99",
5172 "SYS_getpriority", //100
5173 "SYS_101",
5174 "SYS_102",
5175 "SYS_sigreturn", //103
5176 "SYS_bind", //104
5177 "SYS_setsockopt", //105
5178 "SYS_listen", //106
5179 "SYS_107",
5180 "SYS_108",
5181 "SYS_109",
5182 "SYS_110",
5183 "SYS_sigsuspend", //111
5184 "SYS_112",
5185 "SYS_113",
5186 "SYS_114",
5187 "SYS_115",
5188 "SYS_gettimeofday", //116
5189 "SYS_getrusage", //117
5190 "SYS_getsockopt", //118
5191 "SYS_119",
5192 "SYS_readv", //120
5193 "SYS_writev", //121
5194 "SYS_settimeofday", //122
5195 "SYS_fchown", //123
5196 "SYS_fchmod", //124
5197 "SYS_125",
5198 "SYS_setreuid", //126
5199 "SYS_setregid", //127
5200 "SYS_rename", //128
5201 "SYS_129",
5202 "SYS_130",
5203 "SYS_flock", //131
5204 "SYS_mkfifo", //132
5205 "SYS_sendto", //133
5206 "SYS_shutdown", //134
5207 "SYS_socketpair", //135
5208 "SYS_mkdir", //136
5209 "SYS_rmdir", //137
5210 "SYS_utimes", //138
5211 "SYS_139",
5212 "SYS_adjtime", //140
5213 "SYS_141",
5214 "SYS_142",
5215 "SYS_143",
5216 "SYS_144",
5217 "SYS_145",
5218 "SYS_146",
5219 "SYS_setsid", //147
5220 "SYS_quotactl", //148
5221 "SYS_149",
5222 "SYS_150",
5223 "SYS_151",
5224 "SYS_152",
5225 "SYS_153",
5226 "SYS_154",
5227 "SYS_nfssvc", //155
5228 "SYS_156",
5229 "SYS_157",
5230 "SYS_158",
5231 "SYS_159",
5232 "SYS_160",
5233 "SYS_getfh", //161
5234 "SYS_162",
5235 "SYS_163",
5236 "SYS_164",
5237 "SYS_sysarch", //165
5238 "SYS_166",
5239 "SYS_167",
5240 "SYS_168",
5241 "SYS_169",
5242 "SYS_170",
5243 "SYS_171",
5244 "SYS_172",
5245 "SYS_pread", //173
5246 "SYS_pwrite", //174
5247 "SYS_175",
5248 "SYS_176",
5249 "SYS_177",
5250 "SYS_178",
5251 "SYS_179",
5252 "SYS_180",
5253 "SYS_setgid", //181
5254 "SYS_setegid", //182
5255 "SYS_seteuid", //183
5256 "SYS_lfs_bmapv", //184
5257 "SYS_lfs_markv", //185
5258 "SYS_lfs_segclean", //186
5259 "SYS_lfs_segwait", //187
5260 "SYS_188",
5261 "SYS_189",
5262 "SYS_190",
5263 "SYS_pathconf", //191
5264 "SYS_fpathconf", //192
5265 "SYS_swapctl", //193
5266 "SYS_getrlimit", //194
5267 "SYS_setrlimit", //195
5268 "SYS_getdirentries", //196
5269 "SYS_mmap", //197
5270 "SYS___syscall", //198
5271 "SYS_lseek", //199
5272 "SYS_truncate", //200
5273 "SYS_ftruncate", //201
5274 "SYS___sysctl", //202
5275 "SYS_mlock", //203
5276 "SYS_munlock", //204
5277 "SYS_205",
5278 "SYS_futimes", //206
5279 "SYS_getpgid", //207
5280 "SYS_xfspioctl", //208
5281 "SYS_209",
5282 "SYS_210",
5283 "SYS_211",
5284 "SYS_212",
5285 "SYS_213",
5286 "SYS_214",
5287 "SYS_215",
5288 "SYS_216",
5289 "SYS_217",
5290 "SYS_218",
5291 "SYS_219",
5292 "SYS_220",
5293 "SYS_semget", //221
5294 "SYS_222",
5295 "SYS_223",
5296 "SYS_224",
5297 "SYS_msgget", //225
5298 "SYS_msgsnd", //226
5299 "SYS_msgrcv", //227
5300 "SYS_shmat", //228
5301 "SYS_229",
5302 "SYS_shmdt", //230
5303 "SYS_231",
5304 "SYS_clock_gettime", //232
5305 "SYS_clock_settime", //233
5306 "SYS_clock_getres", //234
5307 "SYS_235",
5308 "SYS_236",
5309 "SYS_237",
5310 "SYS_238",
5311 "SYS_239",
5312 "SYS_nanosleep", //240
5313 "SYS_241",
5314 "SYS_242",
5315 "SYS_243",
5316 "SYS_244",
5317 "SYS_245",
5318 "SYS_246",
5319 "SYS_247",
5320 "SYS_248",
5321 "SYS_249",
5322 "SYS_minherit", //250
5323 "SYS_rfork", //251
5324 "SYS_poll", //252
5325 "SYS_issetugid", //253
5326 "SYS_lchown", //254
5327 "SYS_getsid", //255
5328 "SYS_msync", //256
5329 "SYS_257",
5330 "SYS_258",
5331 "SYS_259",
5332 "SYS_getfsstat", //260
5333 "SYS_statfs", //261
5334 "SYS_fstatfs", //262
5335 "SYS_pipe", //263
5336 "SYS_fhopen", //264
5337 "SYS_265",
5338 "SYS_fhstatfs", //266
5339 "SYS_preadv", //267
5340 "SYS_pwritev", //268
5341 "SYS_kqueue", //269
5342 "SYS_kevent", //270
5343 "SYS_mlockall", //271
5344 "SYS_munlockall", //272
5345 "SYS_getpeereid", //273
5346 "SYS_274",
5347 "SYS_275",
5348 "SYS_276",
5349 "SYS_277",
5350 "SYS_278",
5351 "SYS_279",
5352 "SYS_280",
5353 "SYS_getresuid", //281
5354 "SYS_setresuid", //282
5355 "SYS_getresgid", //283
5356 "SYS_setresgid", //284
5357 "SYS_285",
5358 "SYS_mquery", //286
5359 "SYS_closefrom", //287
5360 "SYS_sigaltstack", //288
5361 "SYS_shmget", //289
5362 "SYS_semop", //290
5363 "SYS_stat", //291
5364 "SYS_fstat", //292
5365 "SYS_lstat", //293
5366 "SYS_fhstat", //294
5367 "SYS___semctl", //295
5368 "SYS_shmctl", //296
5369 "SYS_msgctl", //297
5370 "SYS_MAXSYSCALL", //298
5371 //299
5372 //300
5373 };
5374 uint32_t uEAX;
5375 if (!LogIsEnabled())
5376 return;
5377 uEAX = CPUMGetGuestEAX(pVCpu);
5378 switch (uEAX)
5379 {
5380 default:
5381 if (uEAX < RT_ELEMENTS(apsz))
5382 {
5383 uint32_t au32Args[8] = {0};
5384 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5385 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5386 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5387 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5388 }
5389 else
5390 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5391 break;
5392 }
5393}
5394
5395
5396#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5397/**
5398 * The Dll main entry point (stub).
5399 */
5400bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5401{
5402 return true;
5403}
5404
/**
 * memcpy replacement for the no-CRT x86 Windows build.
 *
 * Simple forward byte-by-byte copy.  Per the standard memcpy contract the
 * regions must not overlap.
 *
 * @returns dst.
 * @param   dst     The destination buffer.
 * @param   src     The source buffer.
 * @param   size    The number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* was uint8_t *, silently discarding const */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5412
5413#endif
5414
/**
 * Stub for the qemu cpu_smm_update callback -- intentionally a no-op.
 *
 * NOTE(review): presumably the qemu core invokes this when the CPU's SMM
 * (System Management Mode) state changes; the recompiler has nothing to do
 * here -- confirm against the bundled qemu sources.
 *
 * @param   env     The qemu CPU state (unused).
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette