VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 36175

Last change on this file since 36175 was 36175, checked in by vboxsync, 14 years ago

rem: Synced up to v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b) from git://git.savannah.nongnu.org/qemu.git.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 178.8 KB
Line 
1/* $Id: VBoxRecompiler.c 36175 2011-03-04 16:21:09Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
75/** Copy 80-bit fpu register at pSrc to pDst.
76 * This is probably faster than *calling* memcpy.
77 */
78#define REM_COPY_FPU_REG(pDst, pSrc) \
79 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81/** How remR3RunLoggingStep operates. */
82#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
83
84
85/*******************************************************************************
86* Internal Functions *
87*******************************************************************************/
88static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
91static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
108static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
109static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
110
111/*******************************************************************************
112* Global Variables *
113*******************************************************************************/
114
115/** @todo Move stats to REM::s some rainy day we have nothing do to. */
116#ifdef VBOX_WITH_STATISTICS
117static STAMPROFILEADV gStatExecuteSingleInstr;
118static STAMPROFILEADV gStatCompilationQEmu;
119static STAMPROFILEADV gStatRunCodeQEmu;
120static STAMPROFILEADV gStatTotalTimeQEmu;
121static STAMPROFILEADV gStatTimers;
122static STAMPROFILEADV gStatTBLookup;
123static STAMPROFILEADV gStatIRQ;
124static STAMPROFILEADV gStatRawCheck;
125static STAMPROFILEADV gStatMemRead;
126static STAMPROFILEADV gStatMemWrite;
127static STAMPROFILE gStatGCPhys2HCVirt;
128static STAMPROFILE gStatHCVirt2GCPhys;
129static STAMCOUNTER gStatCpuGetTSC;
130static STAMCOUNTER gStatRefuseTFInhibit;
131static STAMCOUNTER gStatRefuseVM86;
132static STAMCOUNTER gStatRefusePaging;
133static STAMCOUNTER gStatRefusePAE;
134static STAMCOUNTER gStatRefuseIOPLNot0;
135static STAMCOUNTER gStatRefuseIF0;
136static STAMCOUNTER gStatRefuseCode16;
137static STAMCOUNTER gStatRefuseWP0;
138static STAMCOUNTER gStatRefuseRing1or2;
139static STAMCOUNTER gStatRefuseCanExecute;
140static STAMCOUNTER gStatREMGDTChange;
141static STAMCOUNTER gStatREMIDTChange;
142static STAMCOUNTER gStatREMLDTRChange;
143static STAMCOUNTER gStatREMTRChange;
144static STAMCOUNTER gStatSelOutOfSync[6];
145static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
146static STAMCOUNTER gStatFlushTBs;
147#endif
148/* in exec.c */
149extern uint32_t tlb_flush_count;
150extern uint32_t tb_flush_count;
151extern uint32_t tb_phys_invalidate_count;
152
153/*
154 * Global stuff.
155 */
156
/** MMIO read callbacks (indexed 0..2 for 8/16/32-bit accesses).
 * Handed to cpu_register_io_memory() in REMR3Init for the MMIO memory type. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks (indexed 0..2 for 8/16/32-bit accesses). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (indexed 0..2 for 8/16/32-bit accesses).
 * Used for pages covered by PGM access handlers. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (indexed 0..2 for 8/16/32-bit accesses). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
188
189
190#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
191/*
192 * Debugger commands.
193 */
194static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
195
196/** '.remstep' arguments. */
197static const DBGCVARDESC g_aArgRemStep[] =
198{
199 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
200 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
201};
202
203/** Command descriptors. */
204static const DBGCCMD g_aCmds[] =
205{
206 {
207 .pszCmd ="remstep",
208 .cArgsMin = 0,
209 .cArgsMax = 1,
210 .paArgDescs = &g_aArgRemStep[0],
211 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
212 .fFlags = 0,
213 .pfnHandler = remR3CmdDisasEnableStepping,
214 .pszSyntax = "[on/off]",
215 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
216 "If no arguments show the current state."
217 }
218};
219#endif
220
221/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
222uint8_t *code_gen_prologue;
223
224
225/*******************************************************************************
226* Internal Functions *
227*******************************************************************************/
228void remAbort(int rc, const char *pszTip);
229extern int testmath(void);
230
231/* Put them here to avoid unused variable warning. */
232AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
233#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
234//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
235/* Why did this have to be identical?? */
236AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
237#else
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#endif
240
241
242/**
243 * Initializes the REM.
244 *
245 * @returns VBox status code.
246 * @param pVM The VM to operate on.
247 */
/**
 * Initializes the REM.
 *
 * Sets up the recompiled-execution environment: sanity checks the
 * REM/VM structure layout, initializes the QEMU CPU state, registers
 * the MMIO/handler I/O memory types, the saved-state unit, debugger
 * commands, statistics, and the handler-notification free list.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     * The REM state must fit in the padding reserved for it in the VM
     * structure, and the QEMU Env must fit in REM_ENV_SIZE (ABI contract
     * with the separately built recompiler module).
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all notifications while the CPU state and memory types are
       being set up; re-enabled below once registration is complete. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue code must be in the lower 4G so generated code can jump
       to/from it (see code_gen_prologue declaration). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature bits into the QEMU CPU state. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1,          &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features,  &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     * Registered once only, even if several VMs are created in-process.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",         STAMUNIT_OCCURENCES,     "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",     STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",   STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",      STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",      STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",   STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",      STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",         STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",  STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",     STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",  STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",       STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters exported from exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",      STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",  STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",     STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Init the handler notification lists.
     * All records start on the free list; the pending list is empty.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
439
440
441/**
442 * Finalizes the REM initialization.
443 *
444 * This is called after all components, devices and drivers has
445 * been initialized. Its main purpose it to finish the RAM related
446 * initialization.
447 *
448 * @returns VBox status code.
449 *
450 * @param pVM The VM handle.
451 */
452REMR3DECL(int) REMR3InitFinalize(PVM pVM)
453{
454 int rc;
455
456 /*
457 * Ram size & dirty bit map.
458 */
459 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
460 pVM->rem.s.fGCPhysLastRamFixed = true;
461#ifdef RT_STRICT
462 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
463#else
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
465#endif
466 return rc;
467}
468
469
470/**
471 * Initializes phys_ram_dirty and phys_ram_dirty_size.
472 *
473 * @returns VBox status code.
474 * @param pVM The VM handle.
475 * @param fGuarded Whether to guard the map.
476 */
/**
 * Initializes phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map has one byte per guest page. When fGuarded is set the
 * map is page-allocated with an inaccessible guard region after it so
 * out-of-bounds writes fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* One dirty byte per page up to and including the last RAM page. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    /* Verify nothing was lost in the shift (cb must be page aligned). */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail region inaccessible; free everything on failure. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Advance the pointer so the map ends right at the guard region. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages initially dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
523
524
525/**
526 * Terminates the REM.
527 *
528 * Termination means cleaning up and freeing all resources,
529 * the VM it self is at this point powered off or suspended.
530 *
531 * @returns VBox status code.
532 * @param pVM The VM to operate on.
533 */
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 * Currently only deregisters the statistics registered in REMR3Init.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     * Mirrors the STAM_REG calls in REMR3Init; keep the two lists in sync.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
595
596
597/**
598 * The VM is being reset.
599 *
600 * For the REM component this means to call the cpu_reset() and
601 * reinitialize some state variables.
602 *
603 * @param pVM VM handle.
604 */
605REMR3DECL(void) REMR3Reset(PVM pVM)
606{
607 /*
608 * Reset the REM cpu.
609 */
610 Assert(pVM->rem.s.cIgnoreAll == 0);
611 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
612 cpu_reset(&pVM->rem.s.Env);
613 pVM->rem.s.cInvalidatedPages = 0;
614 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
615 Assert(pVM->rem.s.cIgnoreAll == 0);
616
617 /* Clear raw ring 0 init state */
618 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
619
620 /* Flush the TBs the next time we execute code here. */
621 pVM->rem.s.fFlushTBs = true;
622}
623
624
625/**
626 * Execute state save operation.
627 *
628 * @returns VBox status code.
629 * @param pVM VM Handle.
630 * @param pSSM SSM operation handle.
631 */
632static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
633{
634 PREM pRem = &pVM->rem.s;
635
636 /*
637 * Save the required CPU Env bits.
638 * (Not much because we're never in REM when doing the save.)
639 */
640 LogFlow(("remR3Save:\n"));
641 Assert(!pRem->fInREM);
642 SSMR3PutU32(pSSM, pRem->Env.hflags);
643 SSMR3PutU32(pSSM, ~0); /* separator */
644
645 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
646 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
647 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
648
649 return SSMR3PutU32(pSSM, ~0); /* terminator */
650}
651
652
653/**
654 * Execute state load operation.
655 *
656 * @returns VBox status code.
657 * @param pVM VM Handle.
658 * @param pSSM SSM operation handle.
659 * @param uVersion Data layout version.
660 * @param uPass The data pass.
661 */
/**
 * Execute state load operation.
 *
 * Resets the REM CPU, then restores the few Env bits written by
 * remR3Save. Version 1.6 saved states additionally carry a redundant
 * CPU state blob and an invalidated-page list which are read but
 * otherwise ignored / only kept for format compatibility.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1,          &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
783
784
785
786#undef LOG_GROUP
787#define LOG_GROUP LOG_GROUP_REM_RUN
788
789/**
790 * Single steps an instruction in recompiled mode.
791 *
792 * Before calling this function the REM state needs to be in sync with
793 * the VM. Call REMR3State() to perform the sync. It's only necessary
794 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
795 * and after calling REMR3StateBack().
796 *
797 * @returns VBox status code.
798 *
799 * @param pVM VM Handle.
800 * @param pVCpu VMCPU Handle.
801 */
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 on success, so fBp records whether one was removed.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Single step completed; pulse the timers so the clock moves. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the QEMU exit code to a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was stashed in pVM->rem.s.rc; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-insert the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
873
874
875/**
876 * Set a breakpoint using the REM facilities.
877 *
878 * @returns VBox status code.
879 * @param pVM The VM handle.
880 * @param Address The breakpoint address.
881 * @thread The emulation thread.
882 */
883REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
884{
885 VM_ASSERT_EMT(pVM);
886 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
887 {
888 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
889 return VINF_SUCCESS;
890 }
891 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
892 return VERR_REM_NO_MORE_BP_SLOTS;
893}
894
895
896/**
897 * Clears a breakpoint set by REMR3BreakpointSet().
898 *
899 * @returns VBox status code.
900 * @param pVM The VM handle.
901 * @param Address The breakpoint address.
902 * @thread The emulation thread.
903 */
904REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
905{
906 VM_ASSERT_EMT(pVM);
907 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
908 {
909 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
910 return VINF_SUCCESS;
911 }
912 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
913 return VERR_REM_BP_NOT_FOUND;
914}
915
916
917/**
918 * Emulate an instruction.
919 *
920 * This function executes one instruction without letting anyone
921 * interrupt it. This is intended for being called while being in
922 * raw mode and thus will take care of all the state syncing between
923 * REM and the rest.
924 *
925 * @returns VBox status code.
926 * @param pVM VM handle.
927 * @param pVCpu VMCPU Handle.
928 */
929REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
930{
931 bool fFlushTBs;
932
933 int rc, rc2;
934 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
935
936 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
937 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
938 */
939 if (HWACCMIsEnabled(pVM))
940 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
941
942 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
943 fFlushTBs = pVM->rem.s.fFlushTBs;
944 pVM->rem.s.fFlushTBs = false;
945
946 /*
947 * Sync the state and enable single instruction / single stepping.
948 */
949 rc = REMR3State(pVM, pVCpu);
950 pVM->rem.s.fFlushTBs = fFlushTBs;
951 if (RT_SUCCESS(rc))
952 {
953 int interrupt_request = pVM->rem.s.Env.interrupt_request;
954 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
955#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
956 cpu_single_step(&pVM->rem.s.Env, 0);
957#endif
958 Assert(!pVM->rem.s.Env.singlestep_enabled);
959
960 /*
961 * Now we set the execute single instruction flag and enter the cpu_exec loop.
962 */
963 TMNotifyStartOfExecution(pVCpu);
964 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
965 rc = cpu_exec(&pVM->rem.s.Env);
966 TMNotifyEndOfExecution(pVCpu);
967 switch (rc)
968 {
969 /*
970 * Executed without anything out of the way happening.
971 */
972 case EXCP_SINGLE_INSTR:
973 rc = VINF_EM_RESCHEDULE;
974 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
975 break;
976
977 /*
978 * If we take a trap or start servicing a pending interrupt, we might end up here.
979 * (Timer thread or some other thread wishing EMT's attention.)
980 */
981 case EXCP_INTERRUPT:
982 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
983 rc = VINF_EM_RESCHEDULE;
984 break;
985
986 /*
987 * Single step, we assume!
988 * If there was a breakpoint there we're fucked now.
989 */
990 case EXCP_DEBUG:
991 if (pVM->rem.s.Env.watchpoint_hit)
992 {
993 /** @todo deal with watchpoints */
994 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
995 rc = VINF_EM_DBG_BREAKPOINT;
996 }
997 else
998 {
999 CPUBreakpoint *pBP;
1000 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1001 TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1002 if (pBP->pc == GCPtrPC)
1003 break;
1004 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1005 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1006 }
1007 break;
1008
1009 /*
1010 * hlt instruction.
1011 */
1012 case EXCP_HLT:
1013 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1014 rc = VINF_EM_HALT;
1015 break;
1016
1017 /*
1018 * The VM has halted.
1019 */
1020 case EXCP_HALTED:
1021 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1022 rc = VINF_EM_HALT;
1023 break;
1024
1025 /*
1026 * Switch to RAW-mode.
1027 */
1028 case EXCP_EXECUTE_RAW:
1029 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1030 rc = VINF_EM_RESCHEDULE_RAW;
1031 break;
1032
1033 /*
1034 * Switch to hardware accelerated RAW-mode.
1035 */
1036 case EXCP_EXECUTE_HWACC:
1037 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1038 rc = VINF_EM_RESCHEDULE_HWACC;
1039 break;
1040
1041 /*
1042 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1043 */
1044 case EXCP_RC:
1045 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1046 rc = pVM->rem.s.rc;
1047 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1048 break;
1049
1050 /*
1051 * Figure out the rest when they arrive....
1052 */
1053 default:
1054 AssertMsgFailed(("rc=%d\n", rc));
1055 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1056 rc = VINF_EM_RESCHEDULE;
1057 break;
1058 }
1059
1060 /*
1061 * Switch back the state.
1062 */
1063 pVM->rem.s.Env.interrupt_request = interrupt_request;
1064 rc2 = REMR3StateBack(pVM, pVCpu);
1065 AssertRC(rc2);
1066 }
1067
1068 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1069 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1070 return rc;
1071}
1072
1073
1074/**
1075 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1076 *
1077 * @returns VBox status code.
1078 *
1079 * @param pVM The VM handle.
1080 * @param pVCpu The Virtual CPU handle.
1081 */
1082static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1083{
1084 int rc;
1085
1086 Assert(pVM->rem.s.fInREM);
1087#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1088 cpu_single_step(&pVM->rem.s.Env, 1);
1089#else
1090 Assert(!pVM->rem.s.Env.singlestep_enabled);
1091#endif
1092
1093 /*
1094 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1095 */
1096 for (;;)
1097 {
1098 char szBuf[256];
1099
1100 /*
1101 * Log the current registers state and instruction.
1102 */
1103 remR3StateUpdate(pVM, pVCpu);
1104 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1105 szBuf[0] = '\0';
1106 rc = DBGFR3DisasInstrEx(pVM,
1107 pVCpu->idCpu,
1108 0, /* Sel */
1109 0, /* GCPtr */
1110 DBGF_DISAS_FLAGS_CURRENT_GUEST
1111 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1112 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1113 szBuf,
1114 sizeof(szBuf),
1115 NULL);
1116 if (RT_FAILURE(rc))
1117 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1118 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1119
1120 /*
1121 * Execute the instruction.
1122 */
1123 TMNotifyStartOfExecution(pVCpu);
1124
1125 if ( pVM->rem.s.Env.exception_index < 0
1126 || pVM->rem.s.Env.exception_index > 256)
1127 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1128
1129#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1130 pVM->rem.s.Env.interrupt_request = 0;
1131#else
1132 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1133#endif
1134 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1135 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1136 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1137 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1138 pVM->rem.s.Env.interrupt_request,
1139 pVM->rem.s.Env.halted,
1140 pVM->rem.s.Env.exception_index
1141 );
1142
1143 rc = cpu_exec(&pVM->rem.s.Env);
1144
1145 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1146 pVM->rem.s.Env.interrupt_request,
1147 pVM->rem.s.Env.halted,
1148 pVM->rem.s.Env.exception_index
1149 );
1150
1151 TMNotifyEndOfExecution(pVCpu);
1152
1153 switch (rc)
1154 {
1155#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1156 /*
1157 * The normal exit.
1158 */
1159 case EXCP_SINGLE_INSTR:
1160 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1161 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1162 continue;
1163 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1164 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1165 rc = VINF_SUCCESS;
1166 break;
1167
1168#else
1169 /*
1170 * The normal exit, check for breakpoints at PC just to be sure.
1171 */
1172#endif
1173 case EXCP_DEBUG:
1174 if (pVM->rem.s.Env.watchpoint_hit)
1175 {
1176 /** @todo deal with watchpoints */
1177 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1178 rc = VINF_EM_DBG_BREAKPOINT;
1179 }
1180 else
1181 {
1182 CPUBreakpoint *pBP;
1183 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1184 TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1185 if (pBP->pc == GCPtrPC)
1186 break;
1187 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1188 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1189 }
1190#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1191 if (rc == VINF_EM_DBG_STEPPED)
1192 {
1193 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1194 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1195 continue;
1196
1197 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1198 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1199 rc = VINF_SUCCESS;
1200 }
1201#endif
1202 break;
1203
1204 /*
1205 * If we take a trap or start servicing a pending interrupt, we might end up here.
1206 * (Timer thread or some other thread wishing EMT's attention.)
1207 */
1208 case EXCP_INTERRUPT:
1209 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1210 rc = VINF_SUCCESS;
1211 break;
1212
1213 /*
1214 * hlt instruction.
1215 */
1216 case EXCP_HLT:
1217 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1218 rc = VINF_EM_HALT;
1219 break;
1220
1221 /*
1222 * The VM has halted.
1223 */
1224 case EXCP_HALTED:
1225 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1226 rc = VINF_EM_HALT;
1227 break;
1228
1229 /*
1230 * Switch to RAW-mode.
1231 */
1232 case EXCP_EXECUTE_RAW:
1233 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1234 rc = VINF_EM_RESCHEDULE_RAW;
1235 break;
1236
1237 /*
1238 * Switch to hardware accelerated RAW-mode.
1239 */
1240 case EXCP_EXECUTE_HWACC:
1241 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1242 rc = VINF_EM_RESCHEDULE_HWACC;
1243 break;
1244
1245 /*
1246 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1247 */
1248 case EXCP_RC:
1249 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1250 rc = pVM->rem.s.rc;
1251 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1252 break;
1253
1254 /*
1255 * Figure out the rest when they arrive....
1256 */
1257 default:
1258 AssertMsgFailed(("rc=%d\n", rc));
1259 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1260 rc = VINF_EM_RESCHEDULE;
1261 break;
1262 }
1263 break;
1264 }
1265
1266#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1267// cpu_single_step(&pVM->rem.s.Env, 0);
1268#else
1269 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1270#endif
1271 return rc;
1272}
1273
1274
1275/**
1276 * Runs code in recompiled mode.
1277 *
1278 * Before calling this function the REM state needs to be in sync with
1279 * the VM. Call REMR3State() to perform the sync. It's only necessary
1280 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1281 * and after calling REMR3StateBack().
1282 *
1283 * @returns VBox status code.
1284 *
1285 * @param pVM VM Handle.
1286 * @param pVCpu VMCPU Handle.
1287 */
1288REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1289{
1290 int rc;
1291
1292 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1293 return remR3RunLoggingStep(pVM, pVCpu);
1294
1295 Assert(pVM->rem.s.fInREM);
1296 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1297
1298 TMNotifyStartOfExecution(pVCpu);
1299 rc = cpu_exec(&pVM->rem.s.Env);
1300 TMNotifyEndOfExecution(pVCpu);
1301 switch (rc)
1302 {
1303 /*
1304 * This happens when the execution was interrupted
1305 * by an external event, like pending timers.
1306 */
1307 case EXCP_INTERRUPT:
1308 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1309 rc = VINF_SUCCESS;
1310 break;
1311
1312 /*
1313 * hlt instruction.
1314 */
1315 case EXCP_HLT:
1316 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1317 rc = VINF_EM_HALT;
1318 break;
1319
1320 /*
1321 * The VM has halted.
1322 */
1323 case EXCP_HALTED:
1324 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1325 rc = VINF_EM_HALT;
1326 break;
1327
1328 /*
1329 * Breakpoint/single step.
1330 */
1331 case EXCP_DEBUG:
1332 if (pVM->rem.s.Env.watchpoint_hit)
1333 {
1334 /** @todo deal with watchpoints */
1335 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1336 rc = VINF_EM_DBG_BREAKPOINT;
1337 }
1338 else
1339 {
1340 CPUBreakpoint *pBP;
1341 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1342 TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1343 if (pBP->pc == GCPtrPC)
1344 break;
1345 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1346 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1347 }
1348 break;
1349
1350 /*
1351 * Switch to RAW-mode.
1352 */
1353 case EXCP_EXECUTE_RAW:
1354 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1355 rc = VINF_EM_RESCHEDULE_RAW;
1356 break;
1357
1358 /*
1359 * Switch to hardware accelerated RAW-mode.
1360 */
1361 case EXCP_EXECUTE_HWACC:
1362 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1363 rc = VINF_EM_RESCHEDULE_HWACC;
1364 break;
1365
1366 /*
1367 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1368 */
1369 case EXCP_RC:
1370 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1371 rc = pVM->rem.s.rc;
1372 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1373 break;
1374
1375 /*
1376 * Figure out the rest when they arrive....
1377 */
1378 default:
1379 AssertMsgFailed(("rc=%d\n", rc));
1380 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1381 rc = VINF_SUCCESS;
1382 break;
1383 }
1384
1385 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1386 return rc;
1387}
1388
1389
1390/**
1391 * Check if the cpu state is suitable for Raw execution.
1392 *
1393 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1394 *
1395 * @param env The CPU env struct.
1396 * @param eip The EIP to check this for (might differ from env->eip).
1397 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1398 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1399 *
1400 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1401 */
1402bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1403{
1404 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1405 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1406 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1407 uint32_t u32CR0;
1408
1409 /* Update counter. */
1410 env->pVM->rem.s.cCanExecuteRaw++;
1411
1412 /* Never when single stepping+logging guest code. */
1413 if (env->state & CPU_EMULATE_SINGLE_STEP)
1414 return false;
1415
1416 if (HWACCMIsEnabled(env->pVM))
1417 {
1418 CPUMCTX Ctx;
1419
1420 env->state |= CPU_RAW_HWACC;
1421
1422 /*
1423 * Create partial context for HWACCMR3CanExecuteGuest
1424 */
1425 Ctx.cr0 = env->cr[0];
1426 Ctx.cr3 = env->cr[3];
1427 Ctx.cr4 = env->cr[4];
1428
1429 Ctx.tr = env->tr.selector;
1430 Ctx.trHid.u64Base = env->tr.base;
1431 Ctx.trHid.u32Limit = env->tr.limit;
1432 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1433
1434 Ctx.ldtr = env->ldt.selector;
1435 Ctx.ldtrHid.u64Base = env->ldt.base;
1436 Ctx.ldtrHid.u32Limit = env->ldt.limit;
1437 Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1438
1439 Ctx.idtr.cbIdt = env->idt.limit;
1440 Ctx.idtr.pIdt = env->idt.base;
1441
1442 Ctx.gdtr.cbGdt = env->gdt.limit;
1443 Ctx.gdtr.pGdt = env->gdt.base;
1444
1445 Ctx.rsp = env->regs[R_ESP];
1446 Ctx.rip = env->eip;
1447
1448 Ctx.eflags.u32 = env->eflags;
1449
1450 Ctx.cs = env->segs[R_CS].selector;
1451 Ctx.csHid.u64Base = env->segs[R_CS].base;
1452 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1453 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1454
1455 Ctx.ds = env->segs[R_DS].selector;
1456 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1457 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1458 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1459
1460 Ctx.es = env->segs[R_ES].selector;
1461 Ctx.esHid.u64Base = env->segs[R_ES].base;
1462 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1463 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1464
1465 Ctx.fs = env->segs[R_FS].selector;
1466 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1467 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1468 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1469
1470 Ctx.gs = env->segs[R_GS].selector;
1471 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1472 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1473 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1474
1475 Ctx.ss = env->segs[R_SS].selector;
1476 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1477 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1478 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1479
1480 Ctx.msrEFER = env->efer;
1481
1482 /* Hardware accelerated raw-mode:
1483 *
1484 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1485 */
1486 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1487 {
1488 *piException = EXCP_EXECUTE_HWACC;
1489 return true;
1490 }
1491 return false;
1492 }
1493
1494 /*
1495 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1496 * or 32 bits protected mode ring 0 code
1497 *
1498 * The tests are ordered by the likelihood of being true during normal execution.
1499 */
1500 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1501 {
1502 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1503 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1504 return false;
1505 }
1506
1507#ifndef VBOX_RAW_V86
1508 if (fFlags & VM_MASK) {
1509 STAM_COUNTER_INC(&gStatRefuseVM86);
1510 Log2(("raw mode refused: VM_MASK\n"));
1511 return false;
1512 }
1513#endif
1514
1515 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1516 {
1517#ifndef DEBUG_bird
1518 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1519#endif
1520 return false;
1521 }
1522
1523 if (env->singlestep_enabled)
1524 {
1525 //Log2(("raw mode refused: Single step\n"));
1526 return false;
1527 }
1528
1529 if (!TAILQ_EMPTY(&env->breakpoints))
1530 {
1531 //Log2(("raw mode refused: Breakpoints\n"));
1532 return false;
1533 }
1534
1535 if (!TAILQ_EMPTY(&env->watchpoints))
1536 {
1537 //Log2(("raw mode refused: Watchpoints\n"));
1538 return false;
1539 }
1540
1541 u32CR0 = env->cr[0];
1542 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1543 {
1544 STAM_COUNTER_INC(&gStatRefusePaging);
1545 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1546 return false;
1547 }
1548
1549 if (env->cr[4] & CR4_PAE_MASK)
1550 {
1551 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1552 {
1553 STAM_COUNTER_INC(&gStatRefusePAE);
1554 return false;
1555 }
1556 }
1557
1558 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1559 {
1560 if (!EMIsRawRing3Enabled(env->pVM))
1561 return false;
1562
1563 if (!(env->eflags & IF_MASK))
1564 {
1565 STAM_COUNTER_INC(&gStatRefuseIF0);
1566 Log2(("raw mode refused: IF (RawR3)\n"));
1567 return false;
1568 }
1569
1570 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1571 {
1572 STAM_COUNTER_INC(&gStatRefuseWP0);
1573 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1574 return false;
1575 }
1576 }
1577 else
1578 {
1579 if (!EMIsRawRing0Enabled(env->pVM))
1580 return false;
1581
1582 // Let's start with pure 32 bits ring 0 code first
1583 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1584 {
1585 STAM_COUNTER_INC(&gStatRefuseCode16);
1586 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1587 return false;
1588 }
1589
1590 // Only R0
1591 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1592 {
1593 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1594 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1595 return false;
1596 }
1597
1598 if (!(u32CR0 & CR0_WP_MASK))
1599 {
1600 STAM_COUNTER_INC(&gStatRefuseWP0);
1601 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1602 return false;
1603 }
1604
1605 if (PATMIsPatchGCAddr(env->pVM, eip))
1606 {
1607 Log2(("raw r0 mode forced: patch code\n"));
1608 *piException = EXCP_EXECUTE_RAW;
1609 return true;
1610 }
1611
1612#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1613 if (!(env->eflags & IF_MASK))
1614 {
1615 STAM_COUNTER_INC(&gStatRefuseIF0);
1616 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1617 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1618 return false;
1619 }
1620#endif
1621
1622 env->state |= CPU_RAW_RING0;
1623 }
1624
1625 /*
1626 * Don't reschedule the first time we're called, because there might be
1627 * special reasons why we're here that is not covered by the above checks.
1628 */
1629 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1630 {
1631 Log2(("raw mode refused: first scheduling\n"));
1632 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1633 return false;
1634 }
1635
1636 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1637 *piException = EXCP_EXECUTE_RAW;
1638 return true;
1639}
1640
1641
1642/**
1643 * Fetches a code byte.
1644 *
1645 * @returns Success indicator (bool) for ease of use.
1646 * @param env The CPU environment structure.
1647 * @param GCPtrInstr Where to fetch code.
1648 * @param pu8Byte Where to store the byte on success
1649 */
1650bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1651{
1652 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1653 if (RT_SUCCESS(rc))
1654 return true;
1655 return false;
1656}
1657
1658
1659/**
1660 * Flush (or invalidate if you like) page table/dir entry.
1661 *
1662 * (invlpg instruction; tlb_flush_page)
1663 *
1664 * @param env Pointer to cpu environment.
1665 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1666 */
1667void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1668{
1669 PVM pVM = env->pVM;
1670 PCPUMCTX pCtx;
1671 int rc;
1672
1673 /*
1674 * When we're replaying invlpg instructions or restoring a saved
1675 * state we disable this path.
1676 */
1677 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1678 return;
1679 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1680 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1681
1682 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1683
1684 /*
1685 * Update the control registers before calling PGMFlushPage.
1686 */
1687 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1688 Assert(pCtx);
1689 pCtx->cr0 = env->cr[0];
1690 pCtx->cr3 = env->cr[3];
1691 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1692 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1693 pCtx->cr4 = env->cr[4];
1694
1695 /*
1696 * Let PGM do the rest.
1697 */
1698 Assert(env->pVCpu);
1699 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1700 if (RT_FAILURE(rc))
1701 {
1702 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1703 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1704 }
1705 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1706}
1707
1708
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to an R3 pointer for the recompiler's
 * TLB, encoding access-handler information in the low bits of the returned
 * pointer (hence the alignment assertion below).
 *
 * @returns R3 pointer on success; (void *)1 when the page must go through
 *          the handler path; low bit 2 set when writes must be caught.
 * @param   env1        The CPU environment.
 * @param   physAddr    The guest physical address (page-aligned enough).
 * @param   fWritable   Write access intent.  NOTE(review): currently unused —
 *                      the call below always passes true; presumably the
 *                      write-catch status encoded in the pointer low bits
 *                      makes the parameter redundant — confirm.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;   /* magic value: force the access-handler path */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);   /* low bit 2: catch writes */
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1731
1732
1733/**
1734 * Called from tlb_protect_code in order to write monitor a code page.
1735 *
1736 * @param env Pointer to the CPU environment.
1737 * @param GCPtr Code page to monitor
1738 */
1739void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1740{
1741#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1742 Assert(env->pVM->rem.s.fInREM);
1743 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1744 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1745 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1746 && !(env->eflags & VM_MASK) /* no V86 mode */
1747 && !HWACCMIsEnabled(env->pVM))
1748 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1749#endif
1750}
1751
1752
1753/**
1754 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1755 *
1756 * @param env Pointer to the CPU environment.
1757 * @param GCPtr Code page to monitor
1758 */
1759void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1760{
1761 Assert(env->pVM->rem.s.fInREM);
1762#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1763 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1764 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1765 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1766 && !(env->eflags & VM_MASK) /* no V86 mode */
1767 && !HWACCMIsEnabled(env->pVM))
1768 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1769#endif
1770}
1771
1772
1773/**
1774 * Called when the CPU is initialized, any of the CRx registers are changed or
1775 * when the A20 line is modified.
1776 *
1777 * @param env Pointer to the CPU environment.
1778 * @param fGlobal Set if the flush is global.
1779 */
1780void remR3FlushTLB(CPUState *env, bool fGlobal)
1781{
1782 PVM pVM = env->pVM;
1783 PCPUMCTX pCtx;
1784
1785 /*
1786 * When we're replaying invlpg instructions or restoring a saved
1787 * state we disable this path.
1788 */
1789 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1790 return;
1791 Assert(pVM->rem.s.fInREM);
1792
1793 /*
1794 * The caller doesn't check cr4, so we have to do that for ourselves.
1795 */
1796 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1797 fGlobal = true;
1798 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1799
1800 /*
1801 * Update the control registers before calling PGMR3FlushTLB.
1802 */
1803 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1804 Assert(pCtx);
1805 pCtx->cr0 = env->cr[0];
1806 pCtx->cr3 = env->cr[3];
1807 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1808 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1809 pCtx->cr4 = env->cr[4];
1810
1811 /*
1812 * Let PGM do the rest.
1813 */
1814 Assert(env->pVCpu);
1815 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1816}
1817
1818
1819/**
1820 * Called when any of the cr0, cr4 or efer registers is updated.
1821 *
1822 * @param env Pointer to the CPU environment.
1823 */
1824void remR3ChangeCpuMode(CPUState *env)
1825{
1826 PVM pVM = env->pVM;
1827 uint64_t efer;
1828 PCPUMCTX pCtx;
1829 int rc;
1830
1831 /*
1832 * When we're replaying loads or restoring a saved
1833 * state this path is disabled.
1834 */
1835 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1836 return;
1837 Assert(pVM->rem.s.fInREM);
1838
1839 /*
1840 * Update the control registers before calling PGMChangeMode()
1841 * as it may need to map whatever cr3 is pointing to.
1842 */
1843 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1844 Assert(pCtx);
1845 pCtx->cr0 = env->cr[0];
1846 pCtx->cr3 = env->cr[3];
1847 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1848 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1849 pCtx->cr4 = env->cr[4];
1850
1851#ifdef TARGET_X86_64
1852 efer = env->efer;
1853#else
1854 efer = 0;
1855#endif
1856 Assert(env->pVCpu);
1857 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1858 if (rc != VINF_SUCCESS)
1859 {
1860 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1861 {
1862 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1863 remR3RaiseRC(env->pVM, rc);
1864 }
1865 else
1866 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1867 }
1868}
1869
1870
1871/**
1872 * Called from compiled code to run dma.
1873 *
1874 * @param env Pointer to the CPU environment.
1875 */
1876void remR3DmaRun(CPUState *env)
1877{
1878 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1879 PDMR3DmaRun(env->pVM);
1880 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1881}
1882
1883
1884/**
1885 * Called from compiled code to schedule pending timers in VMM
1886 *
1887 * @param env Pointer to the CPU environment.
1888 */
1889void remR3TimersRun(CPUState *env)
1890{
1891 LogFlow(("remR3TimersRun:\n"));
1892 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1893 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1894 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1895 TMR3TimerQueuesDo(env->pVM);
1896 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1897 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1898}
1899
1900
1901/**
1902 * Record trap occurrence
1903 *
1904 * @returns VBox status code
1905 * @param env Pointer to the CPU environment.
1906 * @param uTrap Trap nr
1907 * @param uErrorCode Error code
1908 * @param pvNextEIP Next EIP
1909 */
1910int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1911{
1912 PVM pVM = env->pVM;
1913#ifdef VBOX_WITH_STATISTICS
1914 static STAMCOUNTER s_aStatTrap[255];
1915 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1916#endif
1917
1918#ifdef VBOX_WITH_STATISTICS
1919 if (uTrap < 255)
1920 {
1921 if (!s_aRegisters[uTrap])
1922 {
1923 char szStatName[64];
1924 s_aRegisters[uTrap] = true;
1925 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1926 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1927 }
1928 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1929 }
1930#endif
1931 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1932 if( uTrap < 0x20
1933 && (env->cr[0] & X86_CR0_PE)
1934 && !(env->eflags & X86_EFL_VM))
1935 {
1936#ifdef DEBUG
1937 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1938#endif
1939 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1940 {
1941 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1942 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1943 return VERR_REM_TOO_MANY_TRAPS;
1944 }
1945 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1946 pVM->rem.s.cPendingExceptions = 1;
1947 pVM->rem.s.uPendingException = uTrap;
1948 pVM->rem.s.uPendingExcptEIP = env->eip;
1949 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1950 }
1951 else
1952 {
1953 pVM->rem.s.cPendingExceptions = 0;
1954 pVM->rem.s.uPendingException = uTrap;
1955 pVM->rem.s.uPendingExcptEIP = env->eip;
1956 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1957 }
1958 return VINF_SUCCESS;
1959}
1960
1961
1962/*
1963 * Clear current active trap
1964 *
1965 * @param pVM VM Handle.
1966 */
1967void remR3TrapClear(PVM pVM)
1968{
1969 pVM->rem.s.cPendingExceptions = 0;
1970 pVM->rem.s.uPendingException = 0;
1971 pVM->rem.s.uPendingExcptEIP = 0;
1972 pVM->rem.s.uPendingExcptCR2 = 0;
1973}
1974
1975
1976/*
1977 * Record previous call instruction addresses
1978 *
1979 * @param env Pointer to the CPU environment.
1980 */
1981void remR3RecordCall(CPUState *env)
1982{
1983 CSAMR3RecordCallAddress(env->pVM, env->eip);
1984}
1985
1986
1987/**
1988 * Syncs the internal REM state with the VM.
1989 *
 * This must be called before REMR3Run() is invoked whenever the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
1993 *
1994 * @returns VBox status code.
1995 *
1996 * @param pVM VM Handle.
1997 * @param pVCpu VMCPU Handle.
1998 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
2002 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8] = pCtx->r8;
    pVM->rem.s.Env.regs[9] = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Flush each page the guest invalidated while we were
     * outside REM from the recompiler's TLB as well.
     */
    if (pVM->rem.s.cInvalidatedPages)
    {
        RTUINT i;

        pVM->rem.s.fIgnoreInvlPg = true;
        for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
        {
            Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
            tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
        }
        pVM->rem.s.fIgnoreInvlPg = false;
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer = pCtx->msrEFER;
    pVM->rem.s.Env.star = pCtx->msrSTAR;
    pVM->rem.s.Env.pat = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
       )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        /* A changed selector is reloaded via the GDT/LDT by sync_seg; an unchanged one
           just gets its 'newselector' (out-of-sync marker) cleared. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.  A trap pending in TRPM is handed over to the
     * recompiler and cleared from TRPM.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int = 0;
            pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and set the default to 2 bytes, then
             * check for int3 and into.
             */
            pVM->rem.s.Env.exception_is_int = 1;
            pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        switch (u8TrapNo)
        {
            case 0x0e:
                pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                break;

            /* No (meaningful) error code for the rest. */
            case 0x11: case 0x08:
            default:
                pVM->rem.s.Env.error_code = 0;
                break;
        }

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2456
2457
2458/**
 * Syncs back changes in the REM state to the VM state.
2460 *
2461 * This must be called after invoking REMR3Run().
2462 * Calling it several times in a row is not permitted.
2463 *
2464 * @returns VBox status code.
2465 *
2466 * @param pVM VM Handle.
2467 * @param pVCpu VMCPU Handle.
2468 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8  = pVM->rem.s.Env.regs[8];
    pCtx->r9  = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that are still marked out-of-sync on exit. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    /* A VME change requires the TSS (I/O redirection bitmap) to be resynced. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-set the busy bit the recompiler stripped (see REMR3State). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.  An exception the recompiler left pending is handed
     * back to TRPM so the rest of the VMM sees it.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2717
2718
2719/**
 * This is called by the disassembler when it wants to update the cpu state
 * before, for instance, doing a register dump.
2722 */
2723static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2724{
2725 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2726 unsigned i;
2727
2728 Assert(pVM->rem.s.fInREM);
2729
2730 /*
2731 * Copy back the registers.
2732 * This is done in the order they are declared in the CPUMCTX structure.
2733 */
2734
2735 /** @todo FOP */
2736 /** @todo FPUIP */
2737 /** @todo CS */
2738 /** @todo FPUDP */
2739 /** @todo DS */
2740 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2741 pCtx->fpu.MXCSR = 0;
2742 pCtx->fpu.MXCSR_MASK = 0;
2743
2744 /** @todo check if FPU/XMM was actually used in the recompiler */
2745 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2746//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2747
2748#ifdef TARGET_X86_64
2749 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2750 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2751 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2752 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2753 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2754 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2755 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2756 pCtx->r8 = pVM->rem.s.Env.regs[8];
2757 pCtx->r9 = pVM->rem.s.Env.regs[9];
2758 pCtx->r10 = pVM->rem.s.Env.regs[10];
2759 pCtx->r11 = pVM->rem.s.Env.regs[11];
2760 pCtx->r12 = pVM->rem.s.Env.regs[12];
2761 pCtx->r13 = pVM->rem.s.Env.regs[13];
2762 pCtx->r14 = pVM->rem.s.Env.regs[14];
2763 pCtx->r15 = pVM->rem.s.Env.regs[15];
2764
2765 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2766#else
2767 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2768 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2769 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2770 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2771 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2772 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2773 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2774
2775 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2776#endif
2777
2778 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2779
2780 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2781 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2782 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2783 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2784 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2785
2786#ifdef TARGET_X86_64
2787 pCtx->rip = pVM->rem.s.Env.eip;
2788 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2789#else
2790 pCtx->eip = pVM->rem.s.Env.eip;
2791 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2792#endif
2793
2794 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2795 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2796 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2797 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2798 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2799 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2800
2801 for (i = 0; i < 8; i++)
2802 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2803
2804 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2805 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2806 {
2807 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2808 STAM_COUNTER_INC(&gStatREMGDTChange);
2809 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2810 }
2811
2812 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2813 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2814 {
2815 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2816 STAM_COUNTER_INC(&gStatREMIDTChange);
2817 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2818 }
2819
2820 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2821 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2822 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2823 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2824 {
2825 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2826 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2827 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2828 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2829 STAM_COUNTER_INC(&gStatREMLDTRChange);
2830 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2831 }
2832
2833 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2834 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2835 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2836 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2837 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2838 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2839 : 0) )
2840 {
2841 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2842 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2843 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2844 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2845 pCtx->tr = pVM->rem.s.Env.tr.selector;
2846 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2847 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2848 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2849 if (pCtx->trHid.Attr.u)
2850 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2851 STAM_COUNTER_INC(&gStatREMTRChange);
2852 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2853 }
2854
2855 /** @todo These values could still be out of sync! */
2856 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2857 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2858 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2859 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2860
2861 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2862 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2863 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2864
2865 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2866 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2867 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2868
2869 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2870 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2871 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2872
2873 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2874 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2875 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2876
2877 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2878 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2879 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2880
2881 /* Sysenter MSR */
2882 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2883 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2884 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2885
2886 /* System MSRs. */
2887 pCtx->msrEFER = pVM->rem.s.Env.efer;
2888 pCtx->msrSTAR = pVM->rem.s.Env.star;
2889 pCtx->msrPAT = pVM->rem.s.Env.pat;
2890#ifdef TARGET_X86_64
2891 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2892 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2893 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2894 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2895#endif
2896
2897}
2898
2899
2900/**
2901 * Update the VMM state information if we're currently in REM.
2902 *
2903 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2904 * we're currently executing in REM and the VMM state is invalid. This method will of
2905 * course check that we're executing in REM before syncing any data over to the VMM.
2906 *
2907 * @param pVM The VM handle.
2908 * @param pVCpu The VMCPU handle.
2909 */
2910REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2911{
2912 if (pVM->rem.s.fInREM)
2913 remR3StateUpdate(pVM, pVCpu);
2914}
2915
2916
2917#undef LOG_GROUP
2918#define LOG_GROUP LOG_GROUP_REM
2919
2920
2921/**
2922 * Notify the recompiler about Address Gate 20 state change.
2923 *
2924 * This notification is required since A20 gate changes are
2925 * initialized from a device driver and the VM might just as
2926 * well be in REM mode as in RAW mode.
2927 *
2928 * @param pVM VM handle.
2929 * @param pVCpu VMCPU handle.
2930 * @param fEnable True if the gate should be enabled.
2931 * False if the gate should be disabled.
2932 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bracket the QEMU call with cIgnoreAll, the same pattern used by all the
       physical-memory registration paths below.  NOTE(review): cIgnoreAll
       appears to suppress handling of notifications REM itself triggers while
       reconfiguring -- confirm against REMInternal.h. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2942
2943
2944/**
2945 * Replays the handler notification changes
2946 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2947 *
2948 * @param pVM VM handle.
2949 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;          /* records taken off the pending list; cross-checked as we replay */
#endif

        /* Lockless purging of pending notifications: atomically steal the whole
           pending list by swapping in the empty-list sentinel (UINT32_MAX). */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         * (Producers push onto the head, so the stolen list is in LIFO order.)
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch to the worker matching the kind the producer recorded. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (CAS loop because other EMTs may be allocating from it concurrently.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3067
3068
3069/**
3070 * Notify REM about changed code page.
3071 *
3072 * @returns VBox status code.
3073 * @param pVM VM handle.
3074 * @param pVCpu VMCPU handle.
3075 * @param pvCodePage Code page address
3076 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
    /* Translation failures are deliberately ignored: with no guest mapping
       there is nothing to invalidate. */
#endif
    return VINF_SUCCESS;
}
3107
3108
3109/**
3110 * Notification about a successful MMR3PhysRegister() call.
3111 *
3112 * @param pVM VM handle.
3113 * @param GCPhys The physical address the RAM.
3114 * @param cb Size of the memory.
3115 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3116 */
3117REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3118{
3119 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3120 VM_ASSERT_EMT(pVM);
3121
3122 /*
3123 * Validate input - we trust the caller.
3124 */
3125 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3126 Assert(cb);
3127 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3128 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3129
3130 /*
3131 * Base ram? Update GCPhysLastRam.
3132 */
3133 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3134 {
3135 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3136 {
3137 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3138 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3139 }
3140 }
3141
3142 /*
3143 * Register the ram.
3144 */
3145 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3146
3147 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3148 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3149 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3150
3151 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3152}
3153
3154
3155/**
3156 * Notification about a successful MMR3PhysRomRegister() call.
3157 *
3158 * @param pVM VM handle.
3159 * @param GCPhys The physical address of the ROM.
3160 * @param cb The size of the ROM.
3161 * @param pvCopy Pointer to the ROM copy.
3162 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
 *                      This function will be called whenever the protection of the
3164 * shadow ROM changes (at reset and end of POST).
3165 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     * Note: pvCopy is not used here; the ROM content is reached through the
     * physical address like ordinary memory.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Register the rom.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /* Writable shadow ROM registers as plain RAM; otherwise the IO_MEM_ROM
       attribute makes QEMU treat the range as read-only. */
    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3189
3190
3191/**
3192 * Notification about a successful memory deregistration or reservation.
3193 *
3194 * @param pVM VM Handle.
3195 * @param GCPhys Start physical address.
3196 * @param cb The size of the range.
3197 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory.
     * IO_MEM_UNASSIGNED marks the range as having no backing in QEMU.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3221
3222
3223/**
3224 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3225 *
3226 * @param pVM VM Handle.
3227 * @param enmType Handler type.
3228 * @param GCPhys Handler range address.
3229 * @param cb Size of the handler range.
3230 * @param fHasHCHandler Set if the handler has a HC callback function.
3231 *
3232 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3233 * Handler memory type to memory which has no HC handler.
3234 */
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
         enmType, GCPhys, cb, fHasHCHandler));

    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /* MMIO ranges always get the MMIO memory type; other handler types only
       need a REM memory type when a host-context callback exists (see the
       remark in the function documentation above the prototype). */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3256
3257/**
3258 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3259 *
3260 * @param pVM VM Handle.
3261 * @param enmType Handler type.
3262 * @param GCPhys Handler range address.
3263 * @param cb Size of the handler range.
3264 * @param fHasHCHandler Set if the handler has a HC callback function.
3265 *
3266 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3267 * Handler memory type to memory which has no HC handler.
3268 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    /* Drain any queued notifications first so this registration is applied
       after everything that preceded it. */
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
}
3275
3276/**
3277 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3278 *
3279 * @param pVM VM Handle.
3280 * @param enmType Handler type.
3281 * @param GCPhys Handler range address.
3282 * @param cb Size of the handler range.
3283 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3285 */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
         enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Non-RAM restore: range must lie above the guest RAM (asserted)
               and is simply unassigned in QEMU. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            /* Restore as ordinary RAM; requires a page-aligned range. */
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3317
3318/**
3319 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3320 *
3321 * @param pVM VM Handle.
3322 * @param enmType Handler type.
3323 * @param GCPhys Handler range address.
3324 * @param cb Size of the handler range.
3325 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3327 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    /* Drain queued notifications first to preserve ordering. */
    REMR3ReplayHandlerNotifications(pVM);
    remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
}
3333
3334
3335/**
3336 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3337 *
3338 * @param pVM VM Handle.
3339 * @param enmType Handler type.
3340 * @param GCPhysOld Old handler range address.
3341 * @param GCPhysNew New handler range address.
3342 * @param cb Size of the handler range.
3343 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3345 */
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
         enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* Moving MMIO ranges is not supported through this path. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    if (fHasHCHandler)
    {
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

        /*
         * Reset the old page.
         */
        PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
        PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }
}
3382
3383/**
3384 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3385 *
3386 * @param pVM VM Handle.
3387 * @param enmType Handler type.
3388 * @param GCPhysOld Old handler range address.
3389 * @param GCPhysNew New handler range address.
3390 * @param cb Size of the handler range.
3391 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
3393 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    /* Drain queued notifications first to preserve ordering. */
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
}
3400
3401/**
3402 * Checks if we're handling access to this page or not.
3403 *
3404 * @returns true if we're trapping access.
3405 * @returns false if we aren't.
3406 * @param pVM The VM handle.
3407 * @param GCPhys The physical address.
3408 *
3409 * @remark This function will only work correctly in VBOX_STRICT builds!
3410 */
REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
{
#ifdef VBOX_STRICT
    unsigned long off;
    REMR3ReplayHandlerNotifications(pVM);

    /* A page counts as handled when its REM memory type is the handler type,
       the MMIO type, or ROM. */
    off = get_phys_page_offset(GCPhys);
    return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
        || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
        || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
#else
    /* Non-strict builds have no page type bookkeeping; see function docs. */
    return false;
#endif
}
3425
3426
3427/**
3428 * Deals with a rare case in get_phys_addr_code where the code
3429 * is being monitored.
3430 *
3431 * It could also be an MMIO page, in which case we will raise a fatal error.
3432 *
3433 * @returns The physical address corresponding to addr.
3434 * @param env The cpu environment.
3435 * @param addr The virtual address.
3436 * @param pTLBEntry The TLB entry.
3437 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored code page: executing from this memory type (e.g. MMIO)
       is fatal.  Dump the handler/MMIO/phys state to the release log before
       aborting so the condition can be diagnosed. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3465
3466/**
3467 * Read guest RAM and ROM.
3468 *
3469 * @param SrcGCPhys The source address (guest physical).
3470 * @param pvDst The destination address.
3471 * @param cb Number of bytes
3472 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    /* cpu_single_env->pVM supplies the VM handle of the executing EMT. */
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3483
3484
3485/**
3486 * Read guest RAM and ROM, unsigned 8-bit.
3487 *
3488 * @param SrcGCPhys The source address (guest physical).
3489 */
3490RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3491{
3492 uint8_t val;
3493 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3494 VBOX_CHECK_ADDR(SrcGCPhys);
3495 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3496 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3497#ifdef VBOX_DEBUG_PHYS
3498 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3499#endif
3500 return val;
3501}
3502
3503
3504/**
3505 * Read guest RAM and ROM, signed 8-bit.
3506 *
3507 * @param SrcGCPhys The source address (guest physical).
3508 */
3509RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3510{
3511 int8_t val;
3512 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3513 VBOX_CHECK_ADDR(SrcGCPhys);
3514 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3515 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3516#ifdef VBOX_DEBUG_PHYS
3517 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3518#endif
3519 return val;
3520}
3521
3522
3523/**
3524 * Read guest RAM and ROM, unsigned 16-bit.
3525 *
3526 * @param SrcGCPhys The source address (guest physical).
3527 */
3528RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3529{
3530 uint16_t val;
3531 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3532 VBOX_CHECK_ADDR(SrcGCPhys);
3533 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3534 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3535#ifdef VBOX_DEBUG_PHYS
3536 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3537#endif
3538 return val;
3539}
3540
3541
3542/**
3543 * Read guest RAM and ROM, signed 16-bit.
3544 *
3545 * @param SrcGCPhys The source address (guest physical).
3546 */
3547RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3548{
3549 int16_t val;
3550 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3551 VBOX_CHECK_ADDR(SrcGCPhys);
3552 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3553 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3554#ifdef VBOX_DEBUG_PHYS
3555 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3556#endif
3557 return val;
3558}
3559
3560
3561/**
3562 * Read guest RAM and ROM, unsigned 32-bit.
3563 *
3564 * @param SrcGCPhys The source address (guest physical).
3565 */
3566RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3567{
3568 uint32_t val;
3569 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3570 VBOX_CHECK_ADDR(SrcGCPhys);
3571 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3572 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3573#ifdef VBOX_DEBUG_PHYS
3574 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3575#endif
3576 return val;
3577}
3578
3579
3580/**
3581 * Read guest RAM and ROM, signed 32-bit.
3582 *
3583 * @param SrcGCPhys The source address (guest physical).
3584 */
3585RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3586{
3587 int32_t val;
3588 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3589 VBOX_CHECK_ADDR(SrcGCPhys);
3590 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3591 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3592#ifdef VBOX_DEBUG_PHYS
3593 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3594#endif
3595 return val;
3596}
3597
3598
3599/**
3600 * Read guest RAM and ROM, unsigned 64-bit.
3601 *
3602 * @param SrcGCPhys The source address (guest physical).
3603 */
3604uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3605{
3606 uint64_t val;
3607 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3608 VBOX_CHECK_ADDR(SrcGCPhys);
3609 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3610 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3611#ifdef VBOX_DEBUG_PHYS
3612 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3613#endif
3614 return val;
3615}
3616
3617
3618/**
3619 * Read guest RAM and ROM, signed 64-bit.
3620 *
3621 * @param SrcGCPhys The source address (guest physical).
3622 */
3623int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3624{
3625 int64_t val;
3626 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3627 VBOX_CHECK_ADDR(SrcGCPhys);
3628 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3629 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3630#ifdef VBOX_DEBUG_PHYS
3631 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3632#endif
3633 return val;
3634}
3635
3636
3637/**
3638 * Write guest RAM.
3639 *
3640 * @param DstGCPhys The destination address (guest physical).
3641 * @param pvSrc The source address.
3642 * @param cb Number of bytes to write
3643 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    /* cpu_single_env->pVM supplies the VM handle of the executing EMT. */
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3654
3655
3656/**
3657 * Write guest RAM, unsigned 8-bit.
3658 *
3659 * @param DstGCPhys The destination address (guest physical).
3660 * @param val Value
3661 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    /* cpu_single_env->pVM supplies the VM handle of the executing EMT. */
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3672
3673
3674/**
 * Write guest RAM, unsigned 16-bit.
3676 *
3677 * @param DstGCPhys The destination address (guest physical).
3678 * @param val Value
3679 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    /* cpu_single_env->pVM supplies the VM handle of the executing EMT. */
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3690
3691
3692/**
3693 * Write guest RAM, unsigned 32-bit.
3694 *
3695 * @param DstGCPhys The destination address (guest physical).
3696 * @param val Value
3697 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    /* cpu_single_env->pVM supplies the VM handle of the executing EMT. */
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3708
3709
3710/**
3711 * Write guest RAM, unsigned 64-bit.
3712 *
3713 * @param DstGCPhys The destination address (guest physical).
3714 * @param val Value
3715 */
3716void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3717{
3718 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3719 VBOX_CHECK_ADDR(DstGCPhys);
3720 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3721 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3722#ifdef VBOX_DEBUG_PHYS
3723 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3724#endif
3725}
3726
3727#undef LOG_GROUP
3728#define LOG_GROUP LOG_GROUP_REM_MMIO
3729
3730/** Read MMIO memory. */
3731static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3732{
3733 uint32_t u32 = 0;
3734 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3735 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3736 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3737 return u32;
3738}
3739
3740/** Read MMIO memory. */
3741static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3742{
3743 uint32_t u32 = 0;
3744 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3745 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3746 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3747 return u32;
3748}
3749
3750/** Read MMIO memory. */
3751static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3752{
3753 uint32_t u32 = 0;
3754 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3755 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3756 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3757 return u32;
3758}
3759
3760/** Write to MMIO memory. */
3761static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3762{
3763 int rc;
3764 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3765 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3766 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3767}
3768
3769/** Write to MMIO memory. */
3770static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3771{
3772 int rc;
3773 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3774 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3775 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3776}
3777
3778/** Write to MMIO memory. */
3779static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3780{
3781 int rc;
3782 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3783 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3784 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3785}
3786
3787
3788#undef LOG_GROUP
3789#define LOG_GROUP LOG_GROUP_REM_HANDLER
3790
3791/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3792
3793static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3794{
3795 uint8_t u8;
3796 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3797 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3798 return u8;
3799}
3800
3801static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3802{
3803 uint16_t u16;
3804 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3805 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3806 return u16;
3807}
3808
3809static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3810{
3811 uint32_t u32;
3812 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3813 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3814 return u32;
3815}
3816
3817static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3818{
3819 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3820 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3821}
3822
3823static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3824{
3825 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3826 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3827}
3828
3829static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3830{
3831 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3832 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3833}
3834
3835/* -+- disassembly -+- */
3836
3837#undef LOG_GROUP
3838#define LOG_GROUP LOG_GROUP_REM_DISAS
3839
3840
3841/**
3842 * Enables or disables singled stepped disassembly.
3843 *
3844 * @returns VBox status code.
3845 * @param pVM VM handle.
3846 * @param fEnable To enable set this flag, to disable clear it.
3847 */
3848static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3849{
3850 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3851 VM_ASSERT_EMT(pVM);
3852
3853 if (fEnable)
3854 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3855 else
3856 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3857#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3858 cpu_single_step(&pVM->rem.s.Env, fEnable);
3859#endif
3860 return VINF_SUCCESS;
3861}
3862
3863
3864/**
3865 * Enables or disables singled stepped disassembly.
3866 *
3867 * @returns VBox status code.
3868 * @param pVM VM handle.
3869 * @param fEnable To enable set this flag, to disable clear it.
3870 */
3871REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3872{
3873 int rc;
3874
3875 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3876 if (VM_IS_EMT(pVM))
3877 return remR3DisasEnableStepping(pVM, fEnable);
3878
3879 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3880 AssertRC(rc);
3881 return rc;
3882}
3883
3884
3885#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3886/**
3887 * External Debugger Command: .remstep [on|off|1|0]
3888 */
3889static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3890{
3891 int rc;
3892
3893 if (cArgs == 0)
3894 /*
3895 * Print the current status.
3896 */
3897 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3898 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3899 else
3900 {
3901 /*
3902 * Convert the argument and change the mode.
3903 */
3904 bool fEnable;
3905 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3906 if (RT_SUCCESS(rc))
3907 {
3908 rc = REMR3DisasEnableStepping(pVM, fEnable);
3909 if (RT_SUCCESS(rc))
3910 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3911 else
3912 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3913 }
3914 else
3915 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3916 }
3917 return rc;
3918}
3919#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3920
3921
3922/**
3923 * Disassembles one instruction and prints it to the log.
3924 *
3925 * @returns Success indicator.
3926 * @param env Pointer to the recompiler CPU structure.
3927 * @param f32BitCode Indicates that whether or not the code should
3928 * be disassembled as 16 or 32 bit. If -1 the CS
3929 * selector will be inspected.
3930 * @param pszPrefix
3931 */
3932bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3933{
3934 PVM pVM = env->pVM;
3935 const bool fLog = LogIsEnabled();
3936 const bool fLog2 = LogIs2Enabled();
3937 int rc = VINF_SUCCESS;
3938
3939 /*
3940 * Don't bother if there ain't any log output to do.
3941 */
3942 if (!fLog && !fLog2)
3943 return true;
3944
3945 /*
3946 * Update the state so DBGF reads the correct register values.
3947 */
3948 remR3StateUpdate(pVM, env->pVCpu);
3949
3950 /*
3951 * Log registers if requested.
3952 */
3953 if (fLog2)
3954 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3955
3956 /*
3957 * Disassemble to log.
3958 */
3959 if (fLog)
3960 {
3961 PVMCPU pVCpu = VMMGetCpu(pVM);
3962 char szBuf[256];
3963 szBuf[0] = '\0';
3964 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3965 pVCpu->idCpu,
3966 0, /* Sel */
3967 0, /* GCPtr */
3968 DBGF_DISAS_FLAGS_CURRENT_GUEST
3969 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3970 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3971 szBuf,
3972 sizeof(szBuf),
3973 NULL);
3974 if (RT_FAILURE(rc))
3975 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3976 if (pszPrefix && *pszPrefix)
3977 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3978 else
3979 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3980 }
3981
3982 return RT_SUCCESS(rc);
3983}
3984
3985
3986/**
3987 * Disassemble recompiled code.
3988 *
3989 * @param phFileIgnored Ignored, logfile usually.
3990 * @param pvCode Pointer to the code block.
3991 * @param cb Size of the code block.
3992 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
        /* Recompiled code is host code, so pick the host's bitness. */
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                /* On failure, skip one byte and retry at the next offset. */
                DISAS_PRINTF("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
4033
4034
4035/**
4036 * Disassemble guest code.
4037 *
4038 * @param phFileIgnored Ignored, logfile usually.
4039 * @param uCode The guest address of the code to disassemble. (flat?)
4040 * @param cb Number of bytes to disassemble.
4041 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4042 */
4043void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4044{
4045#ifdef DEBUG_TMP_LOGGING
4046# define DISAS_PRINTF(x...) fprintf(phFile, x)
4047#else
4048# define DISAS_PRINTF(x...) RTLogPrintf(x)
4049 if (LogIs2Enabled())
4050#endif
4051 {
4052 PVM pVM = cpu_single_env->pVM;
4053 PVMCPU pVCpu = cpu_single_env->pVCpu;
4054 RTSEL cs;
4055 RTGCUINTPTR eip;
4056
4057 Assert(pVCpu);
4058
4059 /*
4060 * Update the state so DBGF reads the correct register values (flags).
4061 */
4062 remR3StateUpdate(pVM, pVCpu);
4063
4064 /*
4065 * Do the disassembling.
4066 */
4067 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4068 cs = cpu_single_env->segs[R_CS].selector;
4069 eip = uCode - cpu_single_env->segs[R_CS].base;
4070 for (;;)
4071 {
4072 char szBuf[256];
4073 uint32_t cbInstr;
4074 int rc = DBGFR3DisasInstrEx(pVM,
4075 pVCpu->idCpu,
4076 cs,
4077 eip,
4078 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4079 szBuf, sizeof(szBuf),
4080 &cbInstr);
4081 if (RT_SUCCESS(rc))
4082 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
4083 else
4084 {
4085 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4086 cbInstr = 1;
4087 }
4088
4089 /* next */
4090 if (cb <= cbInstr)
4091 break;
4092 cb -= cbInstr;
4093 uCode += cbInstr;
4094 eip += cbInstr;
4095 }
4096 }
4097#undef DISAS_PRINTF
4098}
4099
4100
4101/**
4102 * Looks up a guest symbol.
4103 *
4104 * @returns Pointer to symbol name. This is a static buffer.
4105 * @param orig_addr The address in question.
4106 */
const char *lookup_symbol(target_ulong orig_addr)
{
    PVM pVM = cpu_single_env->pVM;
    RTGCINTPTR off = 0;
    RTDBGSYMBOL Sym;
    DBGFADDRESS Addr;

    int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
    if (RT_SUCCESS(rc))
    {
        /* NOTE(review): static buffer -- not reentrant or thread safe; the
           caller must consume the string before the next lookup. */
        static char szSym[sizeof(Sym.szName) + 48];
        /* NOTE(review): %x with an RTGCINTPTR argument -- confirm RTStrPrintf
           handles the operand width, or consider a width-correct format. */
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
4128
4129
4130#undef LOG_GROUP
4131#define LOG_GROUP LOG_GROUP_REM
4132
4133
4134/* -+- FF notifications -+- */
4135
4136
4137/**
4138 * Notification about a pending interrupt.
4139 *
4140 * @param pVM VM Handle.
4141 * @param pVCpu VMCPU Handle.
4142 * @param u8Interrupt Interrupt
4143 * @thread The emulation thread.
4144 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    /* Only one pending interrupt can be recorded at a time; it is consumed
       by cpu_get_pic_interrupt() below. */
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
4150
4151/**
4152 * Notification about a pending interrupt.
4153 *
4154 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4155 * @param pVM VM Handle.
4156 * @param pVCpu VMCPU Handle.
4157 * @thread The emulation thread.
4158 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    /* Plain read; REM_NO_PENDING_IRQ means nothing is pending. */
    return pVM->rem.s.u32PendingInterrupt;
}
4163
4164/**
4165 * Notification about the interrupt FF being set.
4166 *
4167 * @param pVM VM Handle.
4168 * @param pVCpu VMCPU Handle.
4169 * @thread The emulation thread.
4170 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    /* Only poke the recompiler CPU if it is actually executing; atomic OR
       because this may race the EMT running inside REM. */
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_HARD);
    }
}
4181
4182
4183/**
 * Notification about the interrupt FF being cleared.
4185 *
4186 * @param pVM VM Handle.
4187 * @param pVCpu VMCPU Handle.
4188 * @thread Any.
4189 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    /* Withdraw the hard-interrupt request from the recompiler CPU if it is
       currently executing in REM. */
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
4196
4197
4198/**
4199 * Notification about pending timer(s).
4200 *
4201 * @param pVM VM Handle.
4202 * @param pVCpuDst The target cpu for this notification.
4203 * TM will not broadcast pending timer events, but use
4204 * a dedicated EMT for them. So, only interrupt REM
4205 * execution if the given CPU is executing in REM.
4206 * @thread Any.
4207 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
{
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        /* Only interrupt REM execution when the notified CPU is the one
           currently running in the recompiler (see function docs above). */
        if (pVM->rem.s.Env.pVCpu == pVCpuDst)
        {
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
            /* Atomic OR because this may race the EMT running inside REM. */
            ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_TIMER);
        }
        else
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
    }
    else
        LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
}
4227
4228
4229/**
4230 * Notification about pending DMA transfers.
4231 *
4232 * @param pVM VM Handle.
4233 * @thread Any.
4234 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    /* Atomic OR because this may be called from any thread while the EMT is
       executing inside REM. */
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_DMA);
    }
}
4244
4245
4246/**
 * Notification about pending queues.
4248 *
4249 * @param pVM VM Handle.
4250 * @thread Any.
4251 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    /* Force the recompiler to exit so the queues can be serviced; atomic OR
       because this may be called from any thread. */
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
4261
4262
4263/**
4264 * Notification about pending FF set by an external thread.
4265 *
4266 * @param pVM VM handle.
4267 * @thread Any.
4268 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    /* Force the recompiler to exit so the forced-action flags can be
       processed; atomic OR because this may be called from any thread. */
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
4278
4279
4280#ifdef VBOX_WITH_STATISTICS
4281void remR3ProfileStart(int statcode)
4282{
4283 STAMPROFILEADV *pStat;
4284 switch(statcode)
4285 {
4286 case STATS_EMULATE_SINGLE_INSTR:
4287 pStat = &gStatExecuteSingleInstr;
4288 break;
4289 case STATS_QEMU_COMPILATION:
4290 pStat = &gStatCompilationQEmu;
4291 break;
4292 case STATS_QEMU_RUN_EMULATED_CODE:
4293 pStat = &gStatRunCodeQEmu;
4294 break;
4295 case STATS_QEMU_TOTAL:
4296 pStat = &gStatTotalTimeQEmu;
4297 break;
4298 case STATS_QEMU_RUN_TIMERS:
4299 pStat = &gStatTimers;
4300 break;
4301 case STATS_TLB_LOOKUP:
4302 pStat= &gStatTBLookup;
4303 break;
4304 case STATS_IRQ_HANDLING:
4305 pStat= &gStatIRQ;
4306 break;
4307 case STATS_RAW_CHECK:
4308 pStat = &gStatRawCheck;
4309 break;
4310
4311 default:
4312 AssertMsgFailed(("unknown stat %d\n", statcode));
4313 return;
4314 }
4315 STAM_PROFILE_ADV_START(pStat, a);
4316}
4317
4318
4319void remR3ProfileStop(int statcode)
4320{
4321 STAMPROFILEADV *pStat;
4322 switch(statcode)
4323 {
4324 case STATS_EMULATE_SINGLE_INSTR:
4325 pStat = &gStatExecuteSingleInstr;
4326 break;
4327 case STATS_QEMU_COMPILATION:
4328 pStat = &gStatCompilationQEmu;
4329 break;
4330 case STATS_QEMU_RUN_EMULATED_CODE:
4331 pStat = &gStatRunCodeQEmu;
4332 break;
4333 case STATS_QEMU_TOTAL:
4334 pStat = &gStatTotalTimeQEmu;
4335 break;
4336 case STATS_QEMU_RUN_TIMERS:
4337 pStat = &gStatTimers;
4338 break;
4339 case STATS_TLB_LOOKUP:
4340 pStat= &gStatTBLookup;
4341 break;
4342 case STATS_IRQ_HANDLING:
4343 pStat= &gStatIRQ;
4344 break;
4345 case STATS_RAW_CHECK:
4346 pStat = &gStatRawCheck;
4347 break;
4348 default:
4349 AssertMsgFailed(("unknown stat %d\n", statcode));
4350 return;
4351 }
4352 STAM_PROFILE_ADV_STOP(pStat, a);
4353}
4354#endif
4355
4356/**
4357 * Raise an RC, force rem exit.
4358 *
4359 * @param pVM VM handle.
4360 * @param rc The rc.
4361 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Record the status code and interrupt the recompiler so the REM run
       loop exits and can return it. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4370
4371
4372/* -+- timers -+- */
4373
/* QEMU callback: returns the virtual TSC for the given CPU via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4379
4380
4381/* -+- interrupts -+- */
4382
/* QEMU callback: raises the FPU error line (legacy IRQ 13) via PDM. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4388
/* QEMU callback: returns the next interrupt vector, or -1 if none. */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector recorded by REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard-interrupt request raised while APIC/PIC forced
           actions remain pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4422
4423
4424/* -+- local apic -+- */
4425
4426#if 0 /* CPUMSetGuestMsr does this now. */
/* Compiled out (see the #if 0 above): CPUMSetGuestMsr does this now. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    /* Forward to PDM; the status is only logged. */
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4432#endif
4433
4434uint64_t cpu_get_apic_base(CPUX86State *env)
4435{
4436 uint64_t u64;
4437 int rc = PDMApicGetBase(env->pVM, &u64);
4438 if (RT_SUCCESS(rc))
4439 {
4440 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4441 return u64;
4442 }
4443 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4444 return 0;
4445}
4446
/* QEMU callback: sets the task priority register from CR8. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4452
4453uint8_t cpu_get_apic_tpr(CPUX86State *env)
4454{
4455 uint8_t u8;
4456 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4457 if (RT_SUCCESS(rc))
4458 {
4459 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4460 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4461 }
4462 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4463 return 0;
4464}
4465
4466/**
4467 * Read an MSR.
4468 *
4469 * @retval 0 success.
4470 * @retval -1 failure, raise \#GP(0).
4471 * @param env The cpu state.
4472 * @param idMsr The MSR to read.
4473 * @param puValue Where to return the value.
4474 */
4475int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4476{
4477 Assert(env->pVCpu);
4478 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4479}
4480
4481/**
4482 * Write to an MSR.
4483 *
4484 * @retval 0 success.
4485 * @retval -1 failure, raise \#GP(0).
4486 * @param env The cpu state.
4487 * @param idMsr The MSR to read.
4488 * @param puValue Where to return the value.
4489 */
4490int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4491{
4492 Assert(env->pVCpu);
4493 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4494}
4495
4496/* -+- I/O Ports -+- */
4497
4498#undef LOG_GROUP
4499#define LOG_GROUP LOG_GROUP_REM_IOPORT
4500
void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val)
{
    int rc;

    /* Skip level-2 logging for a few very chatty ports (presumably
       POST/CMOS/speaker -- see the port numbers). */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    /* EM scheduling status codes are forwarded via remR3RaiseRC. */
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    /* Anything else is fatal. */
    remAbort(rc, __FUNCTION__);
}
4519
4520void cpu_outw(CPUState *env, pio_addr_t addr, uint16_t val)
4521{
4522 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4523 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4524 if (RT_LIKELY(rc == VINF_SUCCESS))
4525 return;
4526 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4527 {
4528 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4529 remR3RaiseRC(env->pVM, rc);
4530 return;
4531 }
4532 remAbort(rc, __FUNCTION__);
4533}
4534
4535void cpu_outl(CPUState *env, pio_addr_t addr, uint32_t val)
4536{
4537 int rc;
4538 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4539 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4540 if (RT_LIKELY(rc == VINF_SUCCESS))
4541 return;
4542 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4543 {
4544 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4545 remR3RaiseRC(env->pVM, rc);
4546 return;
4547 }
4548 remAbort(rc, __FUNCTION__);
4549}
4550
uint8_t cpu_inb(CPUState *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Skip level-2 logging for the chatty CMOS data port. */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (uint8_t)u32;
    }
    /* EM scheduling status codes: forward via remR3RaiseRC and still return
       the (partial) value IOM gave us. */
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint8_t)u32;
    }
    /* Anything else is fatal; 0xff matches an unconnected ISA bus read. */
    remAbort(rc, __FUNCTION__);
    return UINT8_C(0xff);
}
4570
uint16_t cpu_inw(CPUState *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
        return (uint16_t)u32;
    }
    /* EM scheduling status codes: forward via remR3RaiseRC and still return
       the (partial) value IOM gave us. */
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint16_t)u32;
    }
    /* Anything else is fatal; 0xffff matches an unconnected ISA bus read. */
    remAbort(rc, __FUNCTION__);
    return UINT16_C(0xffff);
}
4589
uint32_t cpu_inl(CPUState *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
//if (addr==0x01f0 && u32 == 0x6b6d)
// loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return u32;
    }
    /* EM scheduling status codes: forward via remR3RaiseRC and still return
       the (partial) value IOM gave us. */
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return u32;
    }
    /* Anything else is fatal; all-ones matches an unconnected bus read. */
    remAbort(rc, __FUNCTION__);
    return UINT32_C(0xffffffff);
}
4610
4611#undef LOG_GROUP
4612#define LOG_GROUP LOG_GROUP_REM
4613
4614
4615/* -+- helpers and misc other interfaces -+- */
4616
4617/**
4618 * Perform the CPUID instruction.
4619 *
4620 * @param env Pointer to the recompiler CPU structure.
4621 * @param idx The CPUID leaf (eax).
4622 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4623 * @param pvEAX Where to store eax.
4624 * @param pvEBX Where to store ebx.
4625 * @param pvECX Where to store ecx.
4626 * @param pvEDX Where to store edx.
4627 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
                   uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
{
    /* The sub-leaf is ignored here; CPUM resolves the leaf on its own. */
    NOREF(idxSub);
    CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
}
4634
4635
4636#if 0 /* not used */
4637/**
4638 * Interface for qemu hardware to report back fatal errors.
4639 */
/* NOTE(review): dead code (inside #if 0). It still uses the old
   single-argument REMR3StateBack/EMR3FatalError signatures (cf. cpu_abort
   below) and must be updated if ever re-enabled. */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4664#endif
4665
4666/**
4667 * Interface for the qemu cpu to report unhandled situation
4668 * raising a fatal VM error.
4669 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Pull up to six pointer-sized arguments out of the va_list by counting
       '%' characters, then re-dispatch to the fixed-arity RTStrPrintf. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    /* Does not return. */
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4726
4727
4728/**
4729 * Aborts the VM.
4730 *
4731 * @param rc VBox error code.
4732 * @param pszTip Hint about why/when this happened.
4733 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    /* Sync the register state back before bailing out through EM. */
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    /* Does not return. */
    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4758
4759
4760/**
4761 * Dumps a linux system call.
4762 * @param pVCpu VMCPU handle.
4763 */
4764void remR3DumpLnxSyscall(PVMCPU pVCpu)
4765{
4766 static const char *apsz[] =
4767 {
4768 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4769 "sys_exit",
4770 "sys_fork",
4771 "sys_read",
4772 "sys_write",
4773 "sys_open", /* 5 */
4774 "sys_close",
4775 "sys_waitpid",
4776 "sys_creat",
4777 "sys_link",
4778 "sys_unlink", /* 10 */
4779 "sys_execve",
4780 "sys_chdir",
4781 "sys_time",
4782 "sys_mknod",
4783 "sys_chmod", /* 15 */
4784 "sys_lchown16",
4785 "sys_ni_syscall", /* old break syscall holder */
4786 "sys_stat",
4787 "sys_lseek",
4788 "sys_getpid", /* 20 */
4789 "sys_mount",
4790 "sys_oldumount",
4791 "sys_setuid16",
4792 "sys_getuid16",
4793 "sys_stime", /* 25 */
4794 "sys_ptrace",
4795 "sys_alarm",
4796 "sys_fstat",
4797 "sys_pause",
4798 "sys_utime", /* 30 */
4799 "sys_ni_syscall", /* old stty syscall holder */
4800 "sys_ni_syscall", /* old gtty syscall holder */
4801 "sys_access",
4802 "sys_nice",
4803 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4804 "sys_sync",
4805 "sys_kill",
4806 "sys_rename",
4807 "sys_mkdir",
4808 "sys_rmdir", /* 40 */
4809 "sys_dup",
4810 "sys_pipe",
4811 "sys_times",
4812 "sys_ni_syscall", /* old prof syscall holder */
4813 "sys_brk", /* 45 */
4814 "sys_setgid16",
4815 "sys_getgid16",
4816 "sys_signal",
4817 "sys_geteuid16",
4818 "sys_getegid16", /* 50 */
4819 "sys_acct",
4820 "sys_umount", /* recycled never used phys() */
4821 "sys_ni_syscall", /* old lock syscall holder */
4822 "sys_ioctl",
4823 "sys_fcntl", /* 55 */
4824 "sys_ni_syscall", /* old mpx syscall holder */
4825 "sys_setpgid",
4826 "sys_ni_syscall", /* old ulimit syscall holder */
4827 "sys_olduname",
4828 "sys_umask", /* 60 */
4829 "sys_chroot",
4830 "sys_ustat",
4831 "sys_dup2",
4832 "sys_getppid",
4833 "sys_getpgrp", /* 65 */
4834 "sys_setsid",
4835 "sys_sigaction",
4836 "sys_sgetmask",
4837 "sys_ssetmask",
4838 "sys_setreuid16", /* 70 */
4839 "sys_setregid16",
4840 "sys_sigsuspend",
4841 "sys_sigpending",
4842 "sys_sethostname",
4843 "sys_setrlimit", /* 75 */
4844 "sys_old_getrlimit",
4845 "sys_getrusage",
4846 "sys_gettimeofday",
4847 "sys_settimeofday",
4848 "sys_getgroups16", /* 80 */
4849 "sys_setgroups16",
4850 "old_select",
4851 "sys_symlink",
4852 "sys_lstat",
4853 "sys_readlink", /* 85 */
4854 "sys_uselib",
4855 "sys_swapon",
4856 "sys_reboot",
4857 "old_readdir",
4858 "old_mmap", /* 90 */
4859 "sys_munmap",
4860 "sys_truncate",
4861 "sys_ftruncate",
4862 "sys_fchmod",
4863 "sys_fchown16", /* 95 */
4864 "sys_getpriority",
4865 "sys_setpriority",
4866 "sys_ni_syscall", /* old profil syscall holder */
4867 "sys_statfs",
4868 "sys_fstatfs", /* 100 */
4869 "sys_ioperm",
4870 "sys_socketcall",
4871 "sys_syslog",
4872 "sys_setitimer",
4873 "sys_getitimer", /* 105 */
4874 "sys_newstat",
4875 "sys_newlstat",
4876 "sys_newfstat",
4877 "sys_uname",
4878 "sys_iopl", /* 110 */
4879 "sys_vhangup",
4880 "sys_ni_syscall", /* old "idle" system call */
4881 "sys_vm86old",
4882 "sys_wait4",
4883 "sys_swapoff", /* 115 */
4884 "sys_sysinfo",
4885 "sys_ipc",
4886 "sys_fsync",
4887 "sys_sigreturn",
4888 "sys_clone", /* 120 */
4889 "sys_setdomainname",
4890 "sys_newuname",
4891 "sys_modify_ldt",
4892 "sys_adjtimex",
4893 "sys_mprotect", /* 125 */
4894 "sys_sigprocmask",
4895 "sys_ni_syscall", /* old "create_module" */
4896 "sys_init_module",
4897 "sys_delete_module",
4898 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4899 "sys_quotactl",
4900 "sys_getpgid",
4901 "sys_fchdir",
4902 "sys_bdflush",
4903 "sys_sysfs", /* 135 */
4904 "sys_personality",
4905 "sys_ni_syscall", /* reserved for afs_syscall */
4906 "sys_setfsuid16",
4907 "sys_setfsgid16",
4908 "sys_llseek", /* 140 */
4909 "sys_getdents",
4910 "sys_select",
4911 "sys_flock",
4912 "sys_msync",
4913 "sys_readv", /* 145 */
4914 "sys_writev",
4915 "sys_getsid",
4916 "sys_fdatasync",
4917 "sys_sysctl",
4918 "sys_mlock", /* 150 */
4919 "sys_munlock",
4920 "sys_mlockall",
4921 "sys_munlockall",
4922 "sys_sched_setparam",
4923 "sys_sched_getparam", /* 155 */
4924 "sys_sched_setscheduler",
4925 "sys_sched_getscheduler",
4926 "sys_sched_yield",
4927 "sys_sched_get_priority_max",
4928 "sys_sched_get_priority_min", /* 160 */
4929 "sys_sched_rr_get_interval",
4930 "sys_nanosleep",
4931 "sys_mremap",
4932 "sys_setresuid16",
4933 "sys_getresuid16", /* 165 */
4934 "sys_vm86",
4935 "sys_ni_syscall", /* Old sys_query_module */
4936 "sys_poll",
4937 "sys_nfsservctl",
4938 "sys_setresgid16", /* 170 */
4939 "sys_getresgid16",
4940 "sys_prctl",
4941 "sys_rt_sigreturn",
4942 "sys_rt_sigaction",
4943 "sys_rt_sigprocmask", /* 175 */
4944 "sys_rt_sigpending",
4945 "sys_rt_sigtimedwait",
4946 "sys_rt_sigqueueinfo",
4947 "sys_rt_sigsuspend",
4948 "sys_pread64", /* 180 */
4949 "sys_pwrite64",
4950 "sys_chown16",
4951 "sys_getcwd",
4952 "sys_capget",
4953 "sys_capset", /* 185 */
4954 "sys_sigaltstack",
4955 "sys_sendfile",
4956 "sys_ni_syscall", /* reserved for streams1 */
4957 "sys_ni_syscall", /* reserved for streams2 */
4958 "sys_vfork", /* 190 */
4959 "sys_getrlimit",
4960 "sys_mmap2",
4961 "sys_truncate64",
4962 "sys_ftruncate64",
4963 "sys_stat64", /* 195 */
4964 "sys_lstat64",
4965 "sys_fstat64",
4966 "sys_lchown",
4967 "sys_getuid",
4968 "sys_getgid", /* 200 */
4969 "sys_geteuid",
4970 "sys_getegid",
4971 "sys_setreuid",
4972 "sys_setregid",
4973 "sys_getgroups", /* 205 */
4974 "sys_setgroups",
4975 "sys_fchown",
4976 "sys_setresuid",
4977 "sys_getresuid",
4978 "sys_setresgid", /* 210 */
4979 "sys_getresgid",
4980 "sys_chown",
4981 "sys_setuid",
4982 "sys_setgid",
4983 "sys_setfsuid", /* 215 */
4984 "sys_setfsgid",
4985 "sys_pivot_root",
4986 "sys_mincore",
4987 "sys_madvise",
4988 "sys_getdents64", /* 220 */
4989 "sys_fcntl64",
4990 "sys_ni_syscall", /* reserved for TUX */
4991 "sys_ni_syscall",
4992 "sys_gettid",
4993 "sys_readahead", /* 225 */
4994 "sys_setxattr",
4995 "sys_lsetxattr",
4996 "sys_fsetxattr",
4997 "sys_getxattr",
4998 "sys_lgetxattr", /* 230 */
4999 "sys_fgetxattr",
5000 "sys_listxattr",
5001 "sys_llistxattr",
5002 "sys_flistxattr",
5003 "sys_removexattr", /* 235 */
5004 "sys_lremovexattr",
5005 "sys_fremovexattr",
5006 "sys_tkill",
5007 "sys_sendfile64",
5008 "sys_futex", /* 240 */
5009 "sys_sched_setaffinity",
5010 "sys_sched_getaffinity",
5011 "sys_set_thread_area",
5012 "sys_get_thread_area",
5013 "sys_io_setup", /* 245 */
5014 "sys_io_destroy",
5015 "sys_io_getevents",
5016 "sys_io_submit",
5017 "sys_io_cancel",
5018 "sys_fadvise64", /* 250 */
5019 "sys_ni_syscall",
5020 "sys_exit_group",
5021 "sys_lookup_dcookie",
5022 "sys_epoll_create",
5023 "sys_epoll_ctl", /* 255 */
5024 "sys_epoll_wait",
5025 "sys_remap_file_pages",
5026 "sys_set_tid_address",
5027 "sys_timer_create",
5028 "sys_timer_settime", /* 260 */
5029 "sys_timer_gettime",
5030 "sys_timer_getoverrun",
5031 "sys_timer_delete",
5032 "sys_clock_settime",
5033 "sys_clock_gettime", /* 265 */
5034 "sys_clock_getres",
5035 "sys_clock_nanosleep",
5036 "sys_statfs64",
5037 "sys_fstatfs64",
5038 "sys_tgkill", /* 270 */
5039 "sys_utimes",
5040 "sys_fadvise64_64",
5041 "sys_ni_syscall" /* sys_vserver */
5042 };
5043
5044 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5045 switch (uEAX)
5046 {
5047 default:
5048 if (uEAX < RT_ELEMENTS(apsz))
5049 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5050 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5051 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5052 else
5053 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5054 break;
5055
5056 }
5057}
5058
5059
5060/**
5061 * Dumps an OpenBSD system call.
5062 * @param pVCpu VMCPU handle.
5063 */
5064void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5065{
5066 static const char *apsz[] =
5067 {
5068 "SYS_syscall", //0
5069 "SYS_exit", //1
5070 "SYS_fork", //2
5071 "SYS_read", //3
5072 "SYS_write", //4
5073 "SYS_open", //5
5074 "SYS_close", //6
5075 "SYS_wait4", //7
5076 "SYS_8",
5077 "SYS_link", //9
5078 "SYS_unlink", //10
5079 "SYS_11",
5080 "SYS_chdir", //12
5081 "SYS_fchdir", //13
5082 "SYS_mknod", //14
5083 "SYS_chmod", //15
5084 "SYS_chown", //16
5085 "SYS_break", //17
5086 "SYS_18",
5087 "SYS_19",
5088 "SYS_getpid", //20
5089 "SYS_mount", //21
5090 "SYS_unmount", //22
5091 "SYS_setuid", //23
5092 "SYS_getuid", //24
5093 "SYS_geteuid", //25
5094 "SYS_ptrace", //26
5095 "SYS_recvmsg", //27
5096 "SYS_sendmsg", //28
5097 "SYS_recvfrom", //29
5098 "SYS_accept", //30
5099 "SYS_getpeername", //31
5100 "SYS_getsockname", //32
5101 "SYS_access", //33
5102 "SYS_chflags", //34
5103 "SYS_fchflags", //35
5104 "SYS_sync", //36
5105 "SYS_kill", //37
5106 "SYS_38",
5107 "SYS_getppid", //39
5108 "SYS_40",
5109 "SYS_dup", //41
5110 "SYS_opipe", //42
5111 "SYS_getegid", //43
5112 "SYS_profil", //44
5113 "SYS_ktrace", //45
5114 "SYS_sigaction", //46
5115 "SYS_getgid", //47
5116 "SYS_sigprocmask", //48
5117 "SYS_getlogin", //49
5118 "SYS_setlogin", //50
5119 "SYS_acct", //51
5120 "SYS_sigpending", //52
5121 "SYS_osigaltstack", //53
5122 "SYS_ioctl", //54
5123 "SYS_reboot", //55
5124 "SYS_revoke", //56
5125 "SYS_symlink", //57
5126 "SYS_readlink", //58
5127 "SYS_execve", //59
5128 "SYS_umask", //60
5129 "SYS_chroot", //61
5130 "SYS_62",
5131 "SYS_63",
5132 "SYS_64",
5133 "SYS_65",
5134 "SYS_vfork", //66
5135 "SYS_67",
5136 "SYS_68",
5137 "SYS_sbrk", //69
5138 "SYS_sstk", //70
5139 "SYS_61",
5140 "SYS_vadvise", //72
5141 "SYS_munmap", //73
5142 "SYS_mprotect", //74
5143 "SYS_madvise", //75
5144 "SYS_76",
5145 "SYS_77",
5146 "SYS_mincore", //78
5147 "SYS_getgroups", //79
5148 "SYS_setgroups", //80
5149 "SYS_getpgrp", //81
5150 "SYS_setpgid", //82
5151 "SYS_setitimer", //83
5152 "SYS_84",
5153 "SYS_85",
5154 "SYS_getitimer", //86
5155 "SYS_87",
5156 "SYS_88",
5157 "SYS_89",
5158 "SYS_dup2", //90
5159 "SYS_91",
5160 "SYS_fcntl", //92
5161 "SYS_select", //93
5162 "SYS_94",
5163 "SYS_fsync", //95
5164 "SYS_setpriority", //96
5165 "SYS_socket", //97
5166 "SYS_connect", //98
5167 "SYS_99",
5168 "SYS_getpriority", //100
5169 "SYS_101",
5170 "SYS_102",
5171 "SYS_sigreturn", //103
5172 "SYS_bind", //104
5173 "SYS_setsockopt", //105
5174 "SYS_listen", //106
5175 "SYS_107",
5176 "SYS_108",
5177 "SYS_109",
5178 "SYS_110",
5179 "SYS_sigsuspend", //111
5180 "SYS_112",
5181 "SYS_113",
5182 "SYS_114",
5183 "SYS_115",
5184 "SYS_gettimeofday", //116
5185 "SYS_getrusage", //117
5186 "SYS_getsockopt", //118
5187 "SYS_119",
5188 "SYS_readv", //120
5189 "SYS_writev", //121
5190 "SYS_settimeofday", //122
5191 "SYS_fchown", //123
5192 "SYS_fchmod", //124
5193 "SYS_125",
5194 "SYS_setreuid", //126
5195 "SYS_setregid", //127
5196 "SYS_rename", //128
5197 "SYS_129",
5198 "SYS_130",
5199 "SYS_flock", //131
5200 "SYS_mkfifo", //132
5201 "SYS_sendto", //133
5202 "SYS_shutdown", //134
5203 "SYS_socketpair", //135
5204 "SYS_mkdir", //136
5205 "SYS_rmdir", //137
5206 "SYS_utimes", //138
5207 "SYS_139",
5208 "SYS_adjtime", //140
5209 "SYS_141",
5210 "SYS_142",
5211 "SYS_143",
5212 "SYS_144",
5213 "SYS_145",
5214 "SYS_146",
5215 "SYS_setsid", //147
5216 "SYS_quotactl", //148
5217 "SYS_149",
5218 "SYS_150",
5219 "SYS_151",
5220 "SYS_152",
5221 "SYS_153",
5222 "SYS_154",
5223 "SYS_nfssvc", //155
5224 "SYS_156",
5225 "SYS_157",
5226 "SYS_158",
5227 "SYS_159",
5228 "SYS_160",
5229 "SYS_getfh", //161
5230 "SYS_162",
5231 "SYS_163",
5232 "SYS_164",
5233 "SYS_sysarch", //165
5234 "SYS_166",
5235 "SYS_167",
5236 "SYS_168",
5237 "SYS_169",
5238 "SYS_170",
5239 "SYS_171",
5240 "SYS_172",
5241 "SYS_pread", //173
5242 "SYS_pwrite", //174
5243 "SYS_175",
5244 "SYS_176",
5245 "SYS_177",
5246 "SYS_178",
5247 "SYS_179",
5248 "SYS_180",
5249 "SYS_setgid", //181
5250 "SYS_setegid", //182
5251 "SYS_seteuid", //183
5252 "SYS_lfs_bmapv", //184
5253 "SYS_lfs_markv", //185
5254 "SYS_lfs_segclean", //186
5255 "SYS_lfs_segwait", //187
5256 "SYS_188",
5257 "SYS_189",
5258 "SYS_190",
5259 "SYS_pathconf", //191
5260 "SYS_fpathconf", //192
5261 "SYS_swapctl", //193
5262 "SYS_getrlimit", //194
5263 "SYS_setrlimit", //195
5264 "SYS_getdirentries", //196
5265 "SYS_mmap", //197
5266 "SYS___syscall", //198
5267 "SYS_lseek", //199
5268 "SYS_truncate", //200
5269 "SYS_ftruncate", //201
5270 "SYS___sysctl", //202
5271 "SYS_mlock", //203
5272 "SYS_munlock", //204
5273 "SYS_205",
5274 "SYS_futimes", //206
5275 "SYS_getpgid", //207
5276 "SYS_xfspioctl", //208
5277 "SYS_209",
5278 "SYS_210",
5279 "SYS_211",
5280 "SYS_212",
5281 "SYS_213",
5282 "SYS_214",
5283 "SYS_215",
5284 "SYS_216",
5285 "SYS_217",
5286 "SYS_218",
5287 "SYS_219",
5288 "SYS_220",
5289 "SYS_semget", //221
5290 "SYS_222",
5291 "SYS_223",
5292 "SYS_224",
5293 "SYS_msgget", //225
5294 "SYS_msgsnd", //226
5295 "SYS_msgrcv", //227
5296 "SYS_shmat", //228
5297 "SYS_229",
5298 "SYS_shmdt", //230
5299 "SYS_231",
5300 "SYS_clock_gettime", //232
5301 "SYS_clock_settime", //233
5302 "SYS_clock_getres", //234
5303 "SYS_235",
5304 "SYS_236",
5305 "SYS_237",
5306 "SYS_238",
5307 "SYS_239",
5308 "SYS_nanosleep", //240
5309 "SYS_241",
5310 "SYS_242",
5311 "SYS_243",
5312 "SYS_244",
5313 "SYS_245",
5314 "SYS_246",
5315 "SYS_247",
5316 "SYS_248",
5317 "SYS_249",
5318 "SYS_minherit", //250
5319 "SYS_rfork", //251
5320 "SYS_poll", //252
5321 "SYS_issetugid", //253
5322 "SYS_lchown", //254
5323 "SYS_getsid", //255
5324 "SYS_msync", //256
5325 "SYS_257",
5326 "SYS_258",
5327 "SYS_259",
5328 "SYS_getfsstat", //260
5329 "SYS_statfs", //261
5330 "SYS_fstatfs", //262
5331 "SYS_pipe", //263
5332 "SYS_fhopen", //264
5333 "SYS_265",
5334 "SYS_fhstatfs", //266
5335 "SYS_preadv", //267
5336 "SYS_pwritev", //268
5337 "SYS_kqueue", //269
5338 "SYS_kevent", //270
5339 "SYS_mlockall", //271
5340 "SYS_munlockall", //272
5341 "SYS_getpeereid", //273
5342 "SYS_274",
5343 "SYS_275",
5344 "SYS_276",
5345 "SYS_277",
5346 "SYS_278",
5347 "SYS_279",
5348 "SYS_280",
5349 "SYS_getresuid", //281
5350 "SYS_setresuid", //282
5351 "SYS_getresgid", //283
5352 "SYS_setresgid", //284
5353 "SYS_285",
5354 "SYS_mquery", //286
5355 "SYS_closefrom", //287
5356 "SYS_sigaltstack", //288
5357 "SYS_shmget", //289
5358 "SYS_semop", //290
5359 "SYS_stat", //291
5360 "SYS_fstat", //292
5361 "SYS_lstat", //293
5362 "SYS_fhstat", //294
5363 "SYS___semctl", //295
5364 "SYS_shmctl", //296
5365 "SYS_msgctl", //297
5366 "SYS_MAXSYSCALL", //298
5367 //299
5368 //300
5369 };
5370 uint32_t uEAX;
5371 if (!LogIsEnabled())
5372 return;
5373 uEAX = CPUMGetGuestEAX(pVCpu);
5374 switch (uEAX)
5375 {
5376 default:
5377 if (uEAX < RT_ELEMENTS(apsz))
5378 {
5379 uint32_t au32Args[8] = {0};
5380 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5381 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5382 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5383 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5384 }
5385 else
5386 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5387 break;
5388 }
5389}
5390
5391
5392#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5393/**
5394 * The Dll main entry point (stub).
5395 */
5396bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5397{
5398 return true;
5399}
5400
/**
 * Minimal memcpy for the no-CRT build.
 *
 * Plain byte-wise copy; per the standard memcpy contract the two regions
 * must not overlap.
 *
 * @returns dst.
 * @param   dst     The destination buffer.
 * @param   src     The source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* const: don't discard the qualifier from src */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5408
5409#endif
5410
/**
 * Intentionally empty stub for a hook the QEMU core calls; judging by the
 * name it is invoked when the CPU's SMM state may have changed, and the
 * VBox recompiler has nothing to do for it — NOTE(review): confirm against
 * the QEMU callers.
 *
 * @param   env     The recompiler CPU state. (unused)
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette