VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 38014

Last change on this file since 38014 was 37852, checked in by vboxsync, 13 years ago

alignment fix (darwin.x86/VBoxREM64).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 179.9 KB
Line 
1/* $Id: VBoxRecompiler.c 37852 2011-07-08 19:18:05Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
76/** Copy 80-bit fpu register at pSrc to pDst.
77 * This is probably faster than *calling* memcpy.
78 */
79#define REM_COPY_FPU_REG(pDst, pSrc) \
80 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
81
82/** How remR3RunLoggingStep operates. */
83#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the major recompiler code paths; registered in REMR3Init,
   deregistered in REMR3Term. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
/* Address space conversion profiling. */
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Counters for the individual reasons raw-mode execution was refused. */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor table / task register change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Per-selector out-of-sync counters; index order is ES,CS,SS,DS,FS,GS
   (see the STAM_REG calls in REMR3Init). */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks.
 * Indexed by access size: [0]=8-bit, [1]=16-bit, [2]=32-bit
 * (matches the remR3MMIOReadU8/U16/U32 naming). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Indexed by access size: [0]=8-bit, [1]=16-bit, [2]=32-bit. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks.
 * Indexed by access size: [0]=8-bit, [1]=16-bit, [2]=32-bit. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks.
 * Indexed by access size: [0]=8-bit, [1]=16-bit, [2]=32-bit. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
/* Handler for the '.remstep' debugger command (defined further down). */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once via DBGCRegisterCommands in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
221
222/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
223 * @todo huh??? That cannot be the case on the mac... So, this
224 * point is probably not valid any longer. */
225uint8_t *code_gen_prologue;
226
227
228/*******************************************************************************
229* Internal Functions *
230*******************************************************************************/
231void remAbort(int rc, const char *pszTip);
232extern int testmath(void);
233
234/* Put them here to avoid unused variable warning. */
235AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
236#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
237//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
238/* Why did this have to be identical?? */
239AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
240#else
241AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
242#endif
243
244
245/**
246 * Initializes the REM.
247 *
248 * @returns VBox status code.
249 * @param pVM The VM to operate on.
250 */
251REMR3DECL(int) REMR3Init(PVM pVM)
252{
253 PREMHANDLERNOTIFICATION pCur;
254 uint32_t u32Dummy;
255 int rc;
256 unsigned i;
257
258#ifdef VBOX_ENABLE_VBOXREM64
259 LogRel(("Using 64-bit aware REM\n"));
260#endif
261
262 /*
263 * Assert sanity.
264 */
265 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
266 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
267 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
268#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
269 Assert(!testmath());
270#endif
271
272 /*
273 * Init some internal data members.
274 */
275 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
276 pVM->rem.s.Env.pVM = pVM;
277#ifdef CPU_RAW_MODE_INIT
278 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
279#endif
280
281 /*
282 * Initialize the REM critical section.
283 *
284 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
285 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
286 * deadlocks. (mostly pgm vs rem locking)
287 */
288 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
289 AssertRCReturn(rc, rc);
290
291 /* ctx. */
292 pVM->rem.s.pCtx = NULL; /* set when executing code. */
293 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
294
295 /* ignore all notifications */
296 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
297
298 code_gen_prologue = RTMemExecAlloc(_1K);
299 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
300
301 cpu_exec_init_all(0);
302
303 /*
304 * Init the recompiler.
305 */
306 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
307 {
308 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
309 return VERR_GENERAL_FAILURE;
310 }
311 PVMCPU pVCpu = VMMGetCpu(pVM);
312 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
313 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
314
315 EMRemLock(pVM);
316 cpu_reset(&pVM->rem.s.Env);
317 EMRemUnlock(pVM);
318
319 /* allocate code buffer for single instruction emulation. */
320 pVM->rem.s.Env.cbCodeBuffer = 4096;
321 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
322 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
323
324 /* Finally, set the cpu_single_env global. */
325 cpu_single_env = &pVM->rem.s.Env;
326
327 /* Nothing is pending by default */
328 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
329
330 /*
331 * Register ram types.
332 */
333 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
334 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
335 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
336 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
337 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
338
339 /* stop ignoring. */
340 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
341
342 /*
343 * Register the saved state data unit.
344 */
345 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
346 NULL, NULL, NULL,
347 NULL, remR3Save, NULL,
348 NULL, remR3Load, NULL);
349 if (RT_FAILURE(rc))
350 return rc;
351
352#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
353 /*
354 * Debugger commands.
355 */
356 static bool fRegisteredCmds = false;
357 if (!fRegisteredCmds)
358 {
359 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
360 if (RT_SUCCESS(rc))
361 fRegisteredCmds = true;
362 }
363#endif
364
365#ifdef VBOX_WITH_STATISTICS
366 /*
367 * Statistics.
368 */
369 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
370 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
371 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
372 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
373 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
374 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
375 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
376 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
377 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
378 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
379 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
380 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
381
382 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
383
384 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
385 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
386 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
387 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
388 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
389 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
390 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
391 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
392 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
393 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
394 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
395
396 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
397 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
398 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
399 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
400
401 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
402 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
403 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
405 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
406 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
407
408 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
409 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
410 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
411 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
412 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
413 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
414
415 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
416#endif /* VBOX_WITH_STATISTICS */
417 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
418 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);
419
420 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
421 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
422 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
423
424
425#ifdef DEBUG_ALL_LOGGING
426 loglevel = ~0;
427#endif
428
429 /*
430 * Init the handler notification lists.
431 */
432 pVM->rem.s.idxPendingList = UINT32_MAX;
433 pVM->rem.s.idxFreeList = 0;
434
435 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
436 {
437 pCur = &pVM->rem.s.aHandlerNotifications[i];
438 pCur->idxNext = i + 1;
439 pCur->idxSelf = i;
440 }
441 pCur->idxNext = UINT32_MAX; /* the last record. */
442
443 return rc;
444}
445
446
447/**
448 * Finalizes the REM initialization.
449 *
450 * This is called after all components, devices and drivers has
451 * been initialized. Its main purpose it to finish the RAM related
452 * initialization.
453 *
454 * @returns VBox status code.
455 *
456 * @param pVM The VM handle.
457 */
458REMR3DECL(int) REMR3InitFinalize(PVM pVM)
459{
460 int rc;
461
462 /*
463 * Ram size & dirty bit map.
464 */
465 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
466 pVM->rem.s.fGCPhysLastRamFixed = true;
467#ifdef RT_STRICT
468 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
469#else
470 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
471#endif
472 return rc;
473}
474
475/**
476 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
477 *
478 * @returns VBox status code.
479 * @param pVM The VM handle.
480 * @param fGuarded Whether to guard the map.
481 */
482static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
483{
484 int rc = VINF_SUCCESS;
485 RTGCPHYS cb;
486
487 AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);
488
489 cb = pVM->rem.s.GCPhysLastRam + 1;
490 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
491 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
492 VERR_OUT_OF_RANGE);
493
494 ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
495 AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
496
497 if (!fGuarded)
498 {
499 ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
500 AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
501 }
502 else
503 {
504 /*
505 * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
506 */
507 uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
508 uint32_t cbBitmapFull = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
509 if (cbBitmapFull == cbBitmapAligned)
510 cbBitmapFull += _4G >> PAGE_SHIFT;
511 else if (cbBitmapFull - cbBitmapAligned < _64K)
512 cbBitmapFull += _64K;
513
514 ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
515 AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
516
517 rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
518 if (RT_FAILURE(rc))
519 {
520 RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
521 AssertLogRelRCReturn(rc, rc);
522 }
523
524 ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
525 }
526
527 /* initialize it. */
528 memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
529 return rc;
530}
531
532
533/**
534 * Terminates the REM.
535 *
536 * Termination means cleaning up and freeing all resources,
537 * the VM it self is at this point powered off or suspended.
538 *
539 * @returns VBox status code.
540 * @param pVM The VM to operate on.
541 */
542REMR3DECL(int) REMR3Term(PVM pVM)
543{
544#ifdef VBOX_WITH_STATISTICS
545 /*
546 * Statistics.
547 */
548 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
549 STAM_DEREG(pVM, &gStatCompilationQEmu);
550 STAM_DEREG(pVM, &gStatRunCodeQEmu);
551 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
552 STAM_DEREG(pVM, &gStatTimers);
553 STAM_DEREG(pVM, &gStatTBLookup);
554 STAM_DEREG(pVM, &gStatIRQ);
555 STAM_DEREG(pVM, &gStatRawCheck);
556 STAM_DEREG(pVM, &gStatMemRead);
557 STAM_DEREG(pVM, &gStatMemWrite);
558 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
559 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
560
561 STAM_DEREG(pVM, &gStatCpuGetTSC);
562
563 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
564 STAM_DEREG(pVM, &gStatRefuseVM86);
565 STAM_DEREG(pVM, &gStatRefusePaging);
566 STAM_DEREG(pVM, &gStatRefusePAE);
567 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
568 STAM_DEREG(pVM, &gStatRefuseIF0);
569 STAM_DEREG(pVM, &gStatRefuseCode16);
570 STAM_DEREG(pVM, &gStatRefuseWP0);
571 STAM_DEREG(pVM, &gStatRefuseRing1or2);
572 STAM_DEREG(pVM, &gStatRefuseCanExecute);
573 STAM_DEREG(pVM, &gStatFlushTBs);
574
575 STAM_DEREG(pVM, &gStatREMGDTChange);
576 STAM_DEREG(pVM, &gStatREMLDTRChange);
577 STAM_DEREG(pVM, &gStatREMIDTChange);
578 STAM_DEREG(pVM, &gStatREMTRChange);
579
580 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
581 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
582 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
583 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
584 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
585 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
586
587 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
588 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
589 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
590 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
591 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
592 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
593
594 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
595#endif /* VBOX_WITH_STATISTICS */
596
597 STAM_REL_DEREG(pVM, &tb_flush_count);
598 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
599 STAM_REL_DEREG(pVM, &tlb_flush_count);
600
601 return VINF_SUCCESS;
602}
603
604
605/**
606 * The VM is being reset.
607 *
608 * For the REM component this means to call the cpu_reset() and
609 * reinitialize some state variables.
610 *
611 * @param pVM VM handle.
612 */
613REMR3DECL(void) REMR3Reset(PVM pVM)
614{
615 EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */
616
617 /*
618 * Reset the REM cpu.
619 */
620 Assert(pVM->rem.s.cIgnoreAll == 0);
621 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
622 cpu_reset(&pVM->rem.s.Env);
623 pVM->rem.s.cInvalidatedPages = 0;
624 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
625 Assert(pVM->rem.s.cIgnoreAll == 0);
626
627 /* Clear raw ring 0 init state */
628 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
629
630 /* Flush the TBs the next time we execute code here. */
631 pVM->rem.s.fFlushTBs = true;
632
633 EMRemUnlock(pVM);
634}
635
636
637/**
638 * Execute state save operation.
639 *
640 * @returns VBox status code.
641 * @param pVM VM Handle.
642 * @param pSSM SSM operation handle.
643 */
644static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
645{
646 PREM pRem = &pVM->rem.s;
647
648 /*
649 * Save the required CPU Env bits.
650 * (Not much because we're never in REM when doing the save.)
651 */
652 LogFlow(("remR3Save:\n"));
653 Assert(!pRem->fInREM);
654 SSMR3PutU32(pSSM, pRem->Env.hflags);
655 SSMR3PutU32(pSSM, ~0); /* separator */
656
657 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
658 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
659 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
660
661 return SSMR3PutU32(pSSM, ~0); /* terminator */
662}
663
664
665/**
666 * Execute state load operation.
667 *
668 * @returns VBox status code.
669 * @param pVM VM Handle.
670 * @param pSSM SSM operation handle.
671 * @param uVersion Data layout version.
672 * @param uPass The data pass.
673 */
674static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
675{
676 uint32_t u32Dummy;
677 uint32_t fRawRing0 = false;
678 uint32_t u32Sep;
679 uint32_t i;
680 int rc;
681 PREM pRem;
682
683 LogFlow(("remR3Load:\n"));
684 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
685
686 /*
687 * Validate version.
688 */
689 if ( uVersion != REM_SAVED_STATE_VERSION
690 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
691 {
692 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
693 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
694 }
695
696 /*
697 * Do a reset to be on the safe side...
698 */
699 REMR3Reset(pVM);
700
701 /*
702 * Ignore all ignorable notifications.
703 * (Not doing this will cause serious trouble.)
704 */
705 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
706
707 /*
708 * Load the required CPU Env bits.
709 * (Not much because we're never in REM when doing the save.)
710 */
711 pRem = &pVM->rem.s;
712 Assert(!pRem->fInREM);
713 SSMR3GetU32(pSSM, &pRem->Env.hflags);
714 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
715 {
716 /* Redundant REM CPU state has to be loaded, but can be ignored. */
717 CPUX86State_Ver16 temp;
718 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
719 }
720
721 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
722 if (RT_FAILURE(rc))
723 return rc;
724 if (u32Sep != ~0U)
725 {
726 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
727 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
728 }
729
730 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
731 SSMR3GetUInt(pSSM, &fRawRing0);
732 if (fRawRing0)
733 pRem->Env.state |= CPU_RAW_RING0;
734
735 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
736 {
737 /*
738 * Load the REM stuff.
739 */
740 /** @todo r=bird: We should just drop all these items, restoring doesn't make
741 * sense. */
742 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
743 if (RT_FAILURE(rc))
744 return rc;
745 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
746 {
747 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
748 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
749 }
750 for (i = 0; i < pRem->cInvalidatedPages; i++)
751 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
752 }
753
754 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
755 if (RT_FAILURE(rc))
756 return rc;
757
758 /* check the terminator. */
759 rc = SSMR3GetU32(pSSM, &u32Sep);
760 if (RT_FAILURE(rc))
761 return rc;
762 if (u32Sep != ~0U)
763 {
764 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
765 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
766 }
767
768 /*
769 * Get the CPUID features.
770 */
771 PVMCPU pVCpu = VMMGetCpu(pVM);
772 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
773 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
774
775 /*
776 * Stop ignoring ignorable notifications.
777 */
778 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
779
780 /*
781 * Sync the whole CPU state when executing code in the recompiler.
782 */
783 for (i = 0; i < pVM->cCpus; i++)
784 {
785 PVMCPU pVCpu = &pVM->aCpus[i];
786 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
787 }
788 return VINF_SUCCESS;
789}
790
791
792
793#undef LOG_GROUP
794#define LOG_GROUP LOG_GROUP_REM_RUN
795
796/**
797 * Single steps an instruction in recompiled mode.
798 *
799 * Before calling this function the REM state needs to be in sync with
800 * the VM. Call REMR3State() to perform the sync. It's only necessary
801 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
802 * and after calling REMR3StateBack().
803 *
804 * @returns VBox status code.
805 *
806 * @param pVM VM Handle.
807 * @param pVCpu VMCPU Handle.
808 */
809REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
810{
811 int rc, interrupt_request;
812 RTGCPTR GCPtrPC;
813 bool fBp;
814
815 /*
816 * Lock the REM - we don't wanna have anyone interrupting us
817 * while stepping - and enabled single stepping. We also ignore
818 * pending interrupts and suchlike.
819 */
820 interrupt_request = pVM->rem.s.Env.interrupt_request;
821 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
822 pVM->rem.s.Env.interrupt_request = 0;
823 cpu_single_step(&pVM->rem.s.Env, 1);
824
825 /*
826 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
827 */
828 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
829 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);
830
831 /*
832 * Execute and handle the return code.
833 * We execute without enabling the cpu tick, so on success we'll
834 * just flip it on and off to make sure it moves
835 */
836 rc = cpu_exec(&pVM->rem.s.Env);
837 if (rc == EXCP_DEBUG)
838 {
839 TMR3NotifyResume(pVM, pVCpu);
840 TMR3NotifySuspend(pVM, pVCpu);
841 rc = VINF_EM_DBG_STEPPED;
842 }
843 else
844 {
845 switch (rc)
846 {
847 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
848 case EXCP_HLT:
849 case EXCP_HALTED: rc = VINF_EM_HALT; break;
850 case EXCP_RC:
851 rc = pVM->rem.s.rc;
852 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
853 break;
854 case EXCP_EXECUTE_RAW:
855 case EXCP_EXECUTE_HWACC:
856 /** @todo: is it correct? No! */
857 rc = VINF_SUCCESS;
858 break;
859 default:
860 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
861 rc = VERR_INTERNAL_ERROR;
862 break;
863 }
864 }
865
866 /*
867 * Restore the stuff we changed to prevent interruption.
868 * Unlock the REM.
869 */
870 if (fBp)
871 {
872 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
873 Assert(rc2 == 0); NOREF(rc2);
874 }
875 cpu_single_step(&pVM->rem.s.Env, 0);
876 pVM->rem.s.Env.interrupt_request = interrupt_request;
877
878 return rc;
879}
880
881
882/**
883 * Set a breakpoint using the REM facilities.
884 *
885 * @returns VBox status code.
886 * @param pVM The VM handle.
887 * @param Address The breakpoint address.
888 * @thread The emulation thread.
889 */
890REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
891{
892 VM_ASSERT_EMT(pVM);
893 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
894 {
895 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
896 return VINF_SUCCESS;
897 }
898 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
899 return VERR_REM_NO_MORE_BP_SLOTS;
900}
901
902
903/**
904 * Clears a breakpoint set by REMR3BreakpointSet().
905 *
906 * @returns VBox status code.
907 * @param pVM The VM handle.
908 * @param Address The breakpoint address.
909 * @thread The emulation thread.
910 */
911REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
912{
913 VM_ASSERT_EMT(pVM);
914 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
915 {
916 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
917 return VINF_SUCCESS;
918 }
919 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
920 return VERR_REM_BP_NOT_FOUND;
921}
922
923
924/**
925 * Emulate an instruction.
926 *
927 * This function executes one instruction without letting anyone
928 * interrupt it. This is intended for being called while being in
929 * raw mode and thus will take care of all the state syncing between
930 * REM and the rest.
931 *
932 * @returns VBox status code.
933 * @param pVM VM handle.
934 * @param pVCpu VMCPU Handle.
935 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * (fFlushTBs is restored right after REMR3State so only this sync skips the flush.)
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save interrupt_request; it is forced to CPU_INTERRUPT_SINGLE_INSTR below
           and restored before returning to the caller. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    /* Scan the breakpoint list for the current PC to tell a BP hit
                       from a plain single step. */
                    CPUBreakpoint *pBP;
                    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1079
1080
1081/**
1082 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1083 *
1084 * @returns VBox status code.
1085 *
1086 * @param pVM The VM handle.
1087 * @param pVCpu The Virtual CPU handle.
1088 */
1089static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1090{
1091 int rc;
1092
1093 Assert(pVM->rem.s.fInREM);
1094#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1095 cpu_single_step(&pVM->rem.s.Env, 1);
1096#else
1097 Assert(!pVM->rem.s.Env.singlestep_enabled);
1098#endif
1099
1100 /*
1101 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1102 */
1103 for (;;)
1104 {
1105 char szBuf[256];
1106
1107 /*
1108 * Log the current registers state and instruction.
1109 */
1110 remR3StateUpdate(pVM, pVCpu);
1111 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1112 szBuf[0] = '\0';
1113 rc = DBGFR3DisasInstrEx(pVM,
1114 pVCpu->idCpu,
1115 0, /* Sel */
1116 0, /* GCPtr */
1117 DBGF_DISAS_FLAGS_CURRENT_GUEST
1118 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1119 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1120 szBuf,
1121 sizeof(szBuf),
1122 NULL);
1123 if (RT_FAILURE(rc))
1124 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1125 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1126
1127 /*
1128 * Execute the instruction.
1129 */
1130 TMNotifyStartOfExecution(pVCpu);
1131
1132 if ( pVM->rem.s.Env.exception_index < 0
1133 || pVM->rem.s.Env.exception_index > 256)
1134 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1135
1136#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1137 pVM->rem.s.Env.interrupt_request = 0;
1138#else
1139 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1140#endif
1141 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1142 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1143 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1144 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1145 pVM->rem.s.Env.interrupt_request,
1146 pVM->rem.s.Env.halted,
1147 pVM->rem.s.Env.exception_index
1148 );
1149
1150 rc = cpu_exec(&pVM->rem.s.Env);
1151
1152 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1153 pVM->rem.s.Env.interrupt_request,
1154 pVM->rem.s.Env.halted,
1155 pVM->rem.s.Env.exception_index
1156 );
1157
1158 TMNotifyEndOfExecution(pVCpu);
1159
1160 switch (rc)
1161 {
1162#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1163 /*
1164 * The normal exit.
1165 */
1166 case EXCP_SINGLE_INSTR:
1167 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1168 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1169 continue;
1170 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1171 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1172 rc = VINF_SUCCESS;
1173 break;
1174
1175#else
1176 /*
1177 * The normal exit, check for breakpoints at PC just to be sure.
1178 */
1179#endif
1180 case EXCP_DEBUG:
1181 if (pVM->rem.s.Env.watchpoint_hit)
1182 {
1183 /** @todo deal with watchpoints */
1184 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1185 rc = VINF_EM_DBG_BREAKPOINT;
1186 }
1187 else
1188 {
1189 CPUBreakpoint *pBP;
1190 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1191 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1192 if (pBP->pc == GCPtrPC)
1193 break;
1194 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1195 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1196 }
1197#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1198 if (rc == VINF_EM_DBG_STEPPED)
1199 {
1200 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1201 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1202 continue;
1203
1204 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1205 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1206 rc = VINF_SUCCESS;
1207 }
1208#endif
1209 break;
1210
1211 /*
1212 * If we take a trap or start servicing a pending interrupt, we might end up here.
1213 * (Timer thread or some other thread wishing EMT's attention.)
1214 */
1215 case EXCP_INTERRUPT:
1216 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1217 rc = VINF_SUCCESS;
1218 break;
1219
1220 /*
1221 * hlt instruction.
1222 */
1223 case EXCP_HLT:
1224 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1225 rc = VINF_EM_HALT;
1226 break;
1227
1228 /*
1229 * The VM has halted.
1230 */
1231 case EXCP_HALTED:
1232 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1233 rc = VINF_EM_HALT;
1234 break;
1235
1236 /*
1237 * Switch to RAW-mode.
1238 */
1239 case EXCP_EXECUTE_RAW:
1240 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1241 rc = VINF_EM_RESCHEDULE_RAW;
1242 break;
1243
1244 /*
1245 * Switch to hardware accelerated RAW-mode.
1246 */
1247 case EXCP_EXECUTE_HWACC:
1248 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1249 rc = VINF_EM_RESCHEDULE_HWACC;
1250 break;
1251
1252 /*
1253 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1254 */
1255 case EXCP_RC:
1256 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1257 rc = pVM->rem.s.rc;
1258 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1259 break;
1260
1261 /*
1262 * Figure out the rest when they arrive....
1263 */
1264 default:
1265 AssertMsgFailed(("rc=%d\n", rc));
1266 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1267 rc = VINF_EM_RESCHEDULE;
1268 break;
1269 }
1270 break;
1271 }
1272
1273#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1274// cpu_single_step(&pVM->rem.s.Env, 0);
1275#else
1276 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1277#endif
1278 return rc;
1279}
1280
1281
1282/**
1283 * Runs code in recompiled mode.
1284 *
1285 * Before calling this function the REM state needs to be in sync with
1286 * the VM. Call REMR3State() to perform the sync. It's only necessary
1287 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1288 * and after calling REMR3StateBack().
1289 *
1290 * @returns VBox status code.
1291 *
1292 * @param pVM VM Handle.
1293 * @param pVCpu VMCPU Handle.
1294 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* Divert to the verbose one-instruction-at-a-time path when requested. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Map QEMU's EXCP_* exit reasons onto VBox status codes. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                /* A breakpoint at the current PC means BP hit; otherwise a single step. */
                CPUBreakpoint *pBP;
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1395
1396
1397/**
1398 * Check if the cpu state is suitable for Raw execution.
1399 *
1400 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1401 *
1402 * @param env The CPU env struct.
1403 * @param eip The EIP to check this for (might differ from env->eip).
1404 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1405 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1406 *
1407 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1408 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    /* In IEM verification builds everything stays in the recompiler. */
    return false;
#endif

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         * (only the fields that decision needs are copied from the QEMU env).
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        /* NB: for each selector, QEMU keeps the attribute bits shifted up 8;
           the 0xF0FF mask keeps only the defined attribute bits. */
        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr             = env->ldt.selector;
        Ctx.ldtrHid.u64Base  = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u   = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->breakpoints))
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->watchpoints))
    {
        //Log2(("raw mode refused: Watchpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        /* PAE only if the guest CPUID actually advertises it. */
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 (supervisor) code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patched code is always run in raw mode. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1651
1652
1653/**
1654 * Fetches a code byte.
1655 *
1656 * @returns Success indicator (bool) for ease of use.
1657 * @param env The CPU environment structure.
1658 * @param GCPtrInstr Where to fetch code.
1659 * @param pu8Byte Where to store the byte on success
1660 */
1661bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1662{
1663 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1664 if (RT_SUCCESS(rc))
1665 return true;
1666 return false;
1667}
1668
1669
1670/**
1671 * Flush (or invalidate if you like) page table/dir entry.
1672 *
1673 * (invlpg instruction; tlb_flush_page)
1674 *
1675 * @param env Pointer to cpu environment.
1676 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1677 */
void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    Assert(EMRemIsLockOwner(env->pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires a TSS resync before cr4 is copied over. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Fall back to a full CR3 resync rather than failing the flush. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1720
1721
1722#ifndef REM_PHYS_ADDR_IN_TLB
1723/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    /* Returns a host pointer with status tagged into the low bits:
     *   (void *)1          - translation failed (catch-all / unassigned),
     *   ptr | 2            - valid pointer, but writes must be caught,
     *   plain ptr          - valid, directly usable.
     * NOTE(review): the fWritable parameter is ignored; the lookup always
     * requests a writable mapping - confirm this is intentional. */
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1743#endif /* REM_PHYS_ADDR_IN_TLB */
1744
1745
1746/**
1747 * Called from tlb_protect_code in order to write monitor a code page.
1748 *
1749 * @param env Pointer to the CPU environment.
1750 * @param GCPtr Code page to monitor
1751 */
1752void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1753{
1754#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1755 Assert(env->pVM->rem.s.fInREM);
1756 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1757 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1758 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1759 && !(env->eflags & VM_MASK) /* no V86 mode */
1760 && !HWACCMIsEnabled(env->pVM))
1761 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1762#endif
1763}
1764
1765
1766/**
1767 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1768 *
1769 * @param env Pointer to the CPU environment.
1770 * @param GCPtr Code page to monitor
1771 */
1772void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1773{
1774 Assert(env->pVM->rem.s.fInREM);
1775#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1776 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1777 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1778 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1779 && !(env->eflags & VM_MASK) /* no V86 mode */
1780 && !HWACCMIsEnabled(env->pVM))
1781 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1782#endif
1783}
1784
1785
1786/**
1787 * Called when the CPU is initialized, any of the CRx registers are changed or
1788 * when the A20 line is modified.
1789 *
1790 * @param env Pointer to the CPU environment.
1791 * @param fGlobal Set if the flush is global.
1792 */
void remR3FlushTLB(CPUX86State *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    Assert(EMRemIsLockOwner(pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * (Without CR4.PGE there are no global pages, so every flush is global.)
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires a TSS resync before cr4 is copied over. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1831
1832
1833/**
1834 * Called when any of the cr0, cr4 or efer registers is updated.
1835 *
1836 * @param env Pointer to the CPU environment.
1837 */
void remR3ChangeCpuMode(CPUX86State *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires a TSS resync before cr4 is copied over. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];
#ifdef TARGET_X86_64
    efer = env->efer;
    pCtx->msrEFER = efer;
#else
    /* No EFER on 32-bit targets. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM codes are forwarded to EM via the RC mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            /* Anything else is fatal at this point. */
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1883
1884
1885/**
1886 * Called from compiled code to run dma.
1887 *
1888 * @param env Pointer to the CPU environment.
1889 */
1890void remR3DmaRun(CPUX86State *env)
1891{
1892 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1893 PDMR3DmaRun(env->pVM);
1894 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1895}
1896
1897
/**
 * Called from compiled code to schedule pending timers in VMM.
 *
 * Switches the profiling from emulated-code to timer accounting, runs the
 * pending timer queues via TM, and switches the profiling back.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3TimersRun(CPUX86State *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1913
1914
1915/**
1916 * Record trap occurrence
1917 *
1918 * @returns VBox status code
1919 * @param env Pointer to the CPU environment.
1920 * @param uTrap Trap nr
1921 * @param uErrorCode Error code
1922 * @param pvNextEIP Next EIP
1923 */
1924int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1925{
1926 PVM pVM = env->pVM;
1927#ifdef VBOX_WITH_STATISTICS
1928 static STAMCOUNTER s_aStatTrap[255];
1929 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1930#endif
1931
1932#ifdef VBOX_WITH_STATISTICS
1933 if (uTrap < 255)
1934 {
1935 if (!s_aRegisters[uTrap])
1936 {
1937 char szStatName[64];
1938 s_aRegisters[uTrap] = true;
1939 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1940 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1941 }
1942 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1943 }
1944#endif
1945 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1946 if( uTrap < 0x20
1947 && (env->cr[0] & X86_CR0_PE)
1948 && !(env->eflags & X86_EFL_VM))
1949 {
1950#ifdef DEBUG
1951 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1952#endif
1953 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1954 {
1955 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1956 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1957 return VERR_REM_TOO_MANY_TRAPS;
1958 }
1959 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1960 pVM->rem.s.cPendingExceptions = 1;
1961 pVM->rem.s.uPendingException = uTrap;
1962 pVM->rem.s.uPendingExcptEIP = env->eip;
1963 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1964 }
1965 else
1966 {
1967 pVM->rem.s.cPendingExceptions = 0;
1968 pVM->rem.s.uPendingException = uTrap;
1969 pVM->rem.s.uPendingExcptEIP = env->eip;
1970 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1971 }
1972 return VINF_SUCCESS;
1973}
1974
1975
1976/*
1977 * Clear current active trap
1978 *
1979 * @param pVM VM Handle.
1980 */
1981void remR3TrapClear(PVM pVM)
1982{
1983 pVM->rem.s.cPendingExceptions = 0;
1984 pVM->rem.s.uPendingException = 0;
1985 pVM->rem.s.uPendingExcptEIP = 0;
1986 pVM->rem.s.uPendingExcptCR2 = 0;
1987}
1988
1989
/**
 * Record previous call instruction addresses.
 *
 * Forwards the current EIP to CSAM so it can record the call site address.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3RecordCall(CPUX86State *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1999
2000
/**
 * Syncs the internal REM state with the VM.
 *
 * This must be called before REMR3Run() is invoked whenever the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 *
 * @remark  The caller has to check for important FFs before calling REMR3Run.
 *          REMR3State will not do this since the majority of the callers don't
 *          want any unnecessary events pending that would immediately
 *          interrupt execution.
 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8] = pCtx->r8;
    pVM->rem.s.Env.regs[9] = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /* Sync the debug registers. */
    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Only if we're not flushing the TLB.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (pVM->rem.s.cInvalidatedPages)
    {
        if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
        {
            RTUINT i;

            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreInvlPg = true;
            for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
            {
                Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
                tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
            }
            pVM->rem.s.fIgnoreInvlPg = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer = pCtx->msrEFER;
    pVM->rem.s.Env.star = pCtx->msrSTAR;
    pVM->rem.s.Env.pat = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
       )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int = 0;
            pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and set the default to 2 bytes,
             * then check for the one byte forms of int3 and into.
             */
            pVM->rem.s.Env.exception_is_int = 1;
            pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case 0x0e:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case 0x11: case 0x08:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2480
2481
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8 = pVM->rem.s.Env.regs[8];
    pCtx->r9 = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    /* Flag a TSS resync whenever the CR4.VME bit was toggled by REM. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* Write back GDTR/IDTR, flagging SELM/TRPM resyncs on base changes. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                   ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                   : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Mark the TSS busy again since QEmu clears the flag (see above). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = false;
    pVM->rem.s.pCtx = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2741
2742
2743/**
2744 * This is called by the disassembler when it wants to update the cpu state
2745 * before for instance doing a register dump.
2746 */
2747static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2748{
2749 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2750 unsigned i;
2751
2752 Assert(pVM->rem.s.fInREM);
2753
2754 /*
2755 * Copy back the registers.
2756 * This is done in the order they are declared in the CPUMCTX structure.
2757 */
2758
2759 /** @todo FOP */
2760 /** @todo FPUIP */
2761 /** @todo CS */
2762 /** @todo FPUDP */
2763 /** @todo DS */
2764 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2765 pCtx->fpu.MXCSR = 0;
2766 pCtx->fpu.MXCSR_MASK = 0;
2767
2768 /** @todo check if FPU/XMM was actually used in the recompiler */
2769 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2770//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2771
2772#ifdef TARGET_X86_64
2773 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2774 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2775 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2776 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2777 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2778 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2779 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2780 pCtx->r8 = pVM->rem.s.Env.regs[8];
2781 pCtx->r9 = pVM->rem.s.Env.regs[9];
2782 pCtx->r10 = pVM->rem.s.Env.regs[10];
2783 pCtx->r11 = pVM->rem.s.Env.regs[11];
2784 pCtx->r12 = pVM->rem.s.Env.regs[12];
2785 pCtx->r13 = pVM->rem.s.Env.regs[13];
2786 pCtx->r14 = pVM->rem.s.Env.regs[14];
2787 pCtx->r15 = pVM->rem.s.Env.regs[15];
2788
2789 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2790#else
2791 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2792 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2793 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2794 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2795 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2796 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2797 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2798
2799 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2800#endif
2801
2802 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2803
2804 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2805 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2806 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2807 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2808 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2809
2810#ifdef TARGET_X86_64
2811 pCtx->rip = pVM->rem.s.Env.eip;
2812 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2813#else
2814 pCtx->eip = pVM->rem.s.Env.eip;
2815 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2816#endif
2817
2818 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2819 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2820 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2821 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2822 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2823 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2824
2825 for (i = 0; i < 8; i++)
2826 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2827
2828 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2829 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2830 {
2831 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2832 STAM_COUNTER_INC(&gStatREMGDTChange);
2833 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2834 }
2835
2836 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2837 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2838 {
2839 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2840 STAM_COUNTER_INC(&gStatREMIDTChange);
2841 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2842 }
2843
2844 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2845 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2846 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2847 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2848 {
2849 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2850 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2851 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2852 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2853 STAM_COUNTER_INC(&gStatREMLDTRChange);
2854 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2855 }
2856
2857 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2858 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2859 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2860 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2861 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2862 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2863 : 0) )
2864 {
2865 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2866 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2867 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2868 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2869 pCtx->tr = pVM->rem.s.Env.tr.selector;
2870 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2871 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2872 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2873 if (pCtx->trHid.Attr.u)
2874 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2875 STAM_COUNTER_INC(&gStatREMTRChange);
2876 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2877 }
2878
2879 /** @todo These values could still be out of sync! */
2880 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2881 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2882 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2883 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2884
2885 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2886 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2887 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2888
2889 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2890 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2891 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2892
2893 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2894 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2895 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2896
2897 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2898 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2899 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2900
2901 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2902 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2903 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2904
2905 /* Sysenter MSR */
2906 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2907 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2908 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2909
2910 /* System MSRs. */
2911 pCtx->msrEFER = pVM->rem.s.Env.efer;
2912 pCtx->msrSTAR = pVM->rem.s.Env.star;
2913 pCtx->msrPAT = pVM->rem.s.Env.pat;
2914#ifdef TARGET_X86_64
2915 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2916 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2917 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2918 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2919#endif
2920
2921}
2922
2923
2924/**
2925 * Update the VMM state information if we're currently in REM.
2926 *
2927 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2928 * we're currently executing in REM and the VMM state is invalid. This method will of
2929 * course check that we're executing in REM before syncing any data over to the VMM.
2930 *
2931 * @param pVM The VM handle.
2932 * @param pVCpu The VMCPU handle.
2933 */
2934REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2935{
2936 if (pVM->rem.s.fInREM)
2937 remR3StateUpdate(pVM, pVCpu);
2938}
2939
2940
2941#undef LOG_GROUP
2942#define LOG_GROUP LOG_GROUP_REM
2943
2944
2945/**
2946 * Notify the recompiler about Address Gate 20 state change.
2947 *
2948 * This notification is required since A20 gate changes are
2949 * initialized from a device driver and the VM might just as
2950 * well be in REM mode as in RAW mode.
2951 *
2952 * @param pVM VM handle.
2953 * @param pVCpu VMCPU handle.
2954 * @param fEnable True if the gate should be enabled.
2955 * False if the gate should be disabled.
2956 */
2957REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2958{
2959 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2960 VM_ASSERT_EMT(pVM);
2961
2962 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2963 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2964 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2965}
2966
2967
/**
 * Replays the handler notification changes.
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * Notifications are queued lockless on pVM->rem.s.idxPendingList (a LIFO of
 * indices into aHandlerNotifications); this function detaches the whole
 * list, reverses it to get FIFO order, dispatches each record to the
 * matching remR3NotifyHandlerPhysical* worker and pushes the record back
 * onto the free list as it goes.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;      /* record count cross-check (strict builds only) */
#endif

        /* Lockless purging of pending notifications: atomically detach the
           whole pending list so producers can keep queueing while we work. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch on the queued notification kind. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (CAS loop: other EMTs may be popping entries concurrently.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3091
3092
3093/**
3094 * Notify REM about changed code page.
3095 *
3096 * @returns VBox status code.
3097 * @param pVM VM handle.
3098 * @param pVCpu VMCPU handle.
3099 * @param pvCodePage Code page address
3100 */
3101REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3102{
3103#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3104 int rc;
3105 RTGCPHYS PhysGC;
3106 uint64_t flags;
3107
3108 VM_ASSERT_EMT(pVM);
3109
3110 /*
3111 * Get the physical page address.
3112 */
3113 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3114 if (rc == VINF_SUCCESS)
3115 {
3116 /*
3117 * Sync the required registers and flush the whole page.
3118 * (Easier to do the whole page than notifying it about each physical
3119 * byte that was changed.
3120 */
3121 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3122 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3123 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3124 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3125
3126 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3127 }
3128#endif
3129 return VINF_SUCCESS;
3130}
3131
3132
3133/**
3134 * Notification about a successful MMR3PhysRegister() call.
3135 *
3136 * @param pVM VM handle.
3137 * @param GCPhys The physical address the RAM.
3138 * @param cb Size of the memory.
3139 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3140 */
3141REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3142{
3143 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3144 VM_ASSERT_EMT(pVM);
3145
3146 /*
3147 * Validate input - we trust the caller.
3148 */
3149 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3150 Assert(cb);
3151 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3152 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3153
3154 /*
3155 * Base ram? Update GCPhysLastRam.
3156 */
3157 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3158 {
3159 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3160 {
3161 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3162 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3163 }
3164 }
3165
3166 /*
3167 * Register the ram.
3168 */
3169 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3170
3171 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3172 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3173 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3174
3175 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3176}
3177
3178
3179/**
3180 * Notification about a successful MMR3PhysRomRegister() call.
3181 *
3182 * @param pVM VM handle.
3183 * @param GCPhys The physical address of the ROM.
3184 * @param cb The size of the ROM.
3185 * @param pvCopy Pointer to the ROM copy.
3186 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3187 * This function will be called when ever the protection of the
3188 * shadow ROM changes (at reset and end of POST).
3189 */
3190REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3191{
3192 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3193 VM_ASSERT_EMT(pVM);
3194
3195 /*
3196 * Validate input - we trust the caller.
3197 */
3198 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3199 Assert(cb);
3200 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3201
3202 /*
3203 * Register the rom.
3204 */
3205 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3206
3207 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3208 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3209 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3210
3211 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3212}
3213
3214
3215/**
3216 * Notification about a successful memory deregistration or reservation.
3217 *
3218 * @param pVM VM Handle.
3219 * @param GCPhys Start physical address.
3220 * @param cb The size of the range.
3221 */
3222REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3223{
3224 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3225 VM_ASSERT_EMT(pVM);
3226
3227 /*
3228 * Validate input - we trust the caller.
3229 */
3230 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3231 Assert(cb);
3232 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3233
3234 /*
3235 * Unassigning the memory.
3236 */
3237 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3238
3239 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3240 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3241 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3242
3243 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3244}
3245
3246
3247/**
3248 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3249 *
3250 * @param pVM VM Handle.
3251 * @param enmType Handler type.
3252 * @param GCPhys Handler range address.
3253 * @param cb Size of the handler range.
3254 * @param fHasHCHandler Set if the handler has a HC callback function.
3255 *
3256 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3257 * Handler memory type to memory which has no HC handler.
3258 */
3259static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3260{
3261 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3262 enmType, GCPhys, cb, fHasHCHandler));
3263
3264 VM_ASSERT_EMT(pVM);
3265 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3266 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3267
3268
3269 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3270
3271 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3272 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3273 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3274 else if (fHasHCHandler)
3275 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3276 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3277
3278 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3279}
3280
3281/**
3282 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3283 *
3284 * @param pVM VM Handle.
3285 * @param enmType Handler type.
3286 * @param GCPhys Handler range address.
3287 * @param cb Size of the handler range.
3288 * @param fHasHCHandler Set if the handler has a HC callback function.
3289 *
3290 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3291 * Handler memory type to memory which has no HC handler.
3292 */
3293REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3294{
3295 REMR3ReplayHandlerNotifications(pVM);
3296
3297 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3298}
3299
3300/**
3301 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3302 *
3303 * @param pVM VM Handle.
3304 * @param enmType Handler type.
3305 * @param GCPhys Handler range address.
3306 * @param cb Size of the handler range.
3307 * @param fHasHCHandler Set if the handler has a HC callback function.
3308 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3309 */
3310static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3311{
3312 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3313 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3314 VM_ASSERT_EMT(pVM);
3315
3316
3317 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3318
3319 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3320 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3321 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3322 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3323 else if (fHasHCHandler)
3324 {
3325 if (!fRestoreAsRAM)
3326 {
3327 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3328 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3329 }
3330 else
3331 {
3332 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3333 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3334 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3335 }
3336 }
3337 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3338
3339 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3340}
3341
3342/**
3343 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3344 *
3345 * @param pVM VM Handle.
3346 * @param enmType Handler type.
3347 * @param GCPhys Handler range address.
3348 * @param cb Size of the handler range.
3349 * @param fHasHCHandler Set if the handler has a HC callback function.
3350 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3351 */
3352REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3353{
3354 REMR3ReplayHandlerNotifications(pVM);
3355 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3356}
3357
3358
3359/**
3360 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3361 *
3362 * @param pVM VM Handle.
3363 * @param enmType Handler type.
3364 * @param GCPhysOld Old handler range address.
3365 * @param GCPhysNew New handler range address.
3366 * @param cb Size of the handler range.
3367 * @param fHasHCHandler Set if the handler has a HC callback function.
3368 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3369 */
3370static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3371{
3372 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3373 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3374 VM_ASSERT_EMT(pVM);
3375 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3376
3377 if (fHasHCHandler)
3378 {
3379 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3380
3381 /*
3382 * Reset the old page.
3383 */
3384 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3385 if (!fRestoreAsRAM)
3386 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3387 else
3388 {
3389 /* This is not perfect, but it'll do for PD monitoring... */
3390 Assert(cb == PAGE_SIZE);
3391 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3392 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3393 }
3394
3395 /*
3396 * Update the new page.
3397 */
3398 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3399 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3400 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3401 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3402
3403 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3404 }
3405}
3406
3407/**
3408 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3409 *
3410 * @param pVM VM Handle.
3411 * @param enmType Handler type.
3412 * @param GCPhysOld Old handler range address.
3413 * @param GCPhysNew New handler range address.
3414 * @param cb Size of the handler range.
3415 * @param fHasHCHandler Set if the handler has a HC callback function.
3416 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3417 */
3418REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3419{
3420 REMR3ReplayHandlerNotifications(pVM);
3421
3422 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3423}
3424
3425/**
3426 * Checks if we're handling access to this page or not.
3427 *
3428 * @returns true if we're trapping access.
3429 * @returns false if we aren't.
3430 * @param pVM The VM handle.
3431 * @param GCPhys The physical address.
3432 *
3433 * @remark This function will only work correctly in VBOX_STRICT builds!
3434 */
3435REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3436{
3437#ifdef VBOX_STRICT
3438 unsigned long off;
3439 REMR3ReplayHandlerNotifications(pVM);
3440
3441 off = get_phys_page_offset(GCPhys);
3442 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3443 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3444 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3445#else
3446 return false;
3447#endif
3448}
3449
3450
3451/**
3452 * Deals with a rare case in get_phys_addr_code where the code
3453 * is being monitored.
3454 *
3455 * It could also be an MMIO page, in which case we will raise a fatal error.
3456 *
3457 * @returns The physical address corresponding to addr.
3458 * @param env The cpu environment.
3459 * @param addr The virtual address.
3460 * @param pTLBEntry The TLB entry.
3461 */
3462target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3463 target_ulong addr,
3464 CPUTLBEntry *pTLBEntry,
3465 target_phys_addr_t ioTLBEntry)
3466{
3467 PVM pVM = env->pVM;
3468
3469 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3470 {
3471 /* If code memory is being monitored, appropriate IOTLB entry will have
3472 handler IO type, and addend will provide real physical address, no
3473 matter if we store VA in TLB or not, as handlers are always passed PA */
3474 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3475 return ret;
3476 }
3477 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3478 "*** handlers\n",
3479 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3480 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3481 LogRel(("*** mmio\n"));
3482 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3483 LogRel(("*** phys\n"));
3484 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3485 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3486 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3487 AssertFatalFailed();
3488}
3489
3490/**
3491 * Read guest RAM and ROM.
3492 *
3493 * @param SrcGCPhys The source address (guest physical).
3494 * @param pvDst The destination address.
3495 * @param cb Number of bytes
3496 */
3497void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3498{
3499 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3500 VBOX_CHECK_ADDR(SrcGCPhys);
3501 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3502#ifdef VBOX_DEBUG_PHYS
3503 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3504#endif
3505 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3506}
3507
3508
3509/**
3510 * Read guest RAM and ROM, unsigned 8-bit.
3511 *
3512 * @param SrcGCPhys The source address (guest physical).
3513 */
3514RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3515{
3516 uint8_t val;
3517 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3518 VBOX_CHECK_ADDR(SrcGCPhys);
3519 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3520 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3521#ifdef VBOX_DEBUG_PHYS
3522 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3523#endif
3524 return val;
3525}
3526
3527
3528/**
3529 * Read guest RAM and ROM, signed 8-bit.
3530 *
3531 * @param SrcGCPhys The source address (guest physical).
3532 */
3533RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3534{
3535 int8_t val;
3536 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3537 VBOX_CHECK_ADDR(SrcGCPhys);
3538 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3539 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3540#ifdef VBOX_DEBUG_PHYS
3541 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3542#endif
3543 return val;
3544}
3545
3546
3547/**
3548 * Read guest RAM and ROM, unsigned 16-bit.
3549 *
3550 * @param SrcGCPhys The source address (guest physical).
3551 */
3552RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3553{
3554 uint16_t val;
3555 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3556 VBOX_CHECK_ADDR(SrcGCPhys);
3557 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3558 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3559#ifdef VBOX_DEBUG_PHYS
3560 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3561#endif
3562 return val;
3563}
3564
3565
3566/**
3567 * Read guest RAM and ROM, signed 16-bit.
3568 *
3569 * @param SrcGCPhys The source address (guest physical).
3570 */
3571RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3572{
3573 int16_t val;
3574 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3575 VBOX_CHECK_ADDR(SrcGCPhys);
3576 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3577 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3578#ifdef VBOX_DEBUG_PHYS
3579 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3580#endif
3581 return val;
3582}
3583
3584
3585/**
3586 * Read guest RAM and ROM, unsigned 32-bit.
3587 *
3588 * @param SrcGCPhys The source address (guest physical).
3589 */
3590RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3591{
3592 uint32_t val;
3593 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3594 VBOX_CHECK_ADDR(SrcGCPhys);
3595 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3596 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3597#ifdef VBOX_DEBUG_PHYS
3598 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3599#endif
3600 return val;
3601}
3602
3603
3604/**
3605 * Read guest RAM and ROM, signed 32-bit.
3606 *
3607 * @param SrcGCPhys The source address (guest physical).
3608 */
3609RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3610{
3611 int32_t val;
3612 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3613 VBOX_CHECK_ADDR(SrcGCPhys);
3614 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3615 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3616#ifdef VBOX_DEBUG_PHYS
3617 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3618#endif
3619 return val;
3620}
3621
3622
3623/**
3624 * Read guest RAM and ROM, unsigned 64-bit.
3625 *
3626 * @param SrcGCPhys The source address (guest physical).
3627 */
3628uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3629{
3630 uint64_t val;
3631 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3632 VBOX_CHECK_ADDR(SrcGCPhys);
3633 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3634 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3635#ifdef VBOX_DEBUG_PHYS
3636 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3637#endif
3638 return val;
3639}
3640
3641
3642/**
3643 * Read guest RAM and ROM, signed 64-bit.
3644 *
3645 * @param SrcGCPhys The source address (guest physical).
3646 */
3647int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3648{
3649 int64_t val;
3650 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3651 VBOX_CHECK_ADDR(SrcGCPhys);
3652 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3653 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3654#ifdef VBOX_DEBUG_PHYS
3655 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3656#endif
3657 return val;
3658}
3659
3660
3661/**
3662 * Write guest RAM.
3663 *
3664 * @param DstGCPhys The destination address (guest physical).
3665 * @param pvSrc The source address.
3666 * @param cb Number of bytes to write
3667 */
3668void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3669{
3670 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3671 VBOX_CHECK_ADDR(DstGCPhys);
3672 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3673 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3674#ifdef VBOX_DEBUG_PHYS
3675 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3676#endif
3677}
3678
3679
3680/**
3681 * Write guest RAM, unsigned 8-bit.
3682 *
3683 * @param DstGCPhys The destination address (guest physical).
3684 * @param val Value
3685 */
3686void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3687{
3688 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3689 VBOX_CHECK_ADDR(DstGCPhys);
3690 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3691 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3692#ifdef VBOX_DEBUG_PHYS
3693 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3694#endif
3695}
3696
3697
/**
 * Write guest RAM, unsigned 16-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3714
3715
3716/**
3717 * Write guest RAM, unsigned 32-bit.
3718 *
3719 * @param DstGCPhys The destination address (guest physical).
3720 * @param val Value
3721 */
3722void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3723{
3724 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3725 VBOX_CHECK_ADDR(DstGCPhys);
3726 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3727 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3728#ifdef VBOX_DEBUG_PHYS
3729 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3730#endif
3731}
3732
3733
3734/**
3735 * Write guest RAM, unsigned 64-bit.
3736 *
3737 * @param DstGCPhys The destination address (guest physical).
3738 * @param val Value
3739 */
3740void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3741{
3742 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3743 VBOX_CHECK_ADDR(DstGCPhys);
3744 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3745 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3746#ifdef VBOX_DEBUG_PHYS
3747 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3748#endif
3749}
3750
3751#undef LOG_GROUP
3752#define LOG_GROUP LOG_GROUP_REM_MMIO
3753
3754/** Read MMIO memory. */
3755static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3756{
3757 uint32_t u32 = 0;
3758 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3759 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3760 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3761 return u32;
3762}
3763
3764/** Read MMIO memory. */
3765static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3766{
3767 uint32_t u32 = 0;
3768 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3769 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3770 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3771 return u32;
3772}
3773
3774/** Read MMIO memory. */
3775static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3776{
3777 uint32_t u32 = 0;
3778 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3779 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3780 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3781 return u32;
3782}
3783
3784/** Write to MMIO memory. */
3785static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3786{
3787 int rc;
3788 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3789 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3790 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3791}
3792
3793/** Write to MMIO memory. */
3794static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3795{
3796 int rc;
3797 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3798 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3799 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3800}
3801
3802/** Write to MMIO memory. */
3803static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3804{
3805 int rc;
3806 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3807 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3808 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3809}
3810
3811
3812#undef LOG_GROUP
3813#define LOG_GROUP LOG_GROUP_REM_HANDLER
3814
3815/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3816
3817static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3818{
3819 uint8_t u8;
3820 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3821 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3822 return u8;
3823}
3824
3825static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3826{
3827 uint16_t u16;
3828 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3829 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3830 return u16;
3831}
3832
3833static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3834{
3835 uint32_t u32;
3836 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3837 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3838 return u32;
3839}
3840
/** Handler-routed byte write; goes through PGMPhysWrite so access handlers fire. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    /* NOTE(review): writes the first sizeof(uint8_t) bytes of the uint32_t 'u32' —
       this only picks up the low byte on little-endian hosts; presumably intentional
       for the x86-hosted recompiler, confirm if ever ported. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3846
/** Handler-routed 16-bit write; goes through PGMPhysWrite so access handlers fire. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    /* NOTE(review): writes the low 16 bits of 'u32' — little-endian host assumption,
       same as remR3HandlerWriteU8. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3852
3853static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3854{
3855 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3856 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3857}
3858
3859/* -+- disassembly -+- */
3860
3861#undef LOG_GROUP
3862#define LOG_GROUP LOG_GROUP_REM_DISAS
3863
3864
/**
 * Enables or disables singled stepped disassembly.
 *
 * Worker that must run on the EMT; flips CPU_EMULATE_SINGLE_STEP in the
 * recompiler CPU state (and, when built with
 * REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING, also QEMU's own single stepping).
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM         VM handle.
 * @param   fEnable     To enable set this flag, to disable clear it.
 */
static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);     /* Must be on the emulation thread; Env.state is not synchronized. */

    if (fEnable)
        pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
    else
        pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    cpu_single_step(&pVM->rem.s.Env, fEnable);
#endif
    return VINF_SUCCESS;
}
3886
3887
3888/**
3889 * Enables or disables singled stepped disassembly.
3890 *
3891 * @returns VBox status code.
3892 * @param pVM VM handle.
3893 * @param fEnable To enable set this flag, to disable clear it.
3894 */
3895REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3896{
3897 int rc;
3898
3899 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3900 if (VM_IS_EMT(pVM))
3901 return remR3DisasEnableStepping(pVM, fEnable);
3902
3903 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3904 AssertRC(rc);
3905 return rc;
3906}
3907
3908
3909#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3910/**
3911 * External Debugger Command: .remstep [on|off|1|0]
3912 */
3913static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3914{
3915 int rc;
3916
3917 if (cArgs == 0)
3918 /*
3919 * Print the current status.
3920 */
3921 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3922 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3923 else
3924 {
3925 /*
3926 * Convert the argument and change the mode.
3927 */
3928 bool fEnable;
3929 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3930 if (RT_SUCCESS(rc))
3931 {
3932 rc = REMR3DisasEnableStepping(pVM, fEnable);
3933 if (RT_SUCCESS(rc))
3934 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3935 else
3936 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3937 }
3938 else
3939 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3940 }
3941 return rc;
3942}
3943#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3944
3945
3946/**
3947 * Disassembles one instruction and prints it to the log.
3948 *
3949 * @returns Success indicator.
3950 * @param env Pointer to the recompiler CPU structure.
3951 * @param f32BitCode Indicates that whether or not the code should
3952 * be disassembled as 16 or 32 bit. If -1 the CS
3953 * selector will be inspected.
3954 * @param pszPrefix
3955 */
3956bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3957{
3958 PVM pVM = env->pVM;
3959 const bool fLog = LogIsEnabled();
3960 const bool fLog2 = LogIs2Enabled();
3961 int rc = VINF_SUCCESS;
3962
3963 /*
3964 * Don't bother if there ain't any log output to do.
3965 */
3966 if (!fLog && !fLog2)
3967 return true;
3968
3969 /*
3970 * Update the state so DBGF reads the correct register values.
3971 */
3972 remR3StateUpdate(pVM, env->pVCpu);
3973
3974 /*
3975 * Log registers if requested.
3976 */
3977 if (fLog2)
3978 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3979
3980 /*
3981 * Disassemble to log.
3982 */
3983 if (fLog)
3984 {
3985 PVMCPU pVCpu = VMMGetCpu(pVM);
3986 char szBuf[256];
3987 szBuf[0] = '\0';
3988 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3989 pVCpu->idCpu,
3990 0, /* Sel */
3991 0, /* GCPtr */
3992 DBGF_DISAS_FLAGS_CURRENT_GUEST
3993 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3994 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3995 szBuf,
3996 sizeof(szBuf),
3997 NULL);
3998 if (RT_FAILURE(rc))
3999 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4000 if (pszPrefix && *pszPrefix)
4001 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4002 else
4003 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4004 }
4005
4006 return RT_SUCCESS(rc);
4007}
4008
4009
/**
 * Disassemble recompiled code.
 *
 * Only does work when level-2 logging is enabled; output goes to the log,
 * the file handle is ignored.
 *
 * @param   phFile      Ignored, logfile usually.
 * @param   pvCode      Pointer to the code block.
 * @param   cb          Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
    if (LogIs2Enabled())
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                RTLogPrintf("%s", szOutput);
            else
            {
                RTLogPrintf("disas error\n");
                cbInstr = 1;    /* Skip one byte and try again (32-bit host only). */
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }
}
4050
4051
4052/**
4053 * Disassemble guest code.
4054 *
4055 * @param phFileIgnored Ignored, logfile usually.
4056 * @param uCode The guest address of the code to disassemble. (flat?)
4057 * @param cb Number of bytes to disassemble.
4058 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4059 */
4060void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4061{
4062 if (LogIs2Enabled())
4063 {
4064 PVM pVM = cpu_single_env->pVM;
4065 PVMCPU pVCpu = cpu_single_env->pVCpu;
4066 RTSEL cs;
4067 RTGCUINTPTR eip;
4068
4069 Assert(pVCpu);
4070
4071 /*
4072 * Update the state so DBGF reads the correct register values (flags).
4073 */
4074 remR3StateUpdate(pVM, pVCpu);
4075
4076 /*
4077 * Do the disassembling.
4078 */
4079 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4080 cs = cpu_single_env->segs[R_CS].selector;
4081 eip = uCode - cpu_single_env->segs[R_CS].base;
4082 for (;;)
4083 {
4084 char szBuf[256];
4085 uint32_t cbInstr;
4086 int rc = DBGFR3DisasInstrEx(pVM,
4087 pVCpu->idCpu,
4088 cs,
4089 eip,
4090 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4091 szBuf, sizeof(szBuf),
4092 &cbInstr);
4093 if (RT_SUCCESS(rc))
4094 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4095 else
4096 {
4097 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4098 cbInstr = 1;
4099 }
4100
4101 /* next */
4102 if (cb <= cbInstr)
4103 break;
4104 cb -= cbInstr;
4105 uCode += cbInstr;
4106 eip += cbInstr;
4107 }
4108 }
4109}
4110
4111
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer, so the result is
 *          only valid until the next call and the function is not reentrant /
 *          thread-safe. On success the returned name includes a trailing
 *          newline and, when not an exact hit, a +/-offset suffix.
 * @param   orig_addr   The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    PVM pVM = cpu_single_env->pVM;
    RTGCINTPTR off = 0;
    RTDBGSYMBOL Sym;
    DBGFADDRESS Addr;

    int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
    if (RT_SUCCESS(rc))
    {
        /* Static: see the @returns note about reentrancy. */
        static char szSym[sizeof(Sym.szName) + 48];
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
4139
4140
4141#undef LOG_GROUP
4142#define LOG_GROUP LOG_GROUP_REM
4143
4144
4145/* -+- FF notifications -+- */
4146
4147
/**
 * Notification about a pending interrupt.
 *
 * Records the interrupt vector for cpu_get_pic_interrupt() to pick up; only
 * one interrupt may be pending at a time (asserted below).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @param   u8Interrupt     Interrupt
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
4161
/**
 * Queries the currently pending interrupt, if any.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    return pVM->rem.s.u32PendingInterrupt;
}
4174
/**
 * Notification about the interrupt FF being set.
 *
 * When the recompiler is executing, atomically raises the external hard
 * interrupt request flag on the recompiler CPU so it reacts on its next check.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_HARD);
    }
#endif
}
4194
4195
/**
 * Notification about the interrupt FF being cleared.
 *
 * (Header fixed: this is the clear counterpart of REMR3NotifyInterruptSet.)
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
4209
4210
/**
 * Notification about pending timer(s).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpuDst        The target cpu for this notification.
 *                          TM will not broadcast pending timer events, but use
 *                          a dedicated EMT for them. So, only interrupt REM
 *                          execution if the given CPU is executing in REM.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
{
#ifndef IEM_VERIFICATION_MODE
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        /* Only poke the recompiler when it is the one running on the target VCPU. */
        if (pVM->rem.s.Env.pVCpu == pVCpuDst)
        {
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
            ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_TIMER);
        }
        else
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
    }
    else
        LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
#endif
}
4242
4243
/**
 * Notification about pending DMA transfers.
 *
 * Atomically raises the external DMA request flag on the recompiler CPU when
 * it is executing.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_DMA);
    }
#endif
}
4261
4262
/**
 * Notification about pending queue(s).
 *
 * (Header fixed: this is the queue notification, not a timer one.) Forces the
 * recompiler to exit by raising the external exit request flag.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
#endif
}
4280
4281
/**
 * Notification about pending FF set by an external thread.
 *
 * Forces the recompiler to exit by raising the external exit request flag.
 *
 * @param   pVM             VM handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
#endif
}
4299
4300
4301#ifdef VBOX_WITH_STATISTICS
4302void remR3ProfileStart(int statcode)
4303{
4304 STAMPROFILEADV *pStat;
4305 switch(statcode)
4306 {
4307 case STATS_EMULATE_SINGLE_INSTR:
4308 pStat = &gStatExecuteSingleInstr;
4309 break;
4310 case STATS_QEMU_COMPILATION:
4311 pStat = &gStatCompilationQEmu;
4312 break;
4313 case STATS_QEMU_RUN_EMULATED_CODE:
4314 pStat = &gStatRunCodeQEmu;
4315 break;
4316 case STATS_QEMU_TOTAL:
4317 pStat = &gStatTotalTimeQEmu;
4318 break;
4319 case STATS_QEMU_RUN_TIMERS:
4320 pStat = &gStatTimers;
4321 break;
4322 case STATS_TLB_LOOKUP:
4323 pStat= &gStatTBLookup;
4324 break;
4325 case STATS_IRQ_HANDLING:
4326 pStat= &gStatIRQ;
4327 break;
4328 case STATS_RAW_CHECK:
4329 pStat = &gStatRawCheck;
4330 break;
4331
4332 default:
4333 AssertMsgFailed(("unknown stat %d\n", statcode));
4334 return;
4335 }
4336 STAM_PROFILE_ADV_START(pStat, a);
4337}
4338
4339
4340void remR3ProfileStop(int statcode)
4341{
4342 STAMPROFILEADV *pStat;
4343 switch(statcode)
4344 {
4345 case STATS_EMULATE_SINGLE_INSTR:
4346 pStat = &gStatExecuteSingleInstr;
4347 break;
4348 case STATS_QEMU_COMPILATION:
4349 pStat = &gStatCompilationQEmu;
4350 break;
4351 case STATS_QEMU_RUN_EMULATED_CODE:
4352 pStat = &gStatRunCodeQEmu;
4353 break;
4354 case STATS_QEMU_TOTAL:
4355 pStat = &gStatTotalTimeQEmu;
4356 break;
4357 case STATS_QEMU_RUN_TIMERS:
4358 pStat = &gStatTimers;
4359 break;
4360 case STATS_TLB_LOOKUP:
4361 pStat= &gStatTBLookup;
4362 break;
4363 case STATS_IRQ_HANDLING:
4364 pStat= &gStatIRQ;
4365 break;
4366 case STATS_RAW_CHECK:
4367 pStat = &gStatRawCheck;
4368 break;
4369 default:
4370 AssertMsgFailed(("unknown stat %d\n", statcode));
4371 return;
4372 }
4373 STAM_PROFILE_ADV_STOP(pStat, a);
4374}
4375#endif
4376
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status for the outer REM loop to return and interrupts the
 * recompiler so it notices.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4391
4392
4393/* -+- timers -+- */
4394
/** QEMU callback: returns the current virtual TSC via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4400
4401
4402/* -+- interrupts -+- */
4403
/** QEMU callback: asserts ISA IRQ 13 (the legacy FPU error line on PCs). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4409
/**
 * QEMU callback: fetches the next interrupt vector to inject.
 *
 * Prefers an interrupt previously parked via REMR3NotifyPendingInterrupt
 * (already acked with the (A)PIC); otherwise asks PDM for one.
 *
 * @returns The interrupt vector, or -1 if none is available.
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;   /* Consume it. */
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts still pending? Keep the hard-interrupt request raised. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4443
4444
4445/* -+- local apic -+- */
4446
4447#if 0 /* CPUMSetGuestMsr does this now. */
4448void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4449{
4450 int rc = PDMApicSetBase(env->pVM, val);
4451 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4452}
4453#endif
4454
4455uint64_t cpu_get_apic_base(CPUX86State *env)
4456{
4457 uint64_t u64;
4458 int rc = PDMApicGetBase(env->pVM, &u64);
4459 if (RT_SUCCESS(rc))
4460 {
4461 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4462 return u64;
4463 }
4464 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4465 return 0;
4466}
4467
/** QEMU callback: sets the task priority register from the CR8 value. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4473
4474uint8_t cpu_get_apic_tpr(CPUX86State *env)
4475{
4476 uint8_t u8;
4477 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4478 if (RT_SUCCESS(rc))
4479 {
4480 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4481 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4482 }
4483 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4484 return 0;
4485}
4486
/**
 * Read an MSR.
 *
 * Thin adapter mapping CPUM's status code onto QEMU's 0/-1 convention.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env         The cpu state.
 * @param   idMsr       The MSR to read.
 * @param   puValue     Where to return the value.
 */
int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
{
    Assert(env->pVCpu);
    return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
}
4501
/**
 * Write to an MSR.
 *
 * Thin adapter mapping CPUM's status code onto QEMU's 0/-1 convention.
 * (Param docs fixed: this writes, it does not read.)
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env         The cpu state.
 * @param   idMsr       The MSR to write.
 * @param   uValue      The value to write.
 */
int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
{
    Assert(env->pVCpu);
    return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
}
4516
4517/* -+- I/O Ports -+- */
4518
4519#undef LOG_GROUP
4520#define LOG_GROUP LOG_GROUP_REM_IOPORT
4521
/**
 * QEMU callback: byte OUT instruction, routed through IOM.
 *
 * EM status codes are raised back to the REM loop; any other failure aborts.
 */
void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
{
    int rc;

    /* Ports 0x80, 0x70 and 0x61 are excluded from level-2 logging —
       presumably to cut log noise from POST/RTC-index/system-control traffic;
       not verified against the original change. */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);     /* Hand the EM status to the outer REM loop. */
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4540
4541void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4542{
4543 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4544 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4545 if (RT_LIKELY(rc == VINF_SUCCESS))
4546 return;
4547 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4548 {
4549 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4550 remR3RaiseRC(env->pVM, rc);
4551 return;
4552 }
4553 remAbort(rc, __FUNCTION__);
4554}
4555
4556void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4557{
4558 int rc;
4559 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4560 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4561 if (RT_LIKELY(rc == VINF_SUCCESS))
4562 return;
4563 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4564 {
4565 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4566 remR3RaiseRC(env->pVM, rc);
4567 return;
4568 }
4569 remAbort(rc, __FUNCTION__);
4570}
4571
/**
 * QEMU callback: byte IN instruction, routed through IOM.
 *
 * EM status codes are raised back to the REM loop (returning the partial
 * value); any other failure aborts. Returns 0xff on abort fall-through.
 */
uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Port 0x71 (and, commented out, 0x61) excluded from logging to cut noise. */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (uint8_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);     /* Hand the EM status to the outer REM loop. */
        return (uint8_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT8_C(0xff);
}
4591
4592uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4593{
4594 uint32_t u32 = 0;
4595 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4596 if (RT_LIKELY(rc == VINF_SUCCESS))
4597 {
4598 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4599 return (uint16_t)u32;
4600 }
4601 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4602 {
4603 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4604 remR3RaiseRC(env->pVM, rc);
4605 return (uint16_t)u32;
4606 }
4607 remAbort(rc, __FUNCTION__);
4608 return UINT16_C(0xffff);
4609}
4610
/**
 * QEMU callback: dword IN instruction, routed through IOM.
 *
 * EM status codes are raised back to the REM loop (returning the partial
 * value); any other failure aborts. Returns 0xffffffff on abort fall-through.
 */
uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
//if (addr==0x01f0 && u32 == 0x6b6d)
//    loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);     /* Hand the EM status to the outer REM loop. */
        return u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT32_C(0xffffffff);
}
4631
4632#undef LOG_GROUP
4633#define LOG_GROUP LOG_GROUP_REM
4634
4635
4636/* -+- helpers and misc other interfaces -+- */
4637
/**
 * Perform the CPUID instruction.
 *
 * Delegates to CPUM so the guest sees the configured CPUID leaves.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   idx         The CPUID leaf (eax).
 * @param   idxSub      The CPUID sub-leaf (ecx) where applicable.
 *                      Currently ignored (NOREF below); CPUMGetGuestCpuId
 *                      takes only the leaf.
 * @param   pEAX        Where to store eax.
 * @param   pEBX        Where to store ebx.
 * @param   pECX        Where to store ecx.
 * @param   pEDX        Where to store edx.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
                   uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
{
    NOREF(idxSub);
    CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
}
4655
4656
4657#if 0 /* not used */
4658/**
4659 * Interface for qemu hardware to report back fatal errors.
4660 */
4661void hw_error(const char *pszFormat, ...)
4662{
4663 /*
4664 * Bitch about it.
4665 */
4666 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4667 * this in my Odin32 tree at home! */
4668 va_list args;
4669 va_start(args, pszFormat);
4670 RTLogPrintf("fatal error in virtual hardware:");
4671 RTLogPrintfV(pszFormat, args);
4672 va_end(args);
4673 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4674
4675 /*
4676 * If we're in REM context we'll sync back the state before 'jumping' to
4677 * the EMs failure handling.
4678 */
4679 PVM pVM = cpu_single_env->pVM;
4680 if (pVM->rem.s.fInREM)
4681 REMR3StateBack(pVM);
4682 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4683 AssertMsgFailed(("EMR3FatalError returned!\n"));
4684}
4685#endif
4686
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Formats the message, logs it, syncs the recompiler state back (when inside
 * REM) and hands control to EM's fatal-error path; not expected to return.
 */
void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");  /* Make sure the message actually reaches the log. */
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Pull up to six pointer-sized args out of the va_list (one per '%' found)
       and re-dispatch through RTStrPrintf with explicit arguments. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4747
4748
/**
 * Aborts the VM.
 *
 * Syncs the recompiler state back (when inside REM) and hands control to EM's
 * fatal-error path; not expected to return.
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4779
4780
4781/**
4782 * Dumps a linux system call.
4783 * @param pVCpu VMCPU handle.
4784 */
4785void remR3DumpLnxSyscall(PVMCPU pVCpu)
4786{
4787 static const char *apsz[] =
4788 {
4789 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4790 "sys_exit",
4791 "sys_fork",
4792 "sys_read",
4793 "sys_write",
4794 "sys_open", /* 5 */
4795 "sys_close",
4796 "sys_waitpid",
4797 "sys_creat",
4798 "sys_link",
4799 "sys_unlink", /* 10 */
4800 "sys_execve",
4801 "sys_chdir",
4802 "sys_time",
4803 "sys_mknod",
4804 "sys_chmod", /* 15 */
4805 "sys_lchown16",
4806 "sys_ni_syscall", /* old break syscall holder */
4807 "sys_stat",
4808 "sys_lseek",
4809 "sys_getpid", /* 20 */
4810 "sys_mount",
4811 "sys_oldumount",
4812 "sys_setuid16",
4813 "sys_getuid16",
4814 "sys_stime", /* 25 */
4815 "sys_ptrace",
4816 "sys_alarm",
4817 "sys_fstat",
4818 "sys_pause",
4819 "sys_utime", /* 30 */
4820 "sys_ni_syscall", /* old stty syscall holder */
4821 "sys_ni_syscall", /* old gtty syscall holder */
4822 "sys_access",
4823 "sys_nice",
4824 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4825 "sys_sync",
4826 "sys_kill",
4827 "sys_rename",
4828 "sys_mkdir",
4829 "sys_rmdir", /* 40 */
4830 "sys_dup",
4831 "sys_pipe",
4832 "sys_times",
4833 "sys_ni_syscall", /* old prof syscall holder */
4834 "sys_brk", /* 45 */
4835 "sys_setgid16",
4836 "sys_getgid16",
4837 "sys_signal",
4838 "sys_geteuid16",
4839 "sys_getegid16", /* 50 */
4840 "sys_acct",
4841 "sys_umount", /* recycled never used phys() */
4842 "sys_ni_syscall", /* old lock syscall holder */
4843 "sys_ioctl",
4844 "sys_fcntl", /* 55 */
4845 "sys_ni_syscall", /* old mpx syscall holder */
4846 "sys_setpgid",
4847 "sys_ni_syscall", /* old ulimit syscall holder */
4848 "sys_olduname",
4849 "sys_umask", /* 60 */
4850 "sys_chroot",
4851 "sys_ustat",
4852 "sys_dup2",
4853 "sys_getppid",
4854 "sys_getpgrp", /* 65 */
4855 "sys_setsid",
4856 "sys_sigaction",
4857 "sys_sgetmask",
4858 "sys_ssetmask",
4859 "sys_setreuid16", /* 70 */
4860 "sys_setregid16",
4861 "sys_sigsuspend",
4862 "sys_sigpending",
4863 "sys_sethostname",
4864 "sys_setrlimit", /* 75 */
4865 "sys_old_getrlimit",
4866 "sys_getrusage",
4867 "sys_gettimeofday",
4868 "sys_settimeofday",
4869 "sys_getgroups16", /* 80 */
4870 "sys_setgroups16",
4871 "old_select",
4872 "sys_symlink",
4873 "sys_lstat",
4874 "sys_readlink", /* 85 */
4875 "sys_uselib",
4876 "sys_swapon",
4877 "sys_reboot",
4878 "old_readdir",
4879 "old_mmap", /* 90 */
4880 "sys_munmap",
4881 "sys_truncate",
4882 "sys_ftruncate",
4883 "sys_fchmod",
4884 "sys_fchown16", /* 95 */
4885 "sys_getpriority",
4886 "sys_setpriority",
4887 "sys_ni_syscall", /* old profil syscall holder */
4888 "sys_statfs",
4889 "sys_fstatfs", /* 100 */
4890 "sys_ioperm",
4891 "sys_socketcall",
4892 "sys_syslog",
4893 "sys_setitimer",
4894 "sys_getitimer", /* 105 */
4895 "sys_newstat",
4896 "sys_newlstat",
4897 "sys_newfstat",
4898 "sys_uname",
4899 "sys_iopl", /* 110 */
4900 "sys_vhangup",
4901 "sys_ni_syscall", /* old "idle" system call */
4902 "sys_vm86old",
4903 "sys_wait4",
4904 "sys_swapoff", /* 115 */
4905 "sys_sysinfo",
4906 "sys_ipc",
4907 "sys_fsync",
4908 "sys_sigreturn",
4909 "sys_clone", /* 120 */
4910 "sys_setdomainname",
4911 "sys_newuname",
4912 "sys_modify_ldt",
4913 "sys_adjtimex",
4914 "sys_mprotect", /* 125 */
4915 "sys_sigprocmask",
4916 "sys_ni_syscall", /* old "create_module" */
4917 "sys_init_module",
4918 "sys_delete_module",
4919 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4920 "sys_quotactl",
4921 "sys_getpgid",
4922 "sys_fchdir",
4923 "sys_bdflush",
4924 "sys_sysfs", /* 135 */
4925 "sys_personality",
4926 "sys_ni_syscall", /* reserved for afs_syscall */
4927 "sys_setfsuid16",
4928 "sys_setfsgid16",
4929 "sys_llseek", /* 140 */
4930 "sys_getdents",
4931 "sys_select",
4932 "sys_flock",
4933 "sys_msync",
4934 "sys_readv", /* 145 */
4935 "sys_writev",
4936 "sys_getsid",
4937 "sys_fdatasync",
4938 "sys_sysctl",
4939 "sys_mlock", /* 150 */
4940 "sys_munlock",
4941 "sys_mlockall",
4942 "sys_munlockall",
4943 "sys_sched_setparam",
4944 "sys_sched_getparam", /* 155 */
4945 "sys_sched_setscheduler",
4946 "sys_sched_getscheduler",
4947 "sys_sched_yield",
4948 "sys_sched_get_priority_max",
4949 "sys_sched_get_priority_min", /* 160 */
4950 "sys_sched_rr_get_interval",
4951 "sys_nanosleep",
4952 "sys_mremap",
4953 "sys_setresuid16",
4954 "sys_getresuid16", /* 165 */
4955 "sys_vm86",
4956 "sys_ni_syscall", /* Old sys_query_module */
4957 "sys_poll",
4958 "sys_nfsservctl",
4959 "sys_setresgid16", /* 170 */
4960 "sys_getresgid16",
4961 "sys_prctl",
4962 "sys_rt_sigreturn",
4963 "sys_rt_sigaction",
4964 "sys_rt_sigprocmask", /* 175 */
4965 "sys_rt_sigpending",
4966 "sys_rt_sigtimedwait",
4967 "sys_rt_sigqueueinfo",
4968 "sys_rt_sigsuspend",
4969 "sys_pread64", /* 180 */
4970 "sys_pwrite64",
4971 "sys_chown16",
4972 "sys_getcwd",
4973 "sys_capget",
4974 "sys_capset", /* 185 */
4975 "sys_sigaltstack",
4976 "sys_sendfile",
4977 "sys_ni_syscall", /* reserved for streams1 */
4978 "sys_ni_syscall", /* reserved for streams2 */
4979 "sys_vfork", /* 190 */
4980 "sys_getrlimit",
4981 "sys_mmap2",
4982 "sys_truncate64",
4983 "sys_ftruncate64",
4984 "sys_stat64", /* 195 */
4985 "sys_lstat64",
4986 "sys_fstat64",
4987 "sys_lchown",
4988 "sys_getuid",
4989 "sys_getgid", /* 200 */
4990 "sys_geteuid",
4991 "sys_getegid",
4992 "sys_setreuid",
4993 "sys_setregid",
4994 "sys_getgroups", /* 205 */
4995 "sys_setgroups",
4996 "sys_fchown",
4997 "sys_setresuid",
4998 "sys_getresuid",
4999 "sys_setresgid", /* 210 */
5000 "sys_getresgid",
5001 "sys_chown",
5002 "sys_setuid",
5003 "sys_setgid",
5004 "sys_setfsuid", /* 215 */
5005 "sys_setfsgid",
5006 "sys_pivot_root",
5007 "sys_mincore",
5008 "sys_madvise",
5009 "sys_getdents64", /* 220 */
5010 "sys_fcntl64",
5011 "sys_ni_syscall", /* reserved for TUX */
5012 "sys_ni_syscall",
5013 "sys_gettid",
5014 "sys_readahead", /* 225 */
5015 "sys_setxattr",
5016 "sys_lsetxattr",
5017 "sys_fsetxattr",
5018 "sys_getxattr",
5019 "sys_lgetxattr", /* 230 */
5020 "sys_fgetxattr",
5021 "sys_listxattr",
5022 "sys_llistxattr",
5023 "sys_flistxattr",
5024 "sys_removexattr", /* 235 */
5025 "sys_lremovexattr",
5026 "sys_fremovexattr",
5027 "sys_tkill",
5028 "sys_sendfile64",
5029 "sys_futex", /* 240 */
5030 "sys_sched_setaffinity",
5031 "sys_sched_getaffinity",
5032 "sys_set_thread_area",
5033 "sys_get_thread_area",
5034 "sys_io_setup", /* 245 */
5035 "sys_io_destroy",
5036 "sys_io_getevents",
5037 "sys_io_submit",
5038 "sys_io_cancel",
5039 "sys_fadvise64", /* 250 */
5040 "sys_ni_syscall",
5041 "sys_exit_group",
5042 "sys_lookup_dcookie",
5043 "sys_epoll_create",
5044 "sys_epoll_ctl", /* 255 */
5045 "sys_epoll_wait",
5046 "sys_remap_file_pages",
5047 "sys_set_tid_address",
5048 "sys_timer_create",
5049 "sys_timer_settime", /* 260 */
5050 "sys_timer_gettime",
5051 "sys_timer_getoverrun",
5052 "sys_timer_delete",
5053 "sys_clock_settime",
5054 "sys_clock_gettime", /* 265 */
5055 "sys_clock_getres",
5056 "sys_clock_nanosleep",
5057 "sys_statfs64",
5058 "sys_fstatfs64",
5059 "sys_tgkill", /* 270 */
5060 "sys_utimes",
5061 "sys_fadvise64_64",
5062 "sys_ni_syscall" /* sys_vserver */
5063 };
5064
5065 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5066 switch (uEAX)
5067 {
5068 default:
5069 if (uEAX < RT_ELEMENTS(apsz))
5070 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5071 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5072 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5073 else
5074 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5075 break;
5076
5077 }
5078}
5079
5080
5081/**
5082 * Dumps an OpenBSD system call.
5083 * @param pVCpu VMCPU handle.
5084 */
5085void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5086{
5087 static const char *apsz[] =
5088 {
5089 "SYS_syscall", //0
5090 "SYS_exit", //1
5091 "SYS_fork", //2
5092 "SYS_read", //3
5093 "SYS_write", //4
5094 "SYS_open", //5
5095 "SYS_close", //6
5096 "SYS_wait4", //7
5097 "SYS_8",
5098 "SYS_link", //9
5099 "SYS_unlink", //10
5100 "SYS_11",
5101 "SYS_chdir", //12
5102 "SYS_fchdir", //13
5103 "SYS_mknod", //14
5104 "SYS_chmod", //15
5105 "SYS_chown", //16
5106 "SYS_break", //17
5107 "SYS_18",
5108 "SYS_19",
5109 "SYS_getpid", //20
5110 "SYS_mount", //21
5111 "SYS_unmount", //22
5112 "SYS_setuid", //23
5113 "SYS_getuid", //24
5114 "SYS_geteuid", //25
5115 "SYS_ptrace", //26
5116 "SYS_recvmsg", //27
5117 "SYS_sendmsg", //28
5118 "SYS_recvfrom", //29
5119 "SYS_accept", //30
5120 "SYS_getpeername", //31
5121 "SYS_getsockname", //32
5122 "SYS_access", //33
5123 "SYS_chflags", //34
5124 "SYS_fchflags", //35
5125 "SYS_sync", //36
5126 "SYS_kill", //37
5127 "SYS_38",
5128 "SYS_getppid", //39
5129 "SYS_40",
5130 "SYS_dup", //41
5131 "SYS_opipe", //42
5132 "SYS_getegid", //43
5133 "SYS_profil", //44
5134 "SYS_ktrace", //45
5135 "SYS_sigaction", //46
5136 "SYS_getgid", //47
5137 "SYS_sigprocmask", //48
5138 "SYS_getlogin", //49
5139 "SYS_setlogin", //50
5140 "SYS_acct", //51
5141 "SYS_sigpending", //52
5142 "SYS_osigaltstack", //53
5143 "SYS_ioctl", //54
5144 "SYS_reboot", //55
5145 "SYS_revoke", //56
5146 "SYS_symlink", //57
5147 "SYS_readlink", //58
5148 "SYS_execve", //59
5149 "SYS_umask", //60
5150 "SYS_chroot", //61
5151 "SYS_62",
5152 "SYS_63",
5153 "SYS_64",
5154 "SYS_65",
5155 "SYS_vfork", //66
5156 "SYS_67",
5157 "SYS_68",
5158 "SYS_sbrk", //69
5159 "SYS_sstk", //70
5160 "SYS_61",
5161 "SYS_vadvise", //72
5162 "SYS_munmap", //73
5163 "SYS_mprotect", //74
5164 "SYS_madvise", //75
5165 "SYS_76",
5166 "SYS_77",
5167 "SYS_mincore", //78
5168 "SYS_getgroups", //79
5169 "SYS_setgroups", //80
5170 "SYS_getpgrp", //81
5171 "SYS_setpgid", //82
5172 "SYS_setitimer", //83
5173 "SYS_84",
5174 "SYS_85",
5175 "SYS_getitimer", //86
5176 "SYS_87",
5177 "SYS_88",
5178 "SYS_89",
5179 "SYS_dup2", //90
5180 "SYS_91",
5181 "SYS_fcntl", //92
5182 "SYS_select", //93
5183 "SYS_94",
5184 "SYS_fsync", //95
5185 "SYS_setpriority", //96
5186 "SYS_socket", //97
5187 "SYS_connect", //98
5188 "SYS_99",
5189 "SYS_getpriority", //100
5190 "SYS_101",
5191 "SYS_102",
5192 "SYS_sigreturn", //103
5193 "SYS_bind", //104
5194 "SYS_setsockopt", //105
5195 "SYS_listen", //106
5196 "SYS_107",
5197 "SYS_108",
5198 "SYS_109",
5199 "SYS_110",
5200 "SYS_sigsuspend", //111
5201 "SYS_112",
5202 "SYS_113",
5203 "SYS_114",
5204 "SYS_115",
5205 "SYS_gettimeofday", //116
5206 "SYS_getrusage", //117
5207 "SYS_getsockopt", //118
5208 "SYS_119",
5209 "SYS_readv", //120
5210 "SYS_writev", //121
5211 "SYS_settimeofday", //122
5212 "SYS_fchown", //123
5213 "SYS_fchmod", //124
5214 "SYS_125",
5215 "SYS_setreuid", //126
5216 "SYS_setregid", //127
5217 "SYS_rename", //128
5218 "SYS_129",
5219 "SYS_130",
5220 "SYS_flock", //131
5221 "SYS_mkfifo", //132
5222 "SYS_sendto", //133
5223 "SYS_shutdown", //134
5224 "SYS_socketpair", //135
5225 "SYS_mkdir", //136
5226 "SYS_rmdir", //137
5227 "SYS_utimes", //138
5228 "SYS_139",
5229 "SYS_adjtime", //140
5230 "SYS_141",
5231 "SYS_142",
5232 "SYS_143",
5233 "SYS_144",
5234 "SYS_145",
5235 "SYS_146",
5236 "SYS_setsid", //147
5237 "SYS_quotactl", //148
5238 "SYS_149",
5239 "SYS_150",
5240 "SYS_151",
5241 "SYS_152",
5242 "SYS_153",
5243 "SYS_154",
5244 "SYS_nfssvc", //155
5245 "SYS_156",
5246 "SYS_157",
5247 "SYS_158",
5248 "SYS_159",
5249 "SYS_160",
5250 "SYS_getfh", //161
5251 "SYS_162",
5252 "SYS_163",
5253 "SYS_164",
5254 "SYS_sysarch", //165
5255 "SYS_166",
5256 "SYS_167",
5257 "SYS_168",
5258 "SYS_169",
5259 "SYS_170",
5260 "SYS_171",
5261 "SYS_172",
5262 "SYS_pread", //173
5263 "SYS_pwrite", //174
5264 "SYS_175",
5265 "SYS_176",
5266 "SYS_177",
5267 "SYS_178",
5268 "SYS_179",
5269 "SYS_180",
5270 "SYS_setgid", //181
5271 "SYS_setegid", //182
5272 "SYS_seteuid", //183
5273 "SYS_lfs_bmapv", //184
5274 "SYS_lfs_markv", //185
5275 "SYS_lfs_segclean", //186
5276 "SYS_lfs_segwait", //187
5277 "SYS_188",
5278 "SYS_189",
5279 "SYS_190",
5280 "SYS_pathconf", //191
5281 "SYS_fpathconf", //192
5282 "SYS_swapctl", //193
5283 "SYS_getrlimit", //194
5284 "SYS_setrlimit", //195
5285 "SYS_getdirentries", //196
5286 "SYS_mmap", //197
5287 "SYS___syscall", //198
5288 "SYS_lseek", //199
5289 "SYS_truncate", //200
5290 "SYS_ftruncate", //201
5291 "SYS___sysctl", //202
5292 "SYS_mlock", //203
5293 "SYS_munlock", //204
5294 "SYS_205",
5295 "SYS_futimes", //206
5296 "SYS_getpgid", //207
5297 "SYS_xfspioctl", //208
5298 "SYS_209",
5299 "SYS_210",
5300 "SYS_211",
5301 "SYS_212",
5302 "SYS_213",
5303 "SYS_214",
5304 "SYS_215",
5305 "SYS_216",
5306 "SYS_217",
5307 "SYS_218",
5308 "SYS_219",
5309 "SYS_220",
5310 "SYS_semget", //221
5311 "SYS_222",
5312 "SYS_223",
5313 "SYS_224",
5314 "SYS_msgget", //225
5315 "SYS_msgsnd", //226
5316 "SYS_msgrcv", //227
5317 "SYS_shmat", //228
5318 "SYS_229",
5319 "SYS_shmdt", //230
5320 "SYS_231",
5321 "SYS_clock_gettime", //232
5322 "SYS_clock_settime", //233
5323 "SYS_clock_getres", //234
5324 "SYS_235",
5325 "SYS_236",
5326 "SYS_237",
5327 "SYS_238",
5328 "SYS_239",
5329 "SYS_nanosleep", //240
5330 "SYS_241",
5331 "SYS_242",
5332 "SYS_243",
5333 "SYS_244",
5334 "SYS_245",
5335 "SYS_246",
5336 "SYS_247",
5337 "SYS_248",
5338 "SYS_249",
5339 "SYS_minherit", //250
5340 "SYS_rfork", //251
5341 "SYS_poll", //252
5342 "SYS_issetugid", //253
5343 "SYS_lchown", //254
5344 "SYS_getsid", //255
5345 "SYS_msync", //256
5346 "SYS_257",
5347 "SYS_258",
5348 "SYS_259",
5349 "SYS_getfsstat", //260
5350 "SYS_statfs", //261
5351 "SYS_fstatfs", //262
5352 "SYS_pipe", //263
5353 "SYS_fhopen", //264
5354 "SYS_265",
5355 "SYS_fhstatfs", //266
5356 "SYS_preadv", //267
5357 "SYS_pwritev", //268
5358 "SYS_kqueue", //269
5359 "SYS_kevent", //270
5360 "SYS_mlockall", //271
5361 "SYS_munlockall", //272
5362 "SYS_getpeereid", //273
5363 "SYS_274",
5364 "SYS_275",
5365 "SYS_276",
5366 "SYS_277",
5367 "SYS_278",
5368 "SYS_279",
5369 "SYS_280",
5370 "SYS_getresuid", //281
5371 "SYS_setresuid", //282
5372 "SYS_getresgid", //283
5373 "SYS_setresgid", //284
5374 "SYS_285",
5375 "SYS_mquery", //286
5376 "SYS_closefrom", //287
5377 "SYS_sigaltstack", //288
5378 "SYS_shmget", //289
5379 "SYS_semop", //290
5380 "SYS_stat", //291
5381 "SYS_fstat", //292
5382 "SYS_lstat", //293
5383 "SYS_fhstat", //294
5384 "SYS___semctl", //295
5385 "SYS_shmctl", //296
5386 "SYS_msgctl", //297
5387 "SYS_MAXSYSCALL", //298
5388 //299
5389 //300
5390 };
5391 uint32_t uEAX;
5392 if (!LogIsEnabled())
5393 return;
5394 uEAX = CPUMGetGuestEAX(pVCpu);
5395 switch (uEAX)
5396 {
5397 default:
5398 if (uEAX < RT_ELEMENTS(apsz))
5399 {
5400 uint32_t au32Args[8] = {0};
5401 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5402 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5403 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5404 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5405 }
5406 else
5407 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5408 break;
5409 }
5410}
5411
5412
#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Required because this configuration links without the C runtime; nothing
 * needs doing on attach/detach, so it just reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}

/**
 * Minimal byte-wise memcpy replacement for the no-CRT build.
 *
 * @returns dst, as the standard requires.
 * @param   dst     Destination buffer (must not overlap src).
 * @param   src     Source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* keep the const qualifier; the original dropped it. */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}

#endif
5431
5432void cpu_smm_update(CPUX86State *env)
5433{
5434}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette