VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 38320

Last change on this file since 38320 was 38320, checked in by vboxsync, 13 years ago

Redid the A20 gate assertion fix - the recompiler needs to be told.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 180.3 KB
Line 
1/* $Id: VBoxRecompiler.c 38320 2011-08-04 19:16:53Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Note: evaluates both arguments exactly once; wrapped in do/while(0) so it
 * behaves as a single statement. */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates: when defined, single stepping with
 * disassembly logging is driven by QEMU's own single-step machinery. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling counters (advanced profiles record start/stop pairs). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
/* Address conversion profiles. */
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
/* Plain event counters. */
static STAMCOUNTER    gStatCpuGetTSC;
/* Reasons raw-mode execution was refused. */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor-table / task-register change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Per-selector out-of-sync counters, indexed ES,CS,SS,DS,FS,GS (see REMR3Init). */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks, indexed by access size: [0]=byte, [1]=word, [2]=dword. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access size: [0]=byte, [1]=word, [2]=dword. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (for pages with access handlers), same size indexing. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (for pages with access handlers), same size indexing. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments: one optional numeric/boolean argument (cTimesMax=~0). */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags, pszName,        pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,      "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors for the debugger commands registered in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc in REMR3Init.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;
226
227
228/*******************************************************************************
229* Internal Functions *
230*******************************************************************************/
/* Internal helpers declared here to avoid a header. */
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
/* NOTE(review): both branches of this #if assert the same thing; the #if could
 * probably be dropped, kept as-is pending confirmation. */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif
243
244
245/**
246 * Initializes the REM.
247 *
248 * @returns VBox status code.
249 * @param pVM The VM to operate on.
250 */
251REMR3DECL(int) REMR3Init(PVM pVM)
252{
253 PREMHANDLERNOTIFICATION pCur;
254 uint32_t u32Dummy;
255 int rc;
256 unsigned i;
257
258#ifdef VBOX_ENABLE_VBOXREM64
259 LogRel(("Using 64-bit aware REM\n"));
260#endif
261
262 /*
263 * Assert sanity.
264 */
265 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
266 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
267 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
268#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
269 Assert(!testmath());
270#endif
271
272 /*
273 * Init some internal data members.
274 */
275 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
276 pVM->rem.s.Env.pVM = pVM;
277#ifdef CPU_RAW_MODE_INIT
278 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
279#endif
280
281 /*
282 * Initialize the REM critical section.
283 *
284 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
285 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
286 * deadlocks. (mostly pgm vs rem locking)
287 */
288 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
289 AssertRCReturn(rc, rc);
290
291 /* ctx. */
292 pVM->rem.s.pCtx = NULL; /* set when executing code. */
293 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
294
295 /* ignore all notifications */
296 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
297
298 code_gen_prologue = RTMemExecAlloc(_1K);
299 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
300
301 cpu_exec_init_all(0);
302
303 /*
304 * Init the recompiler.
305 */
306 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
307 {
308 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
309 return VERR_GENERAL_FAILURE;
310 }
311 PVMCPU pVCpu = VMMGetCpu(pVM);
312 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
313 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
314
315 EMRemLock(pVM);
316 cpu_reset(&pVM->rem.s.Env);
317 EMRemUnlock(pVM);
318
319 /* allocate code buffer for single instruction emulation. */
320 pVM->rem.s.Env.cbCodeBuffer = 4096;
321 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
322 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
323
324 /* Finally, set the cpu_single_env global. */
325 cpu_single_env = &pVM->rem.s.Env;
326
327 /* Nothing is pending by default */
328 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
329
330 /*
331 * Register ram types.
332 */
333 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
334 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
335 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
336 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
337 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
338
339 /* stop ignoring. */
340 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
341
342 /*
343 * Register the saved state data unit.
344 */
345 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
346 NULL, NULL, NULL,
347 NULL, remR3Save, NULL,
348 NULL, remR3Load, NULL);
349 if (RT_FAILURE(rc))
350 return rc;
351
352#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
353 /*
354 * Debugger commands.
355 */
356 static bool fRegisteredCmds = false;
357 if (!fRegisteredCmds)
358 {
359 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
360 if (RT_SUCCESS(rc))
361 fRegisteredCmds = true;
362 }
363#endif
364
365#ifdef VBOX_WITH_STATISTICS
366 /*
367 * Statistics.
368 */
369 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
370 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
371 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
372 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
373 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
374 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
375 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
376 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
377 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
378 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
379 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
380 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
381
382 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
383
384 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
385 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
386 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
387 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
388 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
389 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
390 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
391 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
392 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
393 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
394 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
395
396 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
397 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
398 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
399 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
400
401 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
402 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
403 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
405 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
406 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
407
408 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
409 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
410 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
411 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
412 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
413 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
414
415 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
416#endif /* VBOX_WITH_STATISTICS */
417 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
418 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);
419
420 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
421 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
422 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
423
424
425#ifdef DEBUG_ALL_LOGGING
426 loglevel = ~0;
427#endif
428
429 /*
430 * Init the handler notification lists.
431 */
432 pVM->rem.s.idxPendingList = UINT32_MAX;
433 pVM->rem.s.idxFreeList = 0;
434
435 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
436 {
437 pCur = &pVM->rem.s.aHandlerNotifications[i];
438 pCur->idxNext = i + 1;
439 pCur->idxSelf = i;
440 }
441 pCur->idxNext = UINT32_MAX; /* the last record. */
442
443 return rc;
444}
445
446
447/**
448 * Finalizes the REM initialization.
449 *
450 * This is called after all components, devices and drivers has
451 * been initialized. Its main purpose it to finish the RAM related
452 * initialization.
453 *
454 * @returns VBox status code.
455 *
456 * @param pVM The VM handle.
457 */
458REMR3DECL(int) REMR3InitFinalize(PVM pVM)
459{
460 int rc;
461
462 /*
463 * Ram size & dirty bit map.
464 */
465 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
466 pVM->rem.s.fGCPhysLastRamFixed = true;
467#ifdef RT_STRICT
468 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
469#else
470 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
471#endif
472 return rc;
473}
474
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * Allocates one dirty-map byte per guest page and marks all pages dirty.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map (inaccessible tail pages so
 *                      overruns fault immediately).
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM blocks have been registered with the recompiler. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    /* GCPhysLastRam is inclusive, hence +1; the comparison catches wrap-around
       when GCPhysLastRam is the maximum address. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    /* One map byte per page; assert cb was page aligned. */
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard area. */
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail of the allocation inaccessible so writes past the map fault. */
        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base so the usable map ends exactly at the guard boundary. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it - 0xff marks every page dirty. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
531
532
533/**
534 * Terminates the REM.
535 *
536 * Termination means cleaning up and freeing all resources,
537 * the VM it self is at this point powered off or suspended.
538 *
539 * @returns VBox status code.
540 * @param pVM The VM to operate on.
541 */
542REMR3DECL(int) REMR3Term(PVM pVM)
543{
544#ifdef VBOX_WITH_STATISTICS
545 /*
546 * Statistics.
547 */
548 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
549 STAM_DEREG(pVM, &gStatCompilationQEmu);
550 STAM_DEREG(pVM, &gStatRunCodeQEmu);
551 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
552 STAM_DEREG(pVM, &gStatTimers);
553 STAM_DEREG(pVM, &gStatTBLookup);
554 STAM_DEREG(pVM, &gStatIRQ);
555 STAM_DEREG(pVM, &gStatRawCheck);
556 STAM_DEREG(pVM, &gStatMemRead);
557 STAM_DEREG(pVM, &gStatMemWrite);
558 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
559 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
560
561 STAM_DEREG(pVM, &gStatCpuGetTSC);
562
563 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
564 STAM_DEREG(pVM, &gStatRefuseVM86);
565 STAM_DEREG(pVM, &gStatRefusePaging);
566 STAM_DEREG(pVM, &gStatRefusePAE);
567 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
568 STAM_DEREG(pVM, &gStatRefuseIF0);
569 STAM_DEREG(pVM, &gStatRefuseCode16);
570 STAM_DEREG(pVM, &gStatRefuseWP0);
571 STAM_DEREG(pVM, &gStatRefuseRing1or2);
572 STAM_DEREG(pVM, &gStatRefuseCanExecute);
573 STAM_DEREG(pVM, &gStatFlushTBs);
574
575 STAM_DEREG(pVM, &gStatREMGDTChange);
576 STAM_DEREG(pVM, &gStatREMLDTRChange);
577 STAM_DEREG(pVM, &gStatREMIDTChange);
578 STAM_DEREG(pVM, &gStatREMTRChange);
579
580 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
581 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
582 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
583 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
584 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
585 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
586
587 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
588 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
589 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
590 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
591 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
592 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
593
594 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
595#endif /* VBOX_WITH_STATISTICS */
596
597 STAM_REL_DEREG(pVM, &tb_flush_count);
598 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
599 STAM_REL_DEREG(pVM, &tlb_flush_count);
600
601 return VINF_SUCCESS;
602}
603
604
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.  Notifications are suppressed (cIgnoreAll) while
     * cpu_reset() runs so the reset doesn't feed state changes back into VBox.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}
635
636
637/**
638 * Execute state save operation.
639 *
640 * @returns VBox status code.
641 * @param pVM VM Handle.
642 * @param pSSM SSM operation handle.
643 */
644static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
645{
646 PREM pRem = &pVM->rem.s;
647
648 /*
649 * Save the required CPU Env bits.
650 * (Not much because we're never in REM when doing the save.)
651 */
652 LogFlow(("remR3Save:\n"));
653 Assert(!pRem->fInREM);
654 SSMR3PutU32(pSSM, pRem->Env.hflags);
655 SSMR3PutU32(pSSM, ~0); /* separator */
656
657 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
658 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
659 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
660
661 return SSMR3PutU32(pSSM, ~0); /* terminator */
662}
663
664
/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.  Only the current version and the 1.6 layout are accepted.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    /* separator written by remR3Save must be ~0. */
    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff (1.6 saved states only).
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.  (Re-derived from the guest CPU rather than the
     * saved state; note only ext2_features is refreshed here, unlike REMR3Init.)
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
790
791
792
793#undef LOG_GROUP
794#define LOG_GROUP LOG_GROUP_REM_RUN
795
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    /* Save interrupt_request so it can be restored verbatim afterwards. */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * cpu_breakpoint_remove returns 0 on success, so fBp records whether one was removed.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Stepped successfully; pulse the TM clock so virtual time advances. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the remaining QEMU exit codes to VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was stashed in rem.s.rc; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
880
881
882/**
883 * Set a breakpoint using the REM facilities.
884 *
885 * @returns VBox status code.
886 * @param pVM The VM handle.
887 * @param Address The breakpoint address.
888 * @thread The emulation thread.
889 */
890REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
891{
892 VM_ASSERT_EMT(pVM);
893 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
894 {
895 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
896 return VINF_SUCCESS;
897 }
898 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
899 return VERR_REM_NO_MORE_BP_SLOTS;
900}
901
902
903/**
904 * Clears a breakpoint set by REMR3BreakpointSet().
905 *
906 * @returns VBox status code.
907 * @param pVM The VM handle.
908 * @param Address The breakpoint address.
909 * @thread The emulation thread.
910 */
911REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
912{
913 VM_ASSERT_EMT(pVM);
914 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
915 {
916 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
917 return VINF_SUCCESS;
918 }
919 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
920 return VERR_REM_BP_NOT_FOUND;
921}
922
923
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save interrupt_request; it is replaced (not OR'ed) with the
           single-instruction flag for the duration of the cpu_exec call. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint at the PC we try to tell the two
             * cases apart by scanning the breakpoint list below.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint  *pBP;
                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1079
1080
1081/**
1082 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1083 *
1084 * @returns VBox status code.
1085 *
1086 * @param pVM The VM handle.
1087 * @param pVCpu The Virtual CPU handle.
1088 */
1089static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1090{
1091 int rc;
1092
1093 Assert(pVM->rem.s.fInREM);
1094#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1095 cpu_single_step(&pVM->rem.s.Env, 1);
1096#else
1097 Assert(!pVM->rem.s.Env.singlestep_enabled);
1098#endif
1099
1100 /*
1101 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1102 */
1103 for (;;)
1104 {
1105 char szBuf[256];
1106
1107 /*
1108 * Log the current registers state and instruction.
1109 */
1110 remR3StateUpdate(pVM, pVCpu);
1111 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1112 szBuf[0] = '\0';
1113 rc = DBGFR3DisasInstrEx(pVM,
1114 pVCpu->idCpu,
1115 0, /* Sel */
1116 0, /* GCPtr */
1117 DBGF_DISAS_FLAGS_CURRENT_GUEST
1118 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1119 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1120 szBuf,
1121 sizeof(szBuf),
1122 NULL);
1123 if (RT_FAILURE(rc))
1124 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1125 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1126
1127 /*
1128 * Execute the instruction.
1129 */
1130 TMNotifyStartOfExecution(pVCpu);
1131
1132 if ( pVM->rem.s.Env.exception_index < 0
1133 || pVM->rem.s.Env.exception_index > 256)
1134 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1135
1136#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1137 pVM->rem.s.Env.interrupt_request = 0;
1138#else
1139 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1140#endif
1141 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1142 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1143 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1144 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1145 pVM->rem.s.Env.interrupt_request,
1146 pVM->rem.s.Env.halted,
1147 pVM->rem.s.Env.exception_index
1148 );
1149
1150 rc = cpu_exec(&pVM->rem.s.Env);
1151
1152 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1153 pVM->rem.s.Env.interrupt_request,
1154 pVM->rem.s.Env.halted,
1155 pVM->rem.s.Env.exception_index
1156 );
1157
1158 TMNotifyEndOfExecution(pVCpu);
1159
1160 switch (rc)
1161 {
1162#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1163 /*
1164 * The normal exit.
1165 */
1166 case EXCP_SINGLE_INSTR:
1167 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1168 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1169 continue;
1170 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1171 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1172 rc = VINF_SUCCESS;
1173 break;
1174
1175#else
1176 /*
1177 * The normal exit, check for breakpoints at PC just to be sure.
1178 */
1179#endif
1180 case EXCP_DEBUG:
1181 if (pVM->rem.s.Env.watchpoint_hit)
1182 {
1183 /** @todo deal with watchpoints */
1184 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1185 rc = VINF_EM_DBG_BREAKPOINT;
1186 }
1187 else
1188 {
1189 CPUBreakpoint *pBP;
1190 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1191 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1192 if (pBP->pc == GCPtrPC)
1193 break;
1194 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1195 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1196 }
1197#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1198 if (rc == VINF_EM_DBG_STEPPED)
1199 {
1200 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1201 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1202 continue;
1203
1204 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1205 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1206 rc = VINF_SUCCESS;
1207 }
1208#endif
1209 break;
1210
1211 /*
1212 * If we take a trap or start servicing a pending interrupt, we might end up here.
1213 * (Timer thread or some other thread wishing EMT's attention.)
1214 */
1215 case EXCP_INTERRUPT:
1216 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1217 rc = VINF_SUCCESS;
1218 break;
1219
1220 /*
1221 * hlt instruction.
1222 */
1223 case EXCP_HLT:
1224 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1225 rc = VINF_EM_HALT;
1226 break;
1227
1228 /*
1229 * The VM has halted.
1230 */
1231 case EXCP_HALTED:
1232 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1233 rc = VINF_EM_HALT;
1234 break;
1235
1236 /*
1237 * Switch to RAW-mode.
1238 */
1239 case EXCP_EXECUTE_RAW:
1240 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1241 rc = VINF_EM_RESCHEDULE_RAW;
1242 break;
1243
1244 /*
1245 * Switch to hardware accelerated RAW-mode.
1246 */
1247 case EXCP_EXECUTE_HWACC:
1248 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1249 rc = VINF_EM_RESCHEDULE_HWACC;
1250 break;
1251
1252 /*
1253 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1254 */
1255 case EXCP_RC:
1256 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1257 rc = pVM->rem.s.rc;
1258 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1259 break;
1260
1261 /*
1262 * Figure out the rest when they arrive....
1263 */
1264 default:
1265 AssertMsgFailed(("rc=%d\n", rc));
1266 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1267 rc = VINF_EM_RESCHEDULE;
1268 break;
1269 }
1270 break;
1271 }
1272
1273#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1274// cpu_single_step(&pVM->rem.s.Env, 0);
1275#else
1276 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1277#endif
1278 return rc;
1279}
1280
1281
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* Divert to the instruction-logging stepper when single-step logging is on. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Translate the QEMU EXCP_* exit reason into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                /* Scan the breakpoint list to tell a breakpoint hit from a step. */
                CPUBreakpoint  *pBP;
                RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1395
1396
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    /* In IEM verification mode everything stays in the recompiler. */
    return false;
#endif

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Note: the ">> 8 & 0xF0FF" dance converts QEMU's segment flags
         * layout into the CPUM hidden attribute format.
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr             = env->ldt.selector;
        Ctx.ldtrHid.u64Base  = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u   = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->breakpoints))
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->watchpoints))
    {
        //Log2(("raw mode refused: Watchpoints\n"));
        return false;
    }

    /* Raw mode requires paged protected mode. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1651
1652
1653/**
1654 * Fetches a code byte.
1655 *
1656 * @returns Success indicator (bool) for ease of use.
1657 * @param env The CPU environment structure.
1658 * @param GCPtrInstr Where to fetch code.
1659 * @param pu8Byte Where to store the byte on success
1660 */
1661bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1662{
1663 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1664 if (RT_SUCCESS(rc))
1665 return true;
1666 return false;
1667}
1668
1669
1670/**
1671 * Flush (or invalidate if you like) page table/dir entry.
1672 *
1673 * (invlpg instruction; tlb_flush_page)
1674 *
1675 * @param env Pointer to cpu environment.
1676 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1677 */
1678void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1679{
1680 PVM pVM = env->pVM;
1681 PCPUMCTX pCtx;
1682 int rc;
1683
1684 Assert(EMRemIsLockOwner(env->pVM));
1685
1686 /*
1687 * When we're replaying invlpg instructions or restoring a saved
1688 * state we disable this path.
1689 */
1690 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1691 return;
1692 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1693 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1694
1695 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1696
1697 /*
1698 * Update the control registers before calling PGMFlushPage.
1699 */
1700 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1701 Assert(pCtx);
1702 pCtx->cr0 = env->cr[0];
1703 pCtx->cr3 = env->cr[3];
1704 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1705 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1706 pCtx->cr4 = env->cr[4];
1707
1708 /*
1709 * Let PGM do the rest.
1710 */
1711 Assert(env->pVCpu);
1712 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1713 if (RT_FAILURE(rc))
1714 {
1715 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1716 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1717 }
1718 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1719}
1720
1721
1722#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to an R3 pointer for the recompiler's
 * TLB, encoding status in the low bits of the returned pointer:
 *  - (void *)1            : translation failed (catch-all / unassigned).
 *  - low bit 1 set (|2)   : page is write-monitored, writes must be caught.
 *  - otherwise            : plain pointer into the page.
 */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored - a writable mapping is
       always requested and write monitoring is signalled via the |2 encoding
       below; presumably intentional, but confirm against callers. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1743#endif /* REM_PHYS_ADDR_IN_TLB */
1744
1745
1746/**
1747 * Called from tlb_protect_code in order to write monitor a code page.
1748 *
1749 * @param env Pointer to the CPU environment.
1750 * @param GCPtr Code page to monitor
1751 */
1752void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1753{
1754#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1755 Assert(env->pVM->rem.s.fInREM);
1756 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1757 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1758 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1759 && !(env->eflags & VM_MASK) /* no V86 mode */
1760 && !HWACCMIsEnabled(env->pVM))
1761 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1762#endif
1763}
1764
1765
1766/**
1767 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1768 *
1769 * @param env Pointer to the CPU environment.
1770 * @param GCPtr Code page to monitor
1771 */
1772void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1773{
1774 Assert(env->pVM->rem.s.fInREM);
1775#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1776 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1777 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1778 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1779 && !(env->eflags & VM_MASK) /* no V86 mode */
1780 && !HWACCMIsEnabled(env->pVM))
1781 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1782#endif
1783}
1784
1785
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUX86State *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    Assert(EMRemIsLockOwner(pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so a non-global flush
     * degenerates into a global one.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     * A CR4.VME change requires SELM to resync the TSS, hence the force-flag.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1831
1832
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * Syncs the control registers into the CPUM context and notifies PGM of the
 * (potential) paging-mode change.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUX86State *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     * A CR4.VME change requires SELM to resync the TSS, hence the force-flag.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];
#ifdef TARGET_X86_64
    efer = env->efer;
    pCtx->msrEFER = efer;
#else
    efer = 0; /* no EFER on 32-bit only targets */
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM status: raise it so cpu_exec exits with EXCP_RC. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1883
1884
/**
 * Called from compiled code to run dma.
 *
 * Brackets the PDM DMA pass with profiling stop/start so the time isn't
 * attributed to emulated-code execution.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3DmaRun(CPUX86State *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1896
1897
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Brackets the timer-queue run with profiling counters so timer time is
 * accounted separately from emulated-code execution.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3TimersRun(CPUX86State *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1913
1914
1915/**
1916 * Record trap occurrence
1917 *
1918 * @returns VBox status code
1919 * @param env Pointer to the CPU environment.
1920 * @param uTrap Trap nr
1921 * @param uErrorCode Error code
1922 * @param pvNextEIP Next EIP
1923 */
1924int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1925{
1926 PVM pVM = env->pVM;
1927#ifdef VBOX_WITH_STATISTICS
1928 static STAMCOUNTER s_aStatTrap[255];
1929 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1930#endif
1931
1932#ifdef VBOX_WITH_STATISTICS
1933 if (uTrap < 255)
1934 {
1935 if (!s_aRegisters[uTrap])
1936 {
1937 char szStatName[64];
1938 s_aRegisters[uTrap] = true;
1939 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1940 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1941 }
1942 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1943 }
1944#endif
1945 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1946 if( uTrap < 0x20
1947 && (env->cr[0] & X86_CR0_PE)
1948 && !(env->eflags & X86_EFL_VM))
1949 {
1950#ifdef DEBUG
1951 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1952#endif
1953 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1954 {
1955 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1956 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1957 return VERR_REM_TOO_MANY_TRAPS;
1958 }
1959 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1960 pVM->rem.s.cPendingExceptions = 1;
1961 pVM->rem.s.uPendingException = uTrap;
1962 pVM->rem.s.uPendingExcptEIP = env->eip;
1963 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1964 }
1965 else
1966 {
1967 pVM->rem.s.cPendingExceptions = 0;
1968 pVM->rem.s.uPendingException = uTrap;
1969 pVM->rem.s.uPendingExcptEIP = env->eip;
1970 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1971 }
1972 return VINF_SUCCESS;
1973}
1974
1975
1976/*
1977 * Clear current active trap
1978 *
1979 * @param pVM VM Handle.
1980 */
1981void remR3TrapClear(PVM pVM)
1982{
1983 pVM->rem.s.cPendingExceptions = 0;
1984 pVM->rem.s.uPendingException = 0;
1985 pVM->rem.s.uPendingExcptEIP = 0;
1986 pVM->rem.s.uPendingExcptCR2 = 0;
1987}
1988
1989
1990/*
1991 * Record previous call instruction addresses
1992 *
1993 * @param env Pointer to the CPU environment.
1994 */
1995void remR3RecordCall(CPUX86State *env)
1996{
1997 CSAMR3RecordCallAddress(env->pVM, env->eip);
1998}
1999
2000
2001/**
2002 * Syncs the internal REM state with the VM.
2003 *
2004 * This must be called before REMR3Run() is invoked whenever when the REM
2005 * state is not up to date. Calling it several times in a row is not
2006 * permitted.
2007 *
2008 * @returns VBox status code.
2009 *
2010 * @param pVM VM Handle.
2011 * @param pVCpu VMCPU Handle.
2012 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
2016 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8] = pCtx->r8;
    pVM->rem.s.Env.regs[9] = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Only if we're not flushing the TLB.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (pVM->rem.s.cInvalidatedPages)
    {
        if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
        {
            RTUINT i;

            /* Ignore the CR3 loads / invlpg callbacks triggered by our own replay. */
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreInvlPg = true;
            for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
            {
                Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
                tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
            }
            pVM->rem.s.fIgnoreInvlPg = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer = pCtx->msrEFER;
    pVM->rem.s.Env.star = pCtx->msrSTAR;
    pVM->rem.s.Env.pat = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Sync the A20 gate.
     */
    bool fA20State = PGMPhysIsA20Enabled(pVCpu);
    if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
    {
        /* Bump cIgnoreAll so the notification callbacks fired by the A20
           change don't recurse back into state syncing. */
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
        cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
       )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
                /* QEmu keeps the 2nd descriptor dword in 'flags'; shift the attribute word up. */
                pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT om sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int = 0;
            pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int = 1;
            pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case 0x0e:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case 0x11: case 0x08:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
             (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (   pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2491
2492
2493/**
 * Syncs back changes in the REM state to the VM state.
2495 *
2496 * This must be called after invoking REMR3Run().
2497 * Calling it several times in a row is not permitted.
2498 *
2499 * @returns VBox status code.
2500 *
2501 * @param pVM VM Handle.
2502 * @param pVCpu VMCPU Handle.
2503 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8 = pVM->rem.s.Env.regs[8];
    pCtx->r9 = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that are still pending a GDT/LDT lookup (out of sync). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle requires a TSS resync (flag consumed by SELM). */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (   pCtx->ldtr != pVM->rem.s.Env.ldt.selector
        || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
        || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        /* QEmu stores the 2nd descriptor dword; shift back down to an attribute word. */
        pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (   pCtx->tr != pVM->rem.s.Env.tr.selector
        || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
        || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
           /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        || pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                  ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                  : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Reinstate the busy bit the REM entry path stripped off. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're not longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                       |  pVM->rem.s.Env.segs[R_GS].newselector
                       |  pVM->rem.s.Env.segs[R_FS].newselector
                       |  pVM->rem.s.Env.segs[R_ES].newselector
                       |  pVM->rem.s.Env.segs[R_DS].newselector
                       |  pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2752
2753
2754/**
2755 * This is called by the disassembler when it wants to update the cpu state
2756 * before for instance doing a register dump.
2757 */
2758static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2759{
2760 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2761 unsigned i;
2762
2763 Assert(pVM->rem.s.fInREM);
2764
2765 /*
2766 * Copy back the registers.
2767 * This is done in the order they are declared in the CPUMCTX structure.
2768 */
2769
2770 /** @todo FOP */
2771 /** @todo FPUIP */
2772 /** @todo CS */
2773 /** @todo FPUDP */
2774 /** @todo DS */
2775 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2776 pCtx->fpu.MXCSR = 0;
2777 pCtx->fpu.MXCSR_MASK = 0;
2778
2779 /** @todo check if FPU/XMM was actually used in the recompiler */
2780 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2781//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2782
2783#ifdef TARGET_X86_64
2784 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2785 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2786 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2787 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2788 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2789 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2790 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2791 pCtx->r8 = pVM->rem.s.Env.regs[8];
2792 pCtx->r9 = pVM->rem.s.Env.regs[9];
2793 pCtx->r10 = pVM->rem.s.Env.regs[10];
2794 pCtx->r11 = pVM->rem.s.Env.regs[11];
2795 pCtx->r12 = pVM->rem.s.Env.regs[12];
2796 pCtx->r13 = pVM->rem.s.Env.regs[13];
2797 pCtx->r14 = pVM->rem.s.Env.regs[14];
2798 pCtx->r15 = pVM->rem.s.Env.regs[15];
2799
2800 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2801#else
2802 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2803 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2804 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2805 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2806 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2807 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2808 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2809
2810 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2811#endif
2812
2813 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2814
2815 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2816 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2817 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2818 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2819 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2820
2821#ifdef TARGET_X86_64
2822 pCtx->rip = pVM->rem.s.Env.eip;
2823 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2824#else
2825 pCtx->eip = pVM->rem.s.Env.eip;
2826 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2827#endif
2828
2829 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2830 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2831 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2832 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2833 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2834 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2835
2836 for (i = 0; i < 8; i++)
2837 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2838
2839 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2840 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2841 {
2842 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2843 STAM_COUNTER_INC(&gStatREMGDTChange);
2844 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2845 }
2846
2847 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2848 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2849 {
2850 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2851 STAM_COUNTER_INC(&gStatREMIDTChange);
2852 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2853 }
2854
2855 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2856 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2857 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2858 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2859 {
2860 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2861 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2862 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2863 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2864 STAM_COUNTER_INC(&gStatREMLDTRChange);
2865 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2866 }
2867
2868 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2869 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2870 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2871 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2872 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2873 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2874 : 0) )
2875 {
2876 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2877 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2878 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2879 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2880 pCtx->tr = pVM->rem.s.Env.tr.selector;
2881 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2882 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2883 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2884 if (pCtx->trHid.Attr.u)
2885 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2886 STAM_COUNTER_INC(&gStatREMTRChange);
2887 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2888 }
2889
2890 /** @todo These values could still be out of sync! */
2891 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2892 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2893 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2894 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2895
2896 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2897 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2898 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2899
2900 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2901 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2902 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2903
2904 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2905 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2906 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2907
2908 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2909 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2910 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2911
2912 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2913 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2914 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2915
2916 /* Sysenter MSR */
2917 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2918 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2919 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2920
2921 /* System MSRs. */
2922 pCtx->msrEFER = pVM->rem.s.Env.efer;
2923 pCtx->msrSTAR = pVM->rem.s.Env.star;
2924 pCtx->msrPAT = pVM->rem.s.Env.pat;
2925#ifdef TARGET_X86_64
2926 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2927 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2928 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2929 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2930#endif
2931
2932}
2933
2934
2935/**
2936 * Update the VMM state information if we're currently in REM.
2937 *
2938 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2939 * we're currently executing in REM and the VMM state is invalid. This method will of
2940 * course check that we're executing in REM before syncing any data over to the VMM.
2941 *
2942 * @param pVM The VM handle.
2943 * @param pVCpu The VMCPU handle.
2944 */
2945REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2946{
2947 if (pVM->rem.s.fInREM)
2948 remR3StateUpdate(pVM, pVCpu);
2949}
2950
2951
2952#undef LOG_GROUP
2953#define LOG_GROUP LOG_GROUP_REM
2954
2955
2956/**
2957 * Notify the recompiler about Address Gate 20 state change.
2958 *
2959 * This notification is required since A20 gate changes are
2960 * initialized from a device driver and the VM might just as
2961 * well be in REM mode as in RAW mode.
2962 *
2963 * @param pVM VM handle.
2964 * @param pVCpu VMCPU handle.
2965 * @param fEnable True if the gate should be enabled.
2966 * False if the gate should be disabled.
2967 */
2968REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2969{
2970 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2971 VM_ASSERT_EMT(pVM);
2972
2973 /** @todo SMP and the A20 gate... */
2974 if (pVM->rem.s.Env.pVCpu == pVCpu)
2975 {
2976 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2977 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2978 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2979 }
2980}
2981
2982
2983/**
2984 * Replays the handler notification changes
2985 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2986 *
2987 * @param pVM VM handle.
2988 */
2989REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2990{
2991 /*
2992 * Replay the flushes.
2993 */
2994 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2995 VM_ASSERT_EMT(pVM);
2996
2997 /** @todo this isn't ensuring correct replay order. */
2998 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2999 {
3000 uint32_t idxNext;
3001 uint32_t idxRevHead;
3002 uint32_t idxHead;
3003#ifdef VBOX_STRICT
3004 int32_t c = 0;
3005#endif
3006
3007 /* Lockless purging of pending notifications. */
3008 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3009 if (idxHead == UINT32_MAX)
3010 return;
3011 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3012
3013 /*
3014 * Reverse the list to process it in FIFO order.
3015 */
3016 idxRevHead = UINT32_MAX;
3017 do
3018 {
3019 /* Save the index of the next rec. */
3020 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3021 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3022 /* Push the record onto the reversed list. */
3023 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3024 idxRevHead = idxHead;
3025 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3026 /* Advance. */
3027 idxHead = idxNext;
3028 } while (idxHead != UINT32_MAX);
3029
3030 /*
3031 * Loop thru the list, reinserting the record into the free list as they are
3032 * processed to avoid having other EMTs running out of entries while we're flushing.
3033 */
3034 idxHead = idxRevHead;
3035 do
3036 {
3037 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3038 uint32_t idxCur;
3039 Assert(--c >= 0);
3040
3041 switch (pCur->enmKind)
3042 {
3043 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3044 remR3NotifyHandlerPhysicalRegister(pVM,
3045 pCur->u.PhysicalRegister.enmType,
3046 pCur->u.PhysicalRegister.GCPhys,
3047 pCur->u.PhysicalRegister.cb,
3048 pCur->u.PhysicalRegister.fHasHCHandler);
3049 break;
3050
3051 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3052 remR3NotifyHandlerPhysicalDeregister(pVM,
3053 pCur->u.PhysicalDeregister.enmType,
3054 pCur->u.PhysicalDeregister.GCPhys,
3055 pCur->u.PhysicalDeregister.cb,
3056 pCur->u.PhysicalDeregister.fHasHCHandler,
3057 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3058 break;
3059
3060 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3061 remR3NotifyHandlerPhysicalModify(pVM,
3062 pCur->u.PhysicalModify.enmType,
3063 pCur->u.PhysicalModify.GCPhysOld,
3064 pCur->u.PhysicalModify.GCPhysNew,
3065 pCur->u.PhysicalModify.cb,
3066 pCur->u.PhysicalModify.fHasHCHandler,
3067 pCur->u.PhysicalModify.fRestoreAsRAM);
3068 break;
3069
3070 default:
3071 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3072 break;
3073 }
3074
3075 /*
3076 * Advance idxHead.
3077 */
3078 idxCur = idxHead;
3079 idxHead = pCur->idxNext;
3080 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3081
3082 /*
3083 * Put the record back into the free list.
3084 */
3085 do
3086 {
3087 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3088 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3089 ASMCompilerBarrier();
3090 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3091 } while (idxHead != UINT32_MAX);
3092
3093#ifdef VBOX_STRICT
3094 if (pVM->cCpus == 1)
3095 {
3096 unsigned c;
3097 /* Check that all records are now on the free list. */
3098 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3099 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3100 c++;
3101 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3102 }
3103#endif
3104 }
3105}
3106
3107
3108/**
3109 * Notify REM about changed code page.
3110 *
3111 * @returns VBox status code.
3112 * @param pVM VM handle.
3113 * @param pVCpu VMCPU handle.
3114 * @param pvCodePage Code page address
3115 */
3116REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3117{
3118#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3119 int rc;
3120 RTGCPHYS PhysGC;
3121 uint64_t flags;
3122
3123 VM_ASSERT_EMT(pVM);
3124
3125 /*
3126 * Get the physical page address.
3127 */
3128 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3129 if (rc == VINF_SUCCESS)
3130 {
3131 /*
3132 * Sync the required registers and flush the whole page.
3133 * (Easier to do the whole page than notifying it about each physical
3134 * byte that was changed.
3135 */
3136 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3137 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3138 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3139 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3140
3141 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3142 }
3143#endif
3144 return VINF_SUCCESS;
3145}
3146
3147
3148/**
3149 * Notification about a successful MMR3PhysRegister() call.
3150 *
3151 * @param pVM VM handle.
3152 * @param GCPhys The physical address the RAM.
3153 * @param cb Size of the memory.
3154 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3155 */
3156REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3157{
3158 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3159 VM_ASSERT_EMT(pVM);
3160
3161 /*
3162 * Validate input - we trust the caller.
3163 */
3164 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3165 Assert(cb);
3166 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3167 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3168
3169 /*
3170 * Base ram? Update GCPhysLastRam.
3171 */
3172 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3173 {
3174 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3175 {
3176 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3177 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3178 }
3179 }
3180
3181 /*
3182 * Register the ram.
3183 */
3184 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3185
3186 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3187 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3188 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3189
3190 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3191}
3192
3193
3194/**
3195 * Notification about a successful MMR3PhysRomRegister() call.
3196 *
3197 * @param pVM VM handle.
3198 * @param GCPhys The physical address of the ROM.
3199 * @param cb The size of the ROM.
3200 * @param pvCopy Pointer to the ROM copy.
3201 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3202 * This function will be called when ever the protection of the
3203 * shadow ROM changes (at reset and end of POST).
3204 */
3205REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3206{
3207 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3208 VM_ASSERT_EMT(pVM);
3209
3210 /*
3211 * Validate input - we trust the caller.
3212 */
3213 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3214 Assert(cb);
3215 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3216
3217 /*
3218 * Register the rom.
3219 */
3220 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3221
3222 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3223 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3224 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3225
3226 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3227}
3228
3229
3230/**
3231 * Notification about a successful memory deregistration or reservation.
3232 *
3233 * @param pVM VM Handle.
3234 * @param GCPhys Start physical address.
3235 * @param cb The size of the range.
3236 */
3237REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3238{
3239 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3240 VM_ASSERT_EMT(pVM);
3241
3242 /*
3243 * Validate input - we trust the caller.
3244 */
3245 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3246 Assert(cb);
3247 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3248
3249 /*
3250 * Unassigning the memory.
3251 */
3252 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3253
3254 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3255 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3256 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3257
3258 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3259}
3260
3261
3262/**
3263 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3264 *
3265 * @param pVM VM Handle.
3266 * @param enmType Handler type.
3267 * @param GCPhys Handler range address.
3268 * @param cb Size of the handler range.
3269 * @param fHasHCHandler Set if the handler has a HC callback function.
3270 *
3271 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3272 * Handler memory type to memory which has no HC handler.
3273 */
3274static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3275{
3276 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3277 enmType, GCPhys, cb, fHasHCHandler));
3278
3279 VM_ASSERT_EMT(pVM);
3280 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3281 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3282
3283
3284 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3285
3286 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3287 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3288 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3289 else if (fHasHCHandler)
3290 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3291 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3292
3293 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3294}
3295
3296/**
3297 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3298 *
3299 * @param pVM VM Handle.
3300 * @param enmType Handler type.
3301 * @param GCPhys Handler range address.
3302 * @param cb Size of the handler range.
3303 * @param fHasHCHandler Set if the handler has a HC callback function.
3304 *
3305 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3306 * Handler memory type to memory which has no HC handler.
3307 */
3308REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3309{
3310 REMR3ReplayHandlerNotifications(pVM);
3311
3312 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3313}
3314
3315/**
3316 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3317 *
3318 * @param pVM VM Handle.
3319 * @param enmType Handler type.
3320 * @param GCPhys Handler range address.
3321 * @param cb Size of the handler range.
3322 * @param fHasHCHandler Set if the handler has a HC callback function.
3323 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3324 */
3325static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3326{
3327 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3328 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3329 VM_ASSERT_EMT(pVM);
3330
3331
3332 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3333
3334 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3335 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3336 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3337 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3338 else if (fHasHCHandler)
3339 {
3340 if (!fRestoreAsRAM)
3341 {
3342 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3343 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3344 }
3345 else
3346 {
3347 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3348 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3349 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3350 }
3351 }
3352 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3353
3354 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3355}
3356
3357/**
3358 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3359 *
3360 * @param pVM VM Handle.
3361 * @param enmType Handler type.
3362 * @param GCPhys Handler range address.
3363 * @param cb Size of the handler range.
3364 * @param fHasHCHandler Set if the handler has a HC callback function.
3365 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3366 */
3367REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3368{
3369 REMR3ReplayHandlerNotifications(pVM);
3370 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3371}
3372
3373
3374/**
3375 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3376 *
3377 * @param pVM VM Handle.
3378 * @param enmType Handler type.
3379 * @param GCPhysOld Old handler range address.
3380 * @param GCPhysNew New handler range address.
3381 * @param cb Size of the handler range.
3382 * @param fHasHCHandler Set if the handler has a HC callback function.
3383 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3384 */
3385static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3386{
3387 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3388 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3389 VM_ASSERT_EMT(pVM);
3390 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3391
3392 if (fHasHCHandler)
3393 {
3394 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3395
3396 /*
3397 * Reset the old page.
3398 */
3399 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3400 if (!fRestoreAsRAM)
3401 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3402 else
3403 {
3404 /* This is not perfect, but it'll do for PD monitoring... */
3405 Assert(cb == PAGE_SIZE);
3406 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3407 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3408 }
3409
3410 /*
3411 * Update the new page.
3412 */
3413 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3414 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3415 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3416 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3417
3418 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3419 }
3420}
3421
3422/**
3423 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3424 *
3425 * @param pVM VM Handle.
3426 * @param enmType Handler type.
3427 * @param GCPhysOld Old handler range address.
3428 * @param GCPhysNew New handler range address.
3429 * @param cb Size of the handler range.
3430 * @param fHasHCHandler Set if the handler has a HC callback function.
3431 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3432 */
3433REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3434{
3435 REMR3ReplayHandlerNotifications(pVM);
3436
3437 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3438}
3439
3440/**
3441 * Checks if we're handling access to this page or not.
3442 *
3443 * @returns true if we're trapping access.
3444 * @returns false if we aren't.
3445 * @param pVM The VM handle.
3446 * @param GCPhys The physical address.
3447 *
3448 * @remark This function will only work correctly in VBOX_STRICT builds!
3449 */
3450REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3451{
3452#ifdef VBOX_STRICT
3453 unsigned long off;
3454 REMR3ReplayHandlerNotifications(pVM);
3455
3456 off = get_phys_page_offset(GCPhys);
3457 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3458 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3459 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3460#else
3461 return false;
3462#endif
3463}
3464
3465
3466/**
3467 * Deals with a rare case in get_phys_addr_code where the code
3468 * is being monitored.
3469 *
3470 * It could also be an MMIO page, in which case we will raise a fatal error.
3471 *
3472 * @returns The physical address corresponding to addr.
3473 * @param env The cpu environment.
3474 * @param addr The virtual address.
3475 * @param pTLBEntry The TLB entry.
3476 */
3477target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3478 target_ulong addr,
3479 CPUTLBEntry *pTLBEntry,
3480 target_phys_addr_t ioTLBEntry)
3481{
3482 PVM pVM = env->pVM;
3483
3484 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3485 {
3486 /* If code memory is being monitored, appropriate IOTLB entry will have
3487 handler IO type, and addend will provide real physical address, no
3488 matter if we store VA in TLB or not, as handlers are always passed PA */
3489 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3490 return ret;
3491 }
3492 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3493 "*** handlers\n",
3494 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3495 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3496 LogRel(("*** mmio\n"));
3497 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3498 LogRel(("*** phys\n"));
3499 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3500 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3501 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3502 AssertFatalFailed();
3503}
3504
3505/**
3506 * Read guest RAM and ROM.
3507 *
3508 * @param SrcGCPhys The source address (guest physical).
3509 * @param pvDst The destination address.
3510 * @param cb Number of bytes
3511 */
3512void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3513{
3514 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3515 VBOX_CHECK_ADDR(SrcGCPhys);
3516 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3517#ifdef VBOX_DEBUG_PHYS
3518 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3519#endif
3520 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3521}
3522
3523
3524/**
3525 * Read guest RAM and ROM, unsigned 8-bit.
3526 *
3527 * @param SrcGCPhys The source address (guest physical).
3528 */
3529RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3530{
3531 uint8_t val;
3532 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3533 VBOX_CHECK_ADDR(SrcGCPhys);
3534 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3535 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3536#ifdef VBOX_DEBUG_PHYS
3537 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3538#endif
3539 return val;
3540}
3541
3542
3543/**
3544 * Read guest RAM and ROM, signed 8-bit.
3545 *
3546 * @param SrcGCPhys The source address (guest physical).
3547 */
3548RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3549{
3550 int8_t val;
3551 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3552 VBOX_CHECK_ADDR(SrcGCPhys);
3553 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3554 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3555#ifdef VBOX_DEBUG_PHYS
3556 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3557#endif
3558 return val;
3559}
3560
3561
3562/**
3563 * Read guest RAM and ROM, unsigned 16-bit.
3564 *
3565 * @param SrcGCPhys The source address (guest physical).
3566 */
3567RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3568{
3569 uint16_t val;
3570 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3571 VBOX_CHECK_ADDR(SrcGCPhys);
3572 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3573 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3574#ifdef VBOX_DEBUG_PHYS
3575 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3576#endif
3577 return val;
3578}
3579
3580
3581/**
3582 * Read guest RAM and ROM, signed 16-bit.
3583 *
3584 * @param SrcGCPhys The source address (guest physical).
3585 */
3586RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3587{
3588 int16_t val;
3589 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3590 VBOX_CHECK_ADDR(SrcGCPhys);
3591 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3592 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3593#ifdef VBOX_DEBUG_PHYS
3594 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3595#endif
3596 return val;
3597}
3598
3599
3600/**
3601 * Read guest RAM and ROM, unsigned 32-bit.
3602 *
3603 * @param SrcGCPhys The source address (guest physical).
3604 */
3605RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3606{
3607 uint32_t val;
3608 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3609 VBOX_CHECK_ADDR(SrcGCPhys);
3610 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3611 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3612#ifdef VBOX_DEBUG_PHYS
3613 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3614#endif
3615 return val;
3616}
3617
3618
3619/**
3620 * Read guest RAM and ROM, signed 32-bit.
3621 *
3622 * @param SrcGCPhys The source address (guest physical).
3623 */
3624RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3625{
3626 int32_t val;
3627 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3628 VBOX_CHECK_ADDR(SrcGCPhys);
3629 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3630 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3631#ifdef VBOX_DEBUG_PHYS
3632 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3633#endif
3634 return val;
3635}
3636
3637
3638/**
3639 * Read guest RAM and ROM, unsigned 64-bit.
3640 *
3641 * @param SrcGCPhys The source address (guest physical).
3642 */
3643uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3644{
3645 uint64_t val;
3646 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3647 VBOX_CHECK_ADDR(SrcGCPhys);
3648 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3649 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3650#ifdef VBOX_DEBUG_PHYS
3651 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3652#endif
3653 return val;
3654}
3655
3656
3657/**
3658 * Read guest RAM and ROM, signed 64-bit.
3659 *
3660 * @param SrcGCPhys The source address (guest physical).
3661 */
3662int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3663{
3664 int64_t val;
3665 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3666 VBOX_CHECK_ADDR(SrcGCPhys);
3667 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3668 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3669#ifdef VBOX_DEBUG_PHYS
3670 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3671#endif
3672 return val;
3673}
3674
3675
3676/**
3677 * Write guest RAM.
3678 *
3679 * @param DstGCPhys The destination address (guest physical).
3680 * @param pvSrc The source address.
3681 * @param cb Number of bytes to write
3682 */
3683void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3684{
3685 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3686 VBOX_CHECK_ADDR(DstGCPhys);
3687 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3688 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3689#ifdef VBOX_DEBUG_PHYS
3690 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3691#endif
3692}
3693
3694
3695/**
3696 * Write guest RAM, unsigned 8-bit.
3697 *
3698 * @param DstGCPhys The destination address (guest physical).
3699 * @param val Value
3700 */
3701void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3702{
3703 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3704 VBOX_CHECK_ADDR(DstGCPhys);
3705 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3706 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3707#ifdef VBOX_DEBUG_PHYS
3708 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3709#endif
3710}
3711
3712
3713/**
3714 * Write guest RAM, unsigned 8-bit.
3715 *
3716 * @param DstGCPhys The destination address (guest physical).
3717 * @param val Value
3718 */
3719void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3720{
3721 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3722 VBOX_CHECK_ADDR(DstGCPhys);
3723 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3724 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3725#ifdef VBOX_DEBUG_PHYS
3726 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3727#endif
3728}
3729
3730
3731/**
3732 * Write guest RAM, unsigned 32-bit.
3733 *
3734 * @param DstGCPhys The destination address (guest physical).
3735 * @param val Value
3736 */
3737void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3738{
3739 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3740 VBOX_CHECK_ADDR(DstGCPhys);
3741 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3742 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3743#ifdef VBOX_DEBUG_PHYS
3744 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3745#endif
3746}
3747
3748
3749/**
3750 * Write guest RAM, unsigned 64-bit.
3751 *
3752 * @param DstGCPhys The destination address (guest physical).
3753 * @param val Value
3754 */
3755void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3756{
3757 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3758 VBOX_CHECK_ADDR(DstGCPhys);
3759 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3760 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3761#ifdef VBOX_DEBUG_PHYS
3762 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3763#endif
3764}
3765
3766#undef LOG_GROUP
3767#define LOG_GROUP LOG_GROUP_REM_MMIO
3768
3769/** Read MMIO memory. */
3770static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3771{
3772 uint32_t u32 = 0;
3773 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3774 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3775 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3776 return u32;
3777}
3778
3779/** Read MMIO memory. */
3780static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3781{
3782 uint32_t u32 = 0;
3783 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3784 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3785 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3786 return u32;
3787}
3788
3789/** Read MMIO memory. */
3790static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3791{
3792 uint32_t u32 = 0;
3793 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3794 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3795 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3796 return u32;
3797}
3798
3799/** Write to MMIO memory. */
3800static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3801{
3802 int rc;
3803 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3804 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3805 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3806}
3807
3808/** Write to MMIO memory. */
3809static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3810{
3811 int rc;
3812 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3813 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3814 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3815}
3816
3817/** Write to MMIO memory. */
3818static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3819{
3820 int rc;
3821 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3822 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3823 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3824}
3825
3826
3827#undef LOG_GROUP
3828#define LOG_GROUP LOG_GROUP_REM_HANDLER
3829
3830/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3831
/** Handler-memory read, 8-bit: goes through PGM so registered access handlers fire. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3839
/** Handler-memory read, 16-bit: goes through PGM so registered access handlers fire. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3847
/** Handler-memory read, 32-bit: goes through PGM so registered access handlers fire. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3855
/** Handler-memory write, 8-bit: goes through PGM so registered access handlers fire. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    /* NOTE(review): passes &u32 with a 1-byte size, i.e. writes the low byte --
       this assumes a little-endian host layout; verify for non-x86 hosts. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3861
/** Handler-memory write, 16-bit: goes through PGM so registered access handlers fire. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    /* NOTE(review): passes &u32 with a 2-byte size, i.e. writes the low word --
       this assumes a little-endian host layout; verify for non-x86 hosts. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3867
/** Handler-memory write, 32-bit: goes through PGM so registered access handlers fire. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3873
3874/* -+- disassembly -+- */
3875
3876#undef LOG_GROUP
3877#define LOG_GROUP LOG_GROUP_REM_DISAS
3878
3879
3880/**
3881 * Enables or disables singled stepped disassembly.
3882 *
3883 * @returns VBox status code.
3884 * @param pVM VM handle.
3885 * @param fEnable To enable set this flag, to disable clear it.
3886 */
3887static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3888{
3889 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3890 VM_ASSERT_EMT(pVM);
3891
3892 if (fEnable)
3893 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3894 else
3895 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3896#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3897 cpu_single_step(&pVM->rem.s.Env, fEnable);
3898#endif
3899 return VINF_SUCCESS;
3900}
3901
3902
3903/**
3904 * Enables or disables singled stepped disassembly.
3905 *
3906 * @returns VBox status code.
3907 * @param pVM VM handle.
3908 * @param fEnable To enable set this flag, to disable clear it.
3909 */
3910REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3911{
3912 int rc;
3913
3914 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3915 if (VM_IS_EMT(pVM))
3916 return remR3DisasEnableStepping(pVM, fEnable);
3917
3918 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3919 AssertRC(rc);
3920 return rc;
3921}
3922
3923
3924#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3925/**
3926 * External Debugger Command: .remstep [on|off|1|0]
3927 */
3928static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3929{
3930 int rc;
3931
3932 if (cArgs == 0)
3933 /*
3934 * Print the current status.
3935 */
3936 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3937 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3938 else
3939 {
3940 /*
3941 * Convert the argument and change the mode.
3942 */
3943 bool fEnable;
3944 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3945 if (RT_SUCCESS(rc))
3946 {
3947 rc = REMR3DisasEnableStepping(pVM, fEnable);
3948 if (RT_SUCCESS(rc))
3949 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3950 else
3951 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3952 }
3953 else
3954 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3955 }
3956 return rc;
3957}
3958#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3959
3960
3961/**
3962 * Disassembles one instruction and prints it to the log.
3963 *
3964 * @returns Success indicator.
3965 * @param env Pointer to the recompiler CPU structure.
3966 * @param f32BitCode Indicates that whether or not the code should
3967 * be disassembled as 16 or 32 bit. If -1 the CS
3968 * selector will be inspected.
3969 * @param pszPrefix
3970 */
3971bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3972{
3973 PVM pVM = env->pVM;
3974 const bool fLog = LogIsEnabled();
3975 const bool fLog2 = LogIs2Enabled();
3976 int rc = VINF_SUCCESS;
3977
3978 /*
3979 * Don't bother if there ain't any log output to do.
3980 */
3981 if (!fLog && !fLog2)
3982 return true;
3983
3984 /*
3985 * Update the state so DBGF reads the correct register values.
3986 */
3987 remR3StateUpdate(pVM, env->pVCpu);
3988
3989 /*
3990 * Log registers if requested.
3991 */
3992 if (fLog2)
3993 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3994
3995 /*
3996 * Disassemble to log.
3997 */
3998 if (fLog)
3999 {
4000 PVMCPU pVCpu = VMMGetCpu(pVM);
4001 char szBuf[256];
4002 szBuf[0] = '\0';
4003 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
4004 pVCpu->idCpu,
4005 0, /* Sel */
4006 0, /* GCPtr */
4007 DBGF_DISAS_FLAGS_CURRENT_GUEST
4008 | DBGF_DISAS_FLAGS_DEFAULT_MODE
4009 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
4010 szBuf,
4011 sizeof(szBuf),
4012 NULL);
4013 if (RT_FAILURE(rc))
4014 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4015 if (pszPrefix && *pszPrefix)
4016 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4017 else
4018 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4019 }
4020
4021 return RT_SUCCESS(rc);
4022}
4023
4024
4025/**
4026 * Disassemble recompiled code.
4027 *
4028 * @param phFileIgnored Ignored, logfile usually.
4029 * @param pvCode Pointer to the code block.
4030 * @param cb Size of the code block.
4031 */
4032void disas(FILE *phFile, void *pvCode, unsigned long cb)
4033{
4034 if (LogIs2Enabled())
4035 {
4036 unsigned off = 0;
4037 char szOutput[256];
4038 DISCPUSTATE Cpu;
4039
4040 memset(&Cpu, 0, sizeof(Cpu));
4041#ifdef RT_ARCH_X86
4042 Cpu.mode = CPUMODE_32BIT;
4043#else
4044 Cpu.mode = CPUMODE_64BIT;
4045#endif
4046
4047 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4048 while (off < cb)
4049 {
4050 uint32_t cbInstr;
4051 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4052 RTLogPrintf("%s", szOutput);
4053 else
4054 {
4055 RTLogPrintf("disas error\n");
4056 cbInstr = 1;
4057#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4058 break;
4059#endif
4060 }
4061 off += cbInstr;
4062 }
4063 }
4064}
4065
4066
4067/**
4068 * Disassemble guest code.
4069 *
4070 * @param phFileIgnored Ignored, logfile usually.
4071 * @param uCode The guest address of the code to disassemble. (flat?)
4072 * @param cb Number of bytes to disassemble.
4073 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4074 */
4075void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4076{
4077 if (LogIs2Enabled())
4078 {
4079 PVM pVM = cpu_single_env->pVM;
4080 PVMCPU pVCpu = cpu_single_env->pVCpu;
4081 RTSEL cs;
4082 RTGCUINTPTR eip;
4083
4084 Assert(pVCpu);
4085
4086 /*
4087 * Update the state so DBGF reads the correct register values (flags).
4088 */
4089 remR3StateUpdate(pVM, pVCpu);
4090
4091 /*
4092 * Do the disassembling.
4093 */
4094 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4095 cs = cpu_single_env->segs[R_CS].selector;
4096 eip = uCode - cpu_single_env->segs[R_CS].base;
4097 for (;;)
4098 {
4099 char szBuf[256];
4100 uint32_t cbInstr;
4101 int rc = DBGFR3DisasInstrEx(pVM,
4102 pVCpu->idCpu,
4103 cs,
4104 eip,
4105 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4106 szBuf, sizeof(szBuf),
4107 &cbInstr);
4108 if (RT_SUCCESS(rc))
4109 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4110 else
4111 {
4112 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4113 cbInstr = 1;
4114 }
4115
4116 /* next */
4117 if (cb <= cbInstr)
4118 break;
4119 cb -= cbInstr;
4120 uCode += cbInstr;
4121 eip += cbInstr;
4122 }
4123 }
4124}
4125
4126
4127/**
4128 * Looks up a guest symbol.
4129 *
4130 * @returns Pointer to symbol name. This is a static buffer.
4131 * @param orig_addr The address in question.
4132 */
4133const char *lookup_symbol(target_ulong orig_addr)
4134{
4135 PVM pVM = cpu_single_env->pVM;
4136 RTGCINTPTR off = 0;
4137 RTDBGSYMBOL Sym;
4138 DBGFADDRESS Addr;
4139
4140 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4141 if (RT_SUCCESS(rc))
4142 {
4143 static char szSym[sizeof(Sym.szName) + 48];
4144 if (!off)
4145 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4146 else if (off > 0)
4147 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4148 else
4149 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4150 return szSym;
4151 }
4152 return "<N/A>";
4153}
4154
4155
4156#undef LOG_GROUP
4157#define LOG_GROUP LOG_GROUP_REM
4158
4159
4160/* -+- FF notifications -+- */
4161
4162
4163/**
4164 * Notification about a pending interrupt.
4165 *
4166 * @param pVM VM Handle.
4167 * @param pVCpu VMCPU Handle.
4168 * @param u8Interrupt Interrupt
4169 * @thread The emulation thread.
4170 */
4171REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4172{
4173 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4174 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4175}
4176
4177/**
4178 * Notification about a pending interrupt.
4179 *
4180 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4181 * @param pVM VM Handle.
4182 * @param pVCpu VMCPU Handle.
4183 * @thread The emulation thread.
4184 */
4185REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4186{
4187 return pVM->rem.s.u32PendingInterrupt;
4188}
4189
4190/**
4191 * Notification about the interrupt FF being set.
4192 *
4193 * @param pVM VM Handle.
4194 * @param pVCpu VMCPU Handle.
4195 * @thread The emulation thread.
4196 */
4197REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4198{
4199#ifndef IEM_VERIFICATION_MODE
4200 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4201 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4202 if (pVM->rem.s.fInREM)
4203 {
4204 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4205 CPU_INTERRUPT_EXTERNAL_HARD);
4206 }
4207#endif
4208}
4209
4210
4211/**
4212 * Notification about the interrupt FF being set.
4213 *
4214 * @param pVM VM Handle.
4215 * @param pVCpu VMCPU Handle.
4216 * @thread Any.
4217 */
4218REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4219{
4220 LogFlow(("REMR3NotifyInterruptClear:\n"));
4221 if (pVM->rem.s.fInREM)
4222 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4223}
4224
4225
4226/**
4227 * Notification about pending timer(s).
4228 *
4229 * @param pVM VM Handle.
4230 * @param pVCpuDst The target cpu for this notification.
4231 * TM will not broadcast pending timer events, but use
4232 * a dedicated EMT for them. So, only interrupt REM
4233 * execution if the given CPU is executing in REM.
4234 * @thread Any.
4235 */
4236REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4237{
4238#ifndef IEM_VERIFICATION_MODE
4239#ifndef DEBUG_bird
4240 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4241#endif
4242 if (pVM->rem.s.fInREM)
4243 {
4244 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4245 {
4246 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4247 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4248 CPU_INTERRUPT_EXTERNAL_TIMER);
4249 }
4250 else
4251 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4252 }
4253 else
4254 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4255#endif
4256}
4257
4258
4259/**
4260 * Notification about pending DMA transfers.
4261 *
4262 * @param pVM VM Handle.
4263 * @thread Any.
4264 */
4265REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4266{
4267#ifndef IEM_VERIFICATION_MODE
4268 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4269 if (pVM->rem.s.fInREM)
4270 {
4271 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4272 CPU_INTERRUPT_EXTERNAL_DMA);
4273 }
4274#endif
4275}
4276
4277
4278/**
4279 * Notification about pending timer(s).
4280 *
4281 * @param pVM VM Handle.
4282 * @thread Any.
4283 */
4284REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4285{
4286#ifndef IEM_VERIFICATION_MODE
4287 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4288 if (pVM->rem.s.fInREM)
4289 {
4290 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4291 CPU_INTERRUPT_EXTERNAL_EXIT);
4292 }
4293#endif
4294}
4295
4296
4297/**
4298 * Notification about pending FF set by an external thread.
4299 *
4300 * @param pVM VM handle.
4301 * @thread Any.
4302 */
4303REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4304{
4305#ifndef IEM_VERIFICATION_MODE
4306 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4307 if (pVM->rem.s.fInREM)
4308 {
4309 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4310 CPU_INTERRUPT_EXTERNAL_EXIT);
4311 }
4312#endif
4313}
4314
4315
4316#ifdef VBOX_WITH_STATISTICS
4317void remR3ProfileStart(int statcode)
4318{
4319 STAMPROFILEADV *pStat;
4320 switch(statcode)
4321 {
4322 case STATS_EMULATE_SINGLE_INSTR:
4323 pStat = &gStatExecuteSingleInstr;
4324 break;
4325 case STATS_QEMU_COMPILATION:
4326 pStat = &gStatCompilationQEmu;
4327 break;
4328 case STATS_QEMU_RUN_EMULATED_CODE:
4329 pStat = &gStatRunCodeQEmu;
4330 break;
4331 case STATS_QEMU_TOTAL:
4332 pStat = &gStatTotalTimeQEmu;
4333 break;
4334 case STATS_QEMU_RUN_TIMERS:
4335 pStat = &gStatTimers;
4336 break;
4337 case STATS_TLB_LOOKUP:
4338 pStat= &gStatTBLookup;
4339 break;
4340 case STATS_IRQ_HANDLING:
4341 pStat= &gStatIRQ;
4342 break;
4343 case STATS_RAW_CHECK:
4344 pStat = &gStatRawCheck;
4345 break;
4346
4347 default:
4348 AssertMsgFailed(("unknown stat %d\n", statcode));
4349 return;
4350 }
4351 STAM_PROFILE_ADV_START(pStat, a);
4352}
4353
4354
4355void remR3ProfileStop(int statcode)
4356{
4357 STAMPROFILEADV *pStat;
4358 switch(statcode)
4359 {
4360 case STATS_EMULATE_SINGLE_INSTR:
4361 pStat = &gStatExecuteSingleInstr;
4362 break;
4363 case STATS_QEMU_COMPILATION:
4364 pStat = &gStatCompilationQEmu;
4365 break;
4366 case STATS_QEMU_RUN_EMULATED_CODE:
4367 pStat = &gStatRunCodeQEmu;
4368 break;
4369 case STATS_QEMU_TOTAL:
4370 pStat = &gStatTotalTimeQEmu;
4371 break;
4372 case STATS_QEMU_RUN_TIMERS:
4373 pStat = &gStatTimers;
4374 break;
4375 case STATS_TLB_LOOKUP:
4376 pStat= &gStatTBLookup;
4377 break;
4378 case STATS_IRQ_HANDLING:
4379 pStat= &gStatIRQ;
4380 break;
4381 case STATS_RAW_CHECK:
4382 pStat = &gStatRawCheck;
4383 break;
4384 default:
4385 AssertMsgFailed(("unknown stat %d\n", statcode));
4386 return;
4387 }
4388 STAM_PROFILE_ADV_STOP(pStat, a);
4389}
4390#endif
4391
4392/**
4393 * Raise an RC, force rem exit.
4394 *
4395 * @param pVM VM handle.
4396 * @param rc The rc.
4397 */
4398void remR3RaiseRC(PVM pVM, int rc)
4399{
4400 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4401 Assert(pVM->rem.s.fInREM);
4402 VM_ASSERT_EMT(pVM);
4403 pVM->rem.s.rc = rc;
4404 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4405}
4406
4407
4408/* -+- timers -+- */
4409
/** qemu callback: reads the virtual TSC via the timer manager (TM). */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4415
4416
4417/* -+- interrupts -+- */
4418
/** qemu callback: asserts IRQ 13 (legacy x87 FERR# reporting) via the ISA bus. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4424
/**
 * qemu callback: fetches the next hardware interrupt vector.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int     rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the interrupt stashed by REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts may still be pending; keep the hard-interrupt request up. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4458
4459
4460/* -+- local apic -+- */
4461
4462#if 0 /* CPUMSetGuestMsr does this now. */
/* Disabled (#if 0 above): superseded by CPUMSetGuestMsr; kept for reference. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4468#endif
4469
4470uint64_t cpu_get_apic_base(CPUX86State *env)
4471{
4472 uint64_t u64;
4473 int rc = PDMApicGetBase(env->pVM, &u64);
4474 if (RT_SUCCESS(rc))
4475 {
4476 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4477 return u64;
4478 }
4479 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4480 return 0;
4481}
4482
/** qemu callback: writes CR8 / the task priority register via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4488
4489uint8_t cpu_get_apic_tpr(CPUX86State *env)
4490{
4491 uint8_t u8;
4492 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4493 if (RT_SUCCESS(rc))
4494 {
4495 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4496 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4497 }
4498 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4499 return 0;
4500}
4501
4502/**
4503 * Read an MSR.
4504 *
4505 * @retval 0 success.
4506 * @retval -1 failure, raise \#GP(0).
4507 * @param env The cpu state.
4508 * @param idMsr The MSR to read.
4509 * @param puValue Where to return the value.
4510 */
4511int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4512{
4513 Assert(env->pVCpu);
4514 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4515}
4516
4517/**
4518 * Write to an MSR.
4519 *
4520 * @retval 0 success.
4521 * @retval -1 failure, raise \#GP(0).
4522 * @param env The cpu state.
4523 * @param idMsr The MSR to read.
4524 * @param puValue Where to return the value.
4525 */
4526int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4527{
4528 Assert(env->pVCpu);
4529 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4530}
4531
4532/* -+- I/O Ports -+- */
4533
4534#undef LOG_GROUP
4535#define LOG_GROUP LOG_GROUP_REM_IOPORT
4536
/** qemu callback: 8-bit port-out, forwarded to IOM. */
void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
{
    int rc;

    /* Skip logging for ports 0x80/0x70/0x61 -- presumably because they are
       written extremely frequently (POST/delay, CMOS index, system port). */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM status codes: stash the rc and force a REM exit instead of aborting. */
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4555
4556void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4557{
4558 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4559 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4560 if (RT_LIKELY(rc == VINF_SUCCESS))
4561 return;
4562 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4563 {
4564 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4565 remR3RaiseRC(env->pVM, rc);
4566 return;
4567 }
4568 remAbort(rc, __FUNCTION__);
4569}
4570
/** qemu callback: 32-bit port-out, forwarded to IOM. */
void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
{
    int rc;
    Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM status codes: stash the rc and force a REM exit instead of aborting. */
        Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4586
/** qemu callback: 8-bit port-in via IOM; returns 0xff on hard failure. */
uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (uint8_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM status codes: stash the rc, force a REM exit, and return what we got. */
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint8_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT8_C(0xff);
}
4606
4607uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4608{
4609 uint32_t u32 = 0;
4610 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4611 if (RT_LIKELY(rc == VINF_SUCCESS))
4612 {
4613 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4614 return (uint16_t)u32;
4615 }
4616 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4617 {
4618 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4619 remR3RaiseRC(env->pVM, rc);
4620 return (uint16_t)u32;
4621 }
4622 remAbort(rc, __FUNCTION__);
4623 return UINT16_C(0xffff);
4624}
4625
/** qemu callback: 32-bit port-in via IOM; returns 0xffffffff on hard failure. */
uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
//if (addr==0x01f0 && u32 == 0x6b6d)
//    loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM status codes: stash the rc, force a REM exit, and return what we got. */
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT32_C(0xffffffff);
}
4646
4647#undef LOG_GROUP
4648#define LOG_GROUP LOG_GROUP_REM
4649
4650
4651/* -+- helpers and misc other interfaces -+- */
4652
4653/**
4654 * Perform the CPUID instruction.
4655 *
4656 * @param env Pointer to the recompiler CPU structure.
4657 * @param idx The CPUID leaf (eax).
4658 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4659 * @param pvEAX Where to store eax.
4660 * @param pvEBX Where to store ebx.
4661 * @param pvECX Where to store ecx.
4662 * @param pvEDX Where to store edx.
4663 */
4664void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4665 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4666{
4667 NOREF(idxSub);
4668 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4669}
4670
4671
4672#if 0 /* not used */
4673/**
4674 * Interface for qemu hardware to report back fatal errors.
4675 */
4676void hw_error(const char *pszFormat, ...)
4677{
4678 /*
4679 * Bitch about it.
4680 */
4681 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4682 * this in my Odin32 tree at home! */
4683 va_list args;
4684 va_start(args, pszFormat);
4685 RTLogPrintf("fatal error in virtual hardware:");
4686 RTLogPrintfV(pszFormat, args);
4687 va_end(args);
4688 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4689
4690 /*
4691 * If we're in REM context we'll sync back the state before 'jumping' to
4692 * the EMs failure handling.
4693 */
4694 PVM pVM = cpu_single_env->pVM;
4695 if (pVM->rem.s.fInREM)
4696 REMR3StateBack(pVM);
4697 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4698 AssertMsgFailed(("EMR3FatalError returned!\n"));
4699}
4700#endif
4701
4702/**
4703 * Interface for the qemu cpu to report unhandled situation
4704 * raising a fatal VM error.
4705 */
4706void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4707{
4708 va_list va;
4709 PVM pVM;
4710 PVMCPU pVCpu;
4711 char szMsg[256];
4712
4713 /*
4714 * Bitch about it.
4715 */
4716 RTLogFlags(NULL, "nodisabled nobuffered");
4717 RTLogFlush(NULL);
4718
4719 va_start(va, pszFormat);
4720#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4721 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4722 unsigned cArgs = 0;
4723 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4724 const char *psz = strchr(pszFormat, '%');
4725 while (psz && cArgs < 6)
4726 {
4727 auArgs[cArgs++] = va_arg(va, uintptr_t);
4728 psz = strchr(psz + 1, '%');
4729 }
4730 switch (cArgs)
4731 {
4732 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4733 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4734 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4735 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4736 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4737 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4738 default:
4739 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4740 }
4741#else
4742 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4743#endif
4744 va_end(va);
4745
4746 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4747 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4748
4749 /*
4750 * If we're in REM context we'll sync back the state before 'jumping' to
4751 * the EMs failure handling.
4752 */
4753 pVM = cpu_single_env->pVM;
4754 pVCpu = cpu_single_env->pVCpu;
4755 Assert(pVCpu);
4756
4757 if (pVM->rem.s.fInREM)
4758 REMR3StateBack(pVM, pVCpu);
4759 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4760 AssertMsgFailed(("EMR3FatalError returned!\n"));
4761}
4762
4763
4764/**
4765 * Aborts the VM.
4766 *
4767 * @param rc VBox error code.
4768 * @param pszTip Hint about why/when this happened.
4769 */
4770void remAbort(int rc, const char *pszTip)
4771{
4772 PVM pVM;
4773 PVMCPU pVCpu;
4774
4775 /*
4776 * Bitch about it.
4777 */
4778 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4779 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4780
4781 /*
4782 * Jump back to where we entered the recompiler.
4783 */
4784 pVM = cpu_single_env->pVM;
4785 pVCpu = cpu_single_env->pVCpu;
4786 Assert(pVCpu);
4787
4788 if (pVM->rem.s.fInREM)
4789 REMR3StateBack(pVM, pVCpu);
4790
4791 EMR3FatalError(pVCpu, rc);
4792 AssertMsgFailed(("EMR3FatalError returned!\n"));
4793}
4794
4795
4796/**
4797 * Dumps a linux system call.
4798 * @param pVCpu VMCPU handle.
4799 */
4800void remR3DumpLnxSyscall(PVMCPU pVCpu)
4801{
4802 static const char *apsz[] =
4803 {
4804 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4805 "sys_exit",
4806 "sys_fork",
4807 "sys_read",
4808 "sys_write",
4809 "sys_open", /* 5 */
4810 "sys_close",
4811 "sys_waitpid",
4812 "sys_creat",
4813 "sys_link",
4814 "sys_unlink", /* 10 */
4815 "sys_execve",
4816 "sys_chdir",
4817 "sys_time",
4818 "sys_mknod",
4819 "sys_chmod", /* 15 */
4820 "sys_lchown16",
4821 "sys_ni_syscall", /* old break syscall holder */
4822 "sys_stat",
4823 "sys_lseek",
4824 "sys_getpid", /* 20 */
4825 "sys_mount",
4826 "sys_oldumount",
4827 "sys_setuid16",
4828 "sys_getuid16",
4829 "sys_stime", /* 25 */
4830 "sys_ptrace",
4831 "sys_alarm",
4832 "sys_fstat",
4833 "sys_pause",
4834 "sys_utime", /* 30 */
4835 "sys_ni_syscall", /* old stty syscall holder */
4836 "sys_ni_syscall", /* old gtty syscall holder */
4837 "sys_access",
4838 "sys_nice",
4839 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4840 "sys_sync",
4841 "sys_kill",
4842 "sys_rename",
4843 "sys_mkdir",
4844 "sys_rmdir", /* 40 */
4845 "sys_dup",
4846 "sys_pipe",
4847 "sys_times",
4848 "sys_ni_syscall", /* old prof syscall holder */
4849 "sys_brk", /* 45 */
4850 "sys_setgid16",
4851 "sys_getgid16",
4852 "sys_signal",
4853 "sys_geteuid16",
4854 "sys_getegid16", /* 50 */
4855 "sys_acct",
4856 "sys_umount", /* recycled never used phys() */
4857 "sys_ni_syscall", /* old lock syscall holder */
4858 "sys_ioctl",
4859 "sys_fcntl", /* 55 */
4860 "sys_ni_syscall", /* old mpx syscall holder */
4861 "sys_setpgid",
4862 "sys_ni_syscall", /* old ulimit syscall holder */
4863 "sys_olduname",
4864 "sys_umask", /* 60 */
4865 "sys_chroot",
4866 "sys_ustat",
4867 "sys_dup2",
4868 "sys_getppid",
4869 "sys_getpgrp", /* 65 */
4870 "sys_setsid",
4871 "sys_sigaction",
4872 "sys_sgetmask",
4873 "sys_ssetmask",
4874 "sys_setreuid16", /* 70 */
4875 "sys_setregid16",
4876 "sys_sigsuspend",
4877 "sys_sigpending",
4878 "sys_sethostname",
4879 "sys_setrlimit", /* 75 */
4880 "sys_old_getrlimit",
4881 "sys_getrusage",
4882 "sys_gettimeofday",
4883 "sys_settimeofday",
4884 "sys_getgroups16", /* 80 */
4885 "sys_setgroups16",
4886 "old_select",
4887 "sys_symlink",
4888 "sys_lstat",
4889 "sys_readlink", /* 85 */
4890 "sys_uselib",
4891 "sys_swapon",
4892 "sys_reboot",
4893 "old_readdir",
4894 "old_mmap", /* 90 */
4895 "sys_munmap",
4896 "sys_truncate",
4897 "sys_ftruncate",
4898 "sys_fchmod",
4899 "sys_fchown16", /* 95 */
4900 "sys_getpriority",
4901 "sys_setpriority",
4902 "sys_ni_syscall", /* old profil syscall holder */
4903 "sys_statfs",
4904 "sys_fstatfs", /* 100 */
4905 "sys_ioperm",
4906 "sys_socketcall",
4907 "sys_syslog",
4908 "sys_setitimer",
4909 "sys_getitimer", /* 105 */
4910 "sys_newstat",
4911 "sys_newlstat",
4912 "sys_newfstat",
4913 "sys_uname",
4914 "sys_iopl", /* 110 */
4915 "sys_vhangup",
4916 "sys_ni_syscall", /* old "idle" system call */
4917 "sys_vm86old",
4918 "sys_wait4",
4919 "sys_swapoff", /* 115 */
4920 "sys_sysinfo",
4921 "sys_ipc",
4922 "sys_fsync",
4923 "sys_sigreturn",
4924 "sys_clone", /* 120 */
4925 "sys_setdomainname",
4926 "sys_newuname",
4927 "sys_modify_ldt",
4928 "sys_adjtimex",
4929 "sys_mprotect", /* 125 */
4930 "sys_sigprocmask",
4931 "sys_ni_syscall", /* old "create_module" */
4932 "sys_init_module",
4933 "sys_delete_module",
4934 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4935 "sys_quotactl",
4936 "sys_getpgid",
4937 "sys_fchdir",
4938 "sys_bdflush",
4939 "sys_sysfs", /* 135 */
4940 "sys_personality",
4941 "sys_ni_syscall", /* reserved for afs_syscall */
4942 "sys_setfsuid16",
4943 "sys_setfsgid16",
4944 "sys_llseek", /* 140 */
4945 "sys_getdents",
4946 "sys_select",
4947 "sys_flock",
4948 "sys_msync",
4949 "sys_readv", /* 145 */
4950 "sys_writev",
4951 "sys_getsid",
4952 "sys_fdatasync",
4953 "sys_sysctl",
4954 "sys_mlock", /* 150 */
4955 "sys_munlock",
4956 "sys_mlockall",
4957 "sys_munlockall",
4958 "sys_sched_setparam",
4959 "sys_sched_getparam", /* 155 */
4960 "sys_sched_setscheduler",
4961 "sys_sched_getscheduler",
4962 "sys_sched_yield",
4963 "sys_sched_get_priority_max",
4964 "sys_sched_get_priority_min", /* 160 */
4965 "sys_sched_rr_get_interval",
4966 "sys_nanosleep",
4967 "sys_mremap",
4968 "sys_setresuid16",
4969 "sys_getresuid16", /* 165 */
4970 "sys_vm86",
4971 "sys_ni_syscall", /* Old sys_query_module */
4972 "sys_poll",
4973 "sys_nfsservctl",
4974 "sys_setresgid16", /* 170 */
4975 "sys_getresgid16",
4976 "sys_prctl",
4977 "sys_rt_sigreturn",
4978 "sys_rt_sigaction",
4979 "sys_rt_sigprocmask", /* 175 */
4980 "sys_rt_sigpending",
4981 "sys_rt_sigtimedwait",
4982 "sys_rt_sigqueueinfo",
4983 "sys_rt_sigsuspend",
4984 "sys_pread64", /* 180 */
4985 "sys_pwrite64",
4986 "sys_chown16",
4987 "sys_getcwd",
4988 "sys_capget",
4989 "sys_capset", /* 185 */
4990 "sys_sigaltstack",
4991 "sys_sendfile",
4992 "sys_ni_syscall", /* reserved for streams1 */
4993 "sys_ni_syscall", /* reserved for streams2 */
4994 "sys_vfork", /* 190 */
4995 "sys_getrlimit",
4996 "sys_mmap2",
4997 "sys_truncate64",
4998 "sys_ftruncate64",
4999 "sys_stat64", /* 195 */
5000 "sys_lstat64",
5001 "sys_fstat64",
5002 "sys_lchown",
5003 "sys_getuid",
5004 "sys_getgid", /* 200 */
5005 "sys_geteuid",
5006 "sys_getegid",
5007 "sys_setreuid",
5008 "sys_setregid",
5009 "sys_getgroups", /* 205 */
5010 "sys_setgroups",
5011 "sys_fchown",
5012 "sys_setresuid",
5013 "sys_getresuid",
5014 "sys_setresgid", /* 210 */
5015 "sys_getresgid",
5016 "sys_chown",
5017 "sys_setuid",
5018 "sys_setgid",
5019 "sys_setfsuid", /* 215 */
5020 "sys_setfsgid",
5021 "sys_pivot_root",
5022 "sys_mincore",
5023 "sys_madvise",
5024 "sys_getdents64", /* 220 */
5025 "sys_fcntl64",
5026 "sys_ni_syscall", /* reserved for TUX */
5027 "sys_ni_syscall",
5028 "sys_gettid",
5029 "sys_readahead", /* 225 */
5030 "sys_setxattr",
5031 "sys_lsetxattr",
5032 "sys_fsetxattr",
5033 "sys_getxattr",
5034 "sys_lgetxattr", /* 230 */
5035 "sys_fgetxattr",
5036 "sys_listxattr",
5037 "sys_llistxattr",
5038 "sys_flistxattr",
5039 "sys_removexattr", /* 235 */
5040 "sys_lremovexattr",
5041 "sys_fremovexattr",
5042 "sys_tkill",
5043 "sys_sendfile64",
5044 "sys_futex", /* 240 */
5045 "sys_sched_setaffinity",
5046 "sys_sched_getaffinity",
5047 "sys_set_thread_area",
5048 "sys_get_thread_area",
5049 "sys_io_setup", /* 245 */
5050 "sys_io_destroy",
5051 "sys_io_getevents",
5052 "sys_io_submit",
5053 "sys_io_cancel",
5054 "sys_fadvise64", /* 250 */
5055 "sys_ni_syscall",
5056 "sys_exit_group",
5057 "sys_lookup_dcookie",
5058 "sys_epoll_create",
5059 "sys_epoll_ctl", /* 255 */
5060 "sys_epoll_wait",
5061 "sys_remap_file_pages",
5062 "sys_set_tid_address",
5063 "sys_timer_create",
5064 "sys_timer_settime", /* 260 */
5065 "sys_timer_gettime",
5066 "sys_timer_getoverrun",
5067 "sys_timer_delete",
5068 "sys_clock_settime",
5069 "sys_clock_gettime", /* 265 */
5070 "sys_clock_getres",
5071 "sys_clock_nanosleep",
5072 "sys_statfs64",
5073 "sys_fstatfs64",
5074 "sys_tgkill", /* 270 */
5075 "sys_utimes",
5076 "sys_fadvise64_64",
5077 "sys_ni_syscall" /* sys_vserver */
5078 };
5079
5080 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5081 switch (uEAX)
5082 {
5083 default:
5084 if (uEAX < RT_ELEMENTS(apsz))
5085 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5086 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5087 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5088 else
5089 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5090 break;
5091
5092 }
5093}
5094
5095
5096/**
5097 * Dumps an OpenBSD system call.
5098 * @param pVCpu VMCPU handle.
5099 */
5100void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5101{
5102 static const char *apsz[] =
5103 {
5104 "SYS_syscall", //0
5105 "SYS_exit", //1
5106 "SYS_fork", //2
5107 "SYS_read", //3
5108 "SYS_write", //4
5109 "SYS_open", //5
5110 "SYS_close", //6
5111 "SYS_wait4", //7
5112 "SYS_8",
5113 "SYS_link", //9
5114 "SYS_unlink", //10
5115 "SYS_11",
5116 "SYS_chdir", //12
5117 "SYS_fchdir", //13
5118 "SYS_mknod", //14
5119 "SYS_chmod", //15
5120 "SYS_chown", //16
5121 "SYS_break", //17
5122 "SYS_18",
5123 "SYS_19",
5124 "SYS_getpid", //20
5125 "SYS_mount", //21
5126 "SYS_unmount", //22
5127 "SYS_setuid", //23
5128 "SYS_getuid", //24
5129 "SYS_geteuid", //25
5130 "SYS_ptrace", //26
5131 "SYS_recvmsg", //27
5132 "SYS_sendmsg", //28
5133 "SYS_recvfrom", //29
5134 "SYS_accept", //30
5135 "SYS_getpeername", //31
5136 "SYS_getsockname", //32
5137 "SYS_access", //33
5138 "SYS_chflags", //34
5139 "SYS_fchflags", //35
5140 "SYS_sync", //36
5141 "SYS_kill", //37
5142 "SYS_38",
5143 "SYS_getppid", //39
5144 "SYS_40",
5145 "SYS_dup", //41
5146 "SYS_opipe", //42
5147 "SYS_getegid", //43
5148 "SYS_profil", //44
5149 "SYS_ktrace", //45
5150 "SYS_sigaction", //46
5151 "SYS_getgid", //47
5152 "SYS_sigprocmask", //48
5153 "SYS_getlogin", //49
5154 "SYS_setlogin", //50
5155 "SYS_acct", //51
5156 "SYS_sigpending", //52
5157 "SYS_osigaltstack", //53
5158 "SYS_ioctl", //54
5159 "SYS_reboot", //55
5160 "SYS_revoke", //56
5161 "SYS_symlink", //57
5162 "SYS_readlink", //58
5163 "SYS_execve", //59
5164 "SYS_umask", //60
5165 "SYS_chroot", //61
5166 "SYS_62",
5167 "SYS_63",
5168 "SYS_64",
5169 "SYS_65",
5170 "SYS_vfork", //66
5171 "SYS_67",
5172 "SYS_68",
5173 "SYS_sbrk", //69
5174 "SYS_sstk", //70
5175 "SYS_61",
5176 "SYS_vadvise", //72
5177 "SYS_munmap", //73
5178 "SYS_mprotect", //74
5179 "SYS_madvise", //75
5180 "SYS_76",
5181 "SYS_77",
5182 "SYS_mincore", //78
5183 "SYS_getgroups", //79
5184 "SYS_setgroups", //80
5185 "SYS_getpgrp", //81
5186 "SYS_setpgid", //82
5187 "SYS_setitimer", //83
5188 "SYS_84",
5189 "SYS_85",
5190 "SYS_getitimer", //86
5191 "SYS_87",
5192 "SYS_88",
5193 "SYS_89",
5194 "SYS_dup2", //90
5195 "SYS_91",
5196 "SYS_fcntl", //92
5197 "SYS_select", //93
5198 "SYS_94",
5199 "SYS_fsync", //95
5200 "SYS_setpriority", //96
5201 "SYS_socket", //97
5202 "SYS_connect", //98
5203 "SYS_99",
5204 "SYS_getpriority", //100
5205 "SYS_101",
5206 "SYS_102",
5207 "SYS_sigreturn", //103
5208 "SYS_bind", //104
5209 "SYS_setsockopt", //105
5210 "SYS_listen", //106
5211 "SYS_107",
5212 "SYS_108",
5213 "SYS_109",
5214 "SYS_110",
5215 "SYS_sigsuspend", //111
5216 "SYS_112",
5217 "SYS_113",
5218 "SYS_114",
5219 "SYS_115",
5220 "SYS_gettimeofday", //116
5221 "SYS_getrusage", //117
5222 "SYS_getsockopt", //118
5223 "SYS_119",
5224 "SYS_readv", //120
5225 "SYS_writev", //121
5226 "SYS_settimeofday", //122
5227 "SYS_fchown", //123
5228 "SYS_fchmod", //124
5229 "SYS_125",
5230 "SYS_setreuid", //126
5231 "SYS_setregid", //127
5232 "SYS_rename", //128
5233 "SYS_129",
5234 "SYS_130",
5235 "SYS_flock", //131
5236 "SYS_mkfifo", //132
5237 "SYS_sendto", //133
5238 "SYS_shutdown", //134
5239 "SYS_socketpair", //135
5240 "SYS_mkdir", //136
5241 "SYS_rmdir", //137
5242 "SYS_utimes", //138
5243 "SYS_139",
5244 "SYS_adjtime", //140
5245 "SYS_141",
5246 "SYS_142",
5247 "SYS_143",
5248 "SYS_144",
5249 "SYS_145",
5250 "SYS_146",
5251 "SYS_setsid", //147
5252 "SYS_quotactl", //148
5253 "SYS_149",
5254 "SYS_150",
5255 "SYS_151",
5256 "SYS_152",
5257 "SYS_153",
5258 "SYS_154",
5259 "SYS_nfssvc", //155
5260 "SYS_156",
5261 "SYS_157",
5262 "SYS_158",
5263 "SYS_159",
5264 "SYS_160",
5265 "SYS_getfh", //161
5266 "SYS_162",
5267 "SYS_163",
5268 "SYS_164",
5269 "SYS_sysarch", //165
5270 "SYS_166",
5271 "SYS_167",
5272 "SYS_168",
5273 "SYS_169",
5274 "SYS_170",
5275 "SYS_171",
5276 "SYS_172",
5277 "SYS_pread", //173
5278 "SYS_pwrite", //174
5279 "SYS_175",
5280 "SYS_176",
5281 "SYS_177",
5282 "SYS_178",
5283 "SYS_179",
5284 "SYS_180",
5285 "SYS_setgid", //181
5286 "SYS_setegid", //182
5287 "SYS_seteuid", //183
5288 "SYS_lfs_bmapv", //184
5289 "SYS_lfs_markv", //185
5290 "SYS_lfs_segclean", //186
5291 "SYS_lfs_segwait", //187
5292 "SYS_188",
5293 "SYS_189",
5294 "SYS_190",
5295 "SYS_pathconf", //191
5296 "SYS_fpathconf", //192
5297 "SYS_swapctl", //193
5298 "SYS_getrlimit", //194
5299 "SYS_setrlimit", //195
5300 "SYS_getdirentries", //196
5301 "SYS_mmap", //197
5302 "SYS___syscall", //198
5303 "SYS_lseek", //199
5304 "SYS_truncate", //200
5305 "SYS_ftruncate", //201
5306 "SYS___sysctl", //202
5307 "SYS_mlock", //203
5308 "SYS_munlock", //204
5309 "SYS_205",
5310 "SYS_futimes", //206
5311 "SYS_getpgid", //207
5312 "SYS_xfspioctl", //208
5313 "SYS_209",
5314 "SYS_210",
5315 "SYS_211",
5316 "SYS_212",
5317 "SYS_213",
5318 "SYS_214",
5319 "SYS_215",
5320 "SYS_216",
5321 "SYS_217",
5322 "SYS_218",
5323 "SYS_219",
5324 "SYS_220",
5325 "SYS_semget", //221
5326 "SYS_222",
5327 "SYS_223",
5328 "SYS_224",
5329 "SYS_msgget", //225
5330 "SYS_msgsnd", //226
5331 "SYS_msgrcv", //227
5332 "SYS_shmat", //228
5333 "SYS_229",
5334 "SYS_shmdt", //230
5335 "SYS_231",
5336 "SYS_clock_gettime", //232
5337 "SYS_clock_settime", //233
5338 "SYS_clock_getres", //234
5339 "SYS_235",
5340 "SYS_236",
5341 "SYS_237",
5342 "SYS_238",
5343 "SYS_239",
5344 "SYS_nanosleep", //240
5345 "SYS_241",
5346 "SYS_242",
5347 "SYS_243",
5348 "SYS_244",
5349 "SYS_245",
5350 "SYS_246",
5351 "SYS_247",
5352 "SYS_248",
5353 "SYS_249",
5354 "SYS_minherit", //250
5355 "SYS_rfork", //251
5356 "SYS_poll", //252
5357 "SYS_issetugid", //253
5358 "SYS_lchown", //254
5359 "SYS_getsid", //255
5360 "SYS_msync", //256
5361 "SYS_257",
5362 "SYS_258",
5363 "SYS_259",
5364 "SYS_getfsstat", //260
5365 "SYS_statfs", //261
5366 "SYS_fstatfs", //262
5367 "SYS_pipe", //263
5368 "SYS_fhopen", //264
5369 "SYS_265",
5370 "SYS_fhstatfs", //266
5371 "SYS_preadv", //267
5372 "SYS_pwritev", //268
5373 "SYS_kqueue", //269
5374 "SYS_kevent", //270
5375 "SYS_mlockall", //271
5376 "SYS_munlockall", //272
5377 "SYS_getpeereid", //273
5378 "SYS_274",
5379 "SYS_275",
5380 "SYS_276",
5381 "SYS_277",
5382 "SYS_278",
5383 "SYS_279",
5384 "SYS_280",
5385 "SYS_getresuid", //281
5386 "SYS_setresuid", //282
5387 "SYS_getresgid", //283
5388 "SYS_setresgid", //284
5389 "SYS_285",
5390 "SYS_mquery", //286
5391 "SYS_closefrom", //287
5392 "SYS_sigaltstack", //288
5393 "SYS_shmget", //289
5394 "SYS_semop", //290
5395 "SYS_stat", //291
5396 "SYS_fstat", //292
5397 "SYS_lstat", //293
5398 "SYS_fhstat", //294
5399 "SYS___semctl", //295
5400 "SYS_shmctl", //296
5401 "SYS_msgctl", //297
5402 "SYS_MAXSYSCALL", //298
5403 //299
5404 //300
5405 };
5406 uint32_t uEAX;
5407 if (!LogIsEnabled())
5408 return;
5409 uEAX = CPUMGetGuestEAX(pVCpu);
5410 switch (uEAX)
5411 {
5412 default:
5413 if (uEAX < RT_ELEMENTS(apsz))
5414 {
5415 uint32_t au32Args[8] = {0};
5416 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5417 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5418 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5419 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5420 }
5421 else
5422 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5423 break;
5424 }
5425}
5426
5427
5428#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5429/**
5430 * The Dll main entry point (stub).
5431 */
5432bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5433{
5434 return true;
5435}
5436
/**
 * Replacement memcpy for no-CRT builds - plain forward byte copy.
 *
 * @returns dst.
 * @param   dst     Destination buffer (must not overlap src).
 * @param   src     Source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* fix: don't strip the const qualifier from src */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5444
5445#endif
5446
/**
 * Intentional no-op stub: the recompiler keeps no SMM-dependent state here.
 * NOTE(review): presumably invoked by the QEMU core when the CPU's SMM
 * mode changes - confirm against the QEMU sources this file is paired with.
 */
void cpu_smm_update(CPUX86State *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette