VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 37691

Last change on this file since 37691 was 37689, checked in by vboxsync, 13 years ago

recompiler: Merged in changes from 0.13.0.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 179.3 KB
Line 
1/* $Id: VBoxRecompiler.c 37689 2011-06-29 16:01:23Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
76/** Copy 80-bit fpu register at pSrc to pDst.
77 * This is probably faster than *calling* memcpy.
78 */
79#define REM_COPY_FPU_REG(pDst, pSrc) \
80 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
81
82/** How remR3RunLoggingStep operates. */
83#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day we have nothing do to. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMPROFILE gStatHCVirt2GCPhys;
130static STAMCOUNTER gStatCpuGetTSC;
131static STAMCOUNTER gStatRefuseTFInhibit;
132static STAMCOUNTER gStatRefuseVM86;
133static STAMCOUNTER gStatRefusePaging;
134static STAMCOUNTER gStatRefusePAE;
135static STAMCOUNTER gStatRefuseIOPLNot0;
136static STAMCOUNTER gStatRefuseIF0;
137static STAMCOUNTER gStatRefuseCode16;
138static STAMCOUNTER gStatRefuseWP0;
139static STAMCOUNTER gStatRefuseRing1or2;
140static STAMCOUNTER gStatRefuseCanExecute;
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
158/** MMIO read callbacks. */
159CPUReadMemoryFunc *g_apfnMMIORead[3] =
160{
161 remR3MMIOReadU8,
162 remR3MMIOReadU16,
163 remR3MMIOReadU32
164};
165
166/** MMIO write callbacks. */
167CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
168{
169 remR3MMIOWriteU8,
170 remR3MMIOWriteU16,
171 remR3MMIOWriteU32
172};
173
174/** Handler read callbacks. */
175CPUReadMemoryFunc *g_apfnHandlerRead[3] =
176{
177 remR3HandlerReadU8,
178 remR3HandlerReadU16,
179 remR3HandlerReadU32
180};
181
182/** Handler write callbacks. */
183CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
184{
185 remR3HandlerWriteU8,
186 remR3HandlerWriteU16,
187 remR3HandlerWriteU32
188};
189
190
191#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
192/*
193 * Debugger commands.
194 */
195static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
196
197/** '.remstep' arguments. */
198static const DBGCVARDESC g_aArgRemStep[] =
199{
200 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
201 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
202};
203
204/** Command descriptors. */
205static const DBGCCMD g_aCmds[] =
206{
207 {
208 .pszCmd ="remstep",
209 .cArgsMin = 0,
210 .cArgsMax = 1,
211 .paArgDescs = &g_aArgRemStep[0],
212 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
213 .fFlags = 0,
214 .pfnHandler = remR3CmdDisasEnableStepping,
215 .pszSyntax = "[on/off]",
216 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
217 "If no arguments show the current state."
218 }
219};
220#endif
221
222/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
223uint8_t *code_gen_prologue;
224
225
226/*******************************************************************************
227* Internal Functions *
228*******************************************************************************/
229void remAbort(int rc, const char *pszTip);
230extern int testmath(void);
231
232/* Put them here to avoid unused variable warning. */
233AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
234#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
235//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
236/* Why did this have to be identical?? */
237AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
238#else
239AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
240#endif
241
242
/**
 * Initializes the REM.
 *
 * Sets up the recompiler (QEMU) CPU environment, registers the MMIO/handler
 * memory callbacks, hooks up the saved state unit, debugger command and
 * statistics, and primes the handler-notification free list.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    /* Raised here and lowered again after the io-memory registrations below, so
       REM doesn't react to the registrations caused by its own setup. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue code must be in the lower 4G so the translated code can jump
       to/from it with 32-bit relative branches; hence the exec allocation. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's CPUID feature bits (std + ext leaves) into the QEMU env. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1,          &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    cpu_reset(&pVM->rem.s.Env);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    /* Registered once per process, not per VM, hence the static flag. */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* NOTE(review): this 'rc' intentionally shadows the function-level one so a
           command registration failure doesn't affect the function's return value. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",         STAMUNIT_OCCURENCES,     "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES,     "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",     STAMUNIT_OCCURENCES,     "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",   STAMUNIT_OCCURENCES,     "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",      STAMUNIT_OCCURENCES,     "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES,     "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",      STAMUNIT_OCCURENCES,     "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",   STAMUNIT_OCCURENCES,     "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",      STAMUNIT_OCCURENCES,     "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES,     "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",         STAMUNIT_OCCURENCES,     "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",  STAMUNIT_OCCURENCES,     "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",     STAMUNIT_OCCURENCES,     "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",  STAMUNIT_OCCURENCES,     "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",       STAMUNIT_OCCURENCES,     "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_REG(pVM, &tb_flush_count,              STAMTYPE_U32_RESET, "/REM/TbFlushCount",        STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,    STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",    STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,             STAMTYPE_U32_RESET, "/REM/TlbFlushCount",       STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     */
    /* Build a singly linked free list over the whole aHandlerNotifications
       array; the last record gets UINT32_MAX as its end-of-list marker. */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    /* rc is the (successful) status of the SSM registration at this point. */
    return rc;
}
439
440
441/**
442 * Finalizes the REM initialization.
443 *
444 * This is called after all components, devices and drivers has
445 * been initialized. Its main purpose it to finish the RAM related
446 * initialization.
447 *
448 * @returns VBox status code.
449 *
450 * @param pVM The VM handle.
451 */
452REMR3DECL(int) REMR3InitFinalize(PVM pVM)
453{
454 int rc;
455
456 /*
457 * Ram size & dirty bit map.
458 */
459 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
460 pVM->rem.s.fGCPhysLastRamFixed = true;
461#ifdef RT_STRICT
462 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
463#else
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
465#endif
466 return rc;
467}
468
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * The dirty map covers one byte per guest page up to and including
 * GCPhysLastRam.  In guarded mode the map is followed by an inaccessible
 * region so overruns fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM blocks are registered with the recompiler. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    /* The +1 above wraps to 0 if GCPhysLastRam is the max address - catch that. */
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    /* One dirty byte per page. */
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail of the allocation inaccessible - that's the guard. */
        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map pointer forward so its last byte ends exactly at the
           guard region, making off-by-one overruns fault immediately. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it. */
    /* 0xff = all pages dirty, forcing a full resync on first use. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
525
526
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Deregisters every statistics sample registered by REMR3Init().
 * NOTE(review): the exec buffers (code_gen_prologue, pvCodeBuffer) are not
 * freed here - presumably released with the process/VM; confirm if reusing.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters mirrored from exec.c. */
    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
597
598
599/**
600 * The VM is being reset.
601 *
602 * For the REM component this means to call the cpu_reset() and
603 * reinitialize some state variables.
604 *
605 * @param pVM VM handle.
606 */
607REMR3DECL(void) REMR3Reset(PVM pVM)
608{
609 /*
610 * Reset the REM cpu.
611 */
612 Assert(pVM->rem.s.cIgnoreAll == 0);
613 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
614 cpu_reset(&pVM->rem.s.Env);
615 pVM->rem.s.cInvalidatedPages = 0;
616 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
617 Assert(pVM->rem.s.cIgnoreAll == 0);
618
619 /* Clear raw ring 0 init state */
620 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
621
622 /* Flush the TBs the next time we execute code here. */
623 pVM->rem.s.fFlushTBs = true;
624}
625
626
627/**
628 * Execute state save operation.
629 *
630 * @returns VBox status code.
631 * @param pVM VM Handle.
632 * @param pSSM SSM operation handle.
633 */
634static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
635{
636 PREM pRem = &pVM->rem.s;
637
638 /*
639 * Save the required CPU Env bits.
640 * (Not much because we're never in REM when doing the save.)
641 */
642 LogFlow(("remR3Save:\n"));
643 Assert(!pRem->fInREM);
644 SSMR3PutU32(pSSM, pRem->Env.hflags);
645 SSMR3PutU32(pSSM, ~0); /* separator */
646
647 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
648 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
649 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
650
651 return SSMR3PutU32(pSSM, ~0); /* terminator */
652}
653
654
655/**
656 * Execute state load operation.
657 *
658 * @returns VBox status code.
659 * @param pVM VM Handle.
660 * @param pSSM SSM operation handle.
661 * @param uVersion Data layout version.
662 * @param uPass The data pass.
663 */
664static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
665{
666 uint32_t u32Dummy;
667 uint32_t fRawRing0 = false;
668 uint32_t u32Sep;
669 uint32_t i;
670 int rc;
671 PREM pRem;
672
673 LogFlow(("remR3Load:\n"));
674 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
675
676 /*
677 * Validate version.
678 */
679 if ( uVersion != REM_SAVED_STATE_VERSION
680 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
681 {
682 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
683 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
684 }
685
686 /*
687 * Do a reset to be on the safe side...
688 */
689 REMR3Reset(pVM);
690
691 /*
692 * Ignore all ignorable notifications.
693 * (Not doing this will cause serious trouble.)
694 */
695 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
696
697 /*
698 * Load the required CPU Env bits.
699 * (Not much because we're never in REM when doing the save.)
700 */
701 pRem = &pVM->rem.s;
702 Assert(!pRem->fInREM);
703 SSMR3GetU32(pSSM, &pRem->Env.hflags);
704 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
705 {
706 /* Redundant REM CPU state has to be loaded, but can be ignored. */
707 CPUX86State_Ver16 temp;
708 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
709 }
710
711 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
712 if (RT_FAILURE(rc))
713 return rc;
714 if (u32Sep != ~0U)
715 {
716 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
717 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
718 }
719
720 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
721 SSMR3GetUInt(pSSM, &fRawRing0);
722 if (fRawRing0)
723 pRem->Env.state |= CPU_RAW_RING0;
724
725 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
726 {
727 /*
728 * Load the REM stuff.
729 */
730 /** @todo r=bird: We should just drop all these items, restoring doesn't make
731 * sense. */
732 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
733 if (RT_FAILURE(rc))
734 return rc;
735 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
736 {
737 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
738 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
739 }
740 for (i = 0; i < pRem->cInvalidatedPages; i++)
741 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
742 }
743
744 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
745 if (RT_FAILURE(rc))
746 return rc;
747
748 /* check the terminator. */
749 rc = SSMR3GetU32(pSSM, &u32Sep);
750 if (RT_FAILURE(rc))
751 return rc;
752 if (u32Sep != ~0U)
753 {
754 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
755 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
756 }
757
758 /*
759 * Get the CPUID features.
760 */
761 PVMCPU pVCpu = VMMGetCpu(pVM);
762 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
763 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
764
765 /*
766 * Sync the Load Flush the TLB
767 */
768 tlb_flush(&pRem->Env, 1);
769
770 /*
771 * Stop ignoring ignorable notifications.
772 */
773 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
774
775 /*
776 * Sync the whole CPU state when executing code in the recompiler.
777 */
778 for (i = 0; i < pVM->cCpus; i++)
779 {
780 PVMCPU pVCpu = &pVM->aCpus[i];
781 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
782 }
783 return VINF_SUCCESS;
784}
785
786
787
788#undef LOG_GROUP
789#define LOG_GROUP LOG_GROUP_REM_RUN
790
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int     rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool    fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    /* Save and clear interrupt_request so the step can't be diverted;
       it is restored verbatim at the end of this function. */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     */
    /* fBp records whether a breakpoint was removed here so it can be re-inserted below. */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Resume+suspend pair nudges the TM clock forward (see comment above). */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the remaining qemu EXCP_* codes to VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* rem.s.rc carries the real status; reset it once consumed. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
875
876
877/**
878 * Set a breakpoint using the REM facilities.
879 *
880 * @returns VBox status code.
881 * @param pVM The VM handle.
882 * @param Address The breakpoint address.
883 * @thread The emulation thread.
884 */
885REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
886{
887 VM_ASSERT_EMT(pVM);
888 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
889 {
890 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
891 return VINF_SUCCESS;
892 }
893 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
894 return VERR_REM_NO_MORE_BP_SLOTS;
895}
896
897
898/**
899 * Clears a breakpoint set by REMR3BreakpointSet().
900 *
901 * @returns VBox status code.
902 * @param pVM The VM handle.
903 * @param Address The breakpoint address.
904 * @thread The emulation thread.
905 */
906REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
907{
908 VM_ASSERT_EMT(pVM);
909 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
910 {
911 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
912 return VINF_SUCCESS;
913 }
914 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
915 return VERR_REM_BP_NOT_FOUND;
916}
917
918
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * The flow is: sync state in (REMR3State), run one instruction via
 * CPU_INTERRUPT_SINGLE_INSTR, map the QEMU exit code to a VBox status
 * code, and sync state back out (REMR3StateBack).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * (fFlushTBs is restored regardless of the REMR3State outcome.)
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    /* Scan the registered breakpoints for the current PC to tell
                       a breakpoint hit apart from a plain single step. */
                    CPUBreakpoint  *pBP;
                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1074
1075
1076/**
1077 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1078 *
1079 * @returns VBox status code.
1080 *
1081 * @param pVM The VM handle.
1082 * @param pVCpu The Virtual CPU handle.
1083 */
1084static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1085{
1086 int rc;
1087
1088 Assert(pVM->rem.s.fInREM);
1089#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1090 cpu_single_step(&pVM->rem.s.Env, 1);
1091#else
1092 Assert(!pVM->rem.s.Env.singlestep_enabled);
1093#endif
1094
1095 /*
1096 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1097 */
1098 for (;;)
1099 {
1100 char szBuf[256];
1101
1102 /*
1103 * Log the current registers state and instruction.
1104 */
1105 remR3StateUpdate(pVM, pVCpu);
1106 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1107 szBuf[0] = '\0';
1108 rc = DBGFR3DisasInstrEx(pVM,
1109 pVCpu->idCpu,
1110 0, /* Sel */
1111 0, /* GCPtr */
1112 DBGF_DISAS_FLAGS_CURRENT_GUEST
1113 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1114 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1115 szBuf,
1116 sizeof(szBuf),
1117 NULL);
1118 if (RT_FAILURE(rc))
1119 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1120 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1121
1122 /*
1123 * Execute the instruction.
1124 */
1125 TMNotifyStartOfExecution(pVCpu);
1126
1127 if ( pVM->rem.s.Env.exception_index < 0
1128 || pVM->rem.s.Env.exception_index > 256)
1129 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1130
1131#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1132 pVM->rem.s.Env.interrupt_request = 0;
1133#else
1134 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1135#endif
1136 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1137 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1138 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1139 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1140 pVM->rem.s.Env.interrupt_request,
1141 pVM->rem.s.Env.halted,
1142 pVM->rem.s.Env.exception_index
1143 );
1144
1145 rc = cpu_exec(&pVM->rem.s.Env);
1146
1147 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1148 pVM->rem.s.Env.interrupt_request,
1149 pVM->rem.s.Env.halted,
1150 pVM->rem.s.Env.exception_index
1151 );
1152
1153 TMNotifyEndOfExecution(pVCpu);
1154
1155 switch (rc)
1156 {
1157#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1158 /*
1159 * The normal exit.
1160 */
1161 case EXCP_SINGLE_INSTR:
1162 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1163 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1164 continue;
1165 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1166 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1167 rc = VINF_SUCCESS;
1168 break;
1169
1170#else
1171 /*
1172 * The normal exit, check for breakpoints at PC just to be sure.
1173 */
1174#endif
1175 case EXCP_DEBUG:
1176 if (pVM->rem.s.Env.watchpoint_hit)
1177 {
1178 /** @todo deal with watchpoints */
1179 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1180 rc = VINF_EM_DBG_BREAKPOINT;
1181 }
1182 else
1183 {
1184 CPUBreakpoint *pBP;
1185 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1186 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1187 if (pBP->pc == GCPtrPC)
1188 break;
1189 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1190 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1191 }
1192#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1193 if (rc == VINF_EM_DBG_STEPPED)
1194 {
1195 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1196 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1197 continue;
1198
1199 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1200 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1201 rc = VINF_SUCCESS;
1202 }
1203#endif
1204 break;
1205
1206 /*
1207 * If we take a trap or start servicing a pending interrupt, we might end up here.
1208 * (Timer thread or some other thread wishing EMT's attention.)
1209 */
1210 case EXCP_INTERRUPT:
1211 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1212 rc = VINF_SUCCESS;
1213 break;
1214
1215 /*
1216 * hlt instruction.
1217 */
1218 case EXCP_HLT:
1219 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1220 rc = VINF_EM_HALT;
1221 break;
1222
1223 /*
1224 * The VM has halted.
1225 */
1226 case EXCP_HALTED:
1227 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1228 rc = VINF_EM_HALT;
1229 break;
1230
1231 /*
1232 * Switch to RAW-mode.
1233 */
1234 case EXCP_EXECUTE_RAW:
1235 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1236 rc = VINF_EM_RESCHEDULE_RAW;
1237 break;
1238
1239 /*
1240 * Switch to hardware accelerated RAW-mode.
1241 */
1242 case EXCP_EXECUTE_HWACC:
1243 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1244 rc = VINF_EM_RESCHEDULE_HWACC;
1245 break;
1246
1247 /*
1248 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1249 */
1250 case EXCP_RC:
1251 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1252 rc = pVM->rem.s.rc;
1253 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1254 break;
1255
1256 /*
1257 * Figure out the rest when they arrive....
1258 */
1259 default:
1260 AssertMsgFailed(("rc=%d\n", rc));
1261 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1262 rc = VINF_EM_RESCHEDULE;
1263 break;
1264 }
1265 break;
1266 }
1267
1268#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1269// cpu_single_step(&pVM->rem.s.Env, 0);
1270#else
1271 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1272#endif
1273 return rc;
1274}
1275
1276
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * Executes guest code via cpu_exec() until an event forces a return to
 * EM, then translates the QEMU exit code into a VBox status code.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* Single-step+log mode takes the slow, heavily instrumented path. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                /* Distinguish a breakpoint hit from a single step by scanning
                   the registered breakpoints for the current PC. */
                CPUBreakpoint  *pBP;
                RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1390
1391
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    /* In IEM verification mode everything stays in the recompiler. */
    return false;
#endif

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields HWACCMR3CanExecuteGuest inspects are filled in;
         * the rest of Ctx is deliberately left uninitialized.
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr             = env->ldt.selector;
        Ctx.ldtrHid.u64Base  = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u   = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->breakpoints))
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->watchpoints))
    {
        //Log2(("raw mode refused: Watchpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging on. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1646
1647
1648/**
1649 * Fetches a code byte.
1650 *
1651 * @returns Success indicator (bool) for ease of use.
1652 * @param env The CPU environment structure.
1653 * @param GCPtrInstr Where to fetch code.
1654 * @param pu8Byte Where to store the byte on success
1655 */
1656bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1657{
1658 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1659 if (RT_SUCCESS(rc))
1660 return true;
1661 return false;
1662}
1663
1664
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * Syncs CR0/CR3/CR4 back to the CPUM context and then hands the actual
 * invalidation to PGMInvalidatePage().
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
{
    PVM      pVM = env->pVM;
    PCPUMCTX pCtx;
    int      rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* On failure fall back to a full CR3 resync. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1713
1714
1715#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to an R3 pointer for the QEMU TLB,
 * encoding access restrictions in the low pointer bits:
 *   - (void *)1            : the page is not directly accessible (TLB catch-all
 *                            or unassigned) - force the slow path.
 *   - low bit 2 set        : the page is readable but writes must be caught.
 *
 * NOTE(review): the fWritable parameter is currently ignored - a writable
 * mapping is always requested (see the hardcoded 'true' below); verify
 * whether read-only callers rely on the catch-write tag instead.
 */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int   rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1736#endif /* REM_PHYS_ADDR_IN_TLB */
1737
1738
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * Hands the page to CSAM for write monitoring, but only for paged
 * supervisor-mode code outside V86 mode and only when HWACCM is off.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                        /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1757
1758
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * Mirror of remR3ProtectCode: removes the CSAM monitoring under the same
 * conditions it was established.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                        /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1777
1778
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * Syncs CR0/CR3/CR4 back to the CPUM context and forwards the flush to
 * PGMFlushTLB().
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUX86State *env, bool fGlobal)
{
    PVM      pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * (Without PGE a non-global flush is a global flush anyway.)
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1823
1824
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * Syncs the control registers back to CPUM and notifies PGM so it can
 * switch paging mode. A PGMChangeMode failure in the VINF_EM range is
 * raised as an REM RC; anything else aborts the CPU.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUX86State *env)
{
    PVM      pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int      rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];
#ifdef TARGET_X86_64
    efer = env->efer;
    pCtx->msrEFER = efer;
#else
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* EM status codes are forwarded to the run loop via the REM RC. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1875
1876
/**
 * Called from compiled code to run dma.
 *
 * Suspends the emulated-code profiling sample around the PDM DMA run so
 * the DMA time is not accounted as guest execution.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3DmaRun(CPUX86State *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1888
1889
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Swaps the profiling sample from emulated code to timer work around the
 * TM queue processing.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3TimersRun(CPUX86State *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1905
1906
1907/**
1908 * Record trap occurrence
1909 *
1910 * @returns VBox status code
1911 * @param env Pointer to the CPU environment.
1912 * @param uTrap Trap nr
1913 * @param uErrorCode Error code
1914 * @param pvNextEIP Next EIP
1915 */
1916int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1917{
1918 PVM pVM = env->pVM;
1919#ifdef VBOX_WITH_STATISTICS
1920 static STAMCOUNTER s_aStatTrap[255];
1921 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1922#endif
1923
1924#ifdef VBOX_WITH_STATISTICS
1925 if (uTrap < 255)
1926 {
1927 if (!s_aRegisters[uTrap])
1928 {
1929 char szStatName[64];
1930 s_aRegisters[uTrap] = true;
1931 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1932 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1933 }
1934 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1935 }
1936#endif
1937 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1938 if( uTrap < 0x20
1939 && (env->cr[0] & X86_CR0_PE)
1940 && !(env->eflags & X86_EFL_VM))
1941 {
1942#ifdef DEBUG
1943 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1944#endif
1945 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1946 {
1947 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1948 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1949 return VERR_REM_TOO_MANY_TRAPS;
1950 }
1951 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1952 pVM->rem.s.cPendingExceptions = 1;
1953 pVM->rem.s.uPendingException = uTrap;
1954 pVM->rem.s.uPendingExcptEIP = env->eip;
1955 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1956 }
1957 else
1958 {
1959 pVM->rem.s.cPendingExceptions = 0;
1960 pVM->rem.s.uPendingException = uTrap;
1961 pVM->rem.s.uPendingExcptEIP = env->eip;
1962 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1963 }
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/*
1969 * Clear current active trap
1970 *
1971 * @param pVM VM Handle.
1972 */
1973void remR3TrapClear(PVM pVM)
1974{
1975 pVM->rem.s.cPendingExceptions = 0;
1976 pVM->rem.s.uPendingException = 0;
1977 pVM->rem.s.uPendingExcptEIP = 0;
1978 pVM->rem.s.uPendingExcptCR2 = 0;
1979}
1980
1981
/*
 * Record previous call instruction addresses
 *
 * Forwards the current guest EIP to CSAM so it can record the address of
 * the call instruction for later code-scanning analysis.
 *
 * @param env         Pointer to the CPU environment.
 */
void remR3RecordCall(CPUX86State *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1991
1992
/**
 * Syncs the internal REM state with the VM.
 *
 * This must be called before REMR3Run() is invoked whenever the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 *
 * @remark  The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *          not do this since the majority of the callers don't want any unnecessary events
 *          pending that would immediately interrupt execution.
 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8]  = pCtx->r8;
    pVM->rem.s.Env.regs[9]  = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?
     */
    if (pVM->rem.s.cInvalidatedPages)
    {
        RTUINT i;       /* NOTE(review): shadows the outer 'i' declared above. */

        pVM->rem.s.fIgnoreInvlPg = true;
        for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
        {
            Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
            tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
        }
        pVM->rem.s.fIgnoreInvlPg = false;
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer         = pCtx->msrEFER;
    pVM->rem.s.Env.star         = pCtx->msrSTAR;
    pVM->rem.s.Env.pat          = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar        = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar        = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask        = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
       )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base     = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit    = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base     = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit    = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs  = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base     = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit    = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags    = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base     = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit    = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags    = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags   &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int     = 0;
            pVM->rem.s.Env.exception_next_eip   = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int     = 1;
            pVM->rem.s.Env.exception_next_eip   = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case 0x0e:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case 0x11: case 0x08:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (   pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2467
2468
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-set the busy bit the REM side stripped before entering REM. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2728
2729
2730/**
2731 * This is called by the disassembler when it wants to update the cpu state
2732 * before for instance doing a register dump.
2733 */
2734static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2735{
2736 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2737 unsigned i;
2738
2739 Assert(pVM->rem.s.fInREM);
2740
2741 /*
2742 * Copy back the registers.
2743 * This is done in the order they are declared in the CPUMCTX structure.
2744 */
2745
2746 /** @todo FOP */
2747 /** @todo FPUIP */
2748 /** @todo CS */
2749 /** @todo FPUDP */
2750 /** @todo DS */
2751 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2752 pCtx->fpu.MXCSR = 0;
2753 pCtx->fpu.MXCSR_MASK = 0;
2754
2755 /** @todo check if FPU/XMM was actually used in the recompiler */
2756 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2757//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2758
2759#ifdef TARGET_X86_64
2760 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2761 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2762 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2763 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2764 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2765 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2766 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2767 pCtx->r8 = pVM->rem.s.Env.regs[8];
2768 pCtx->r9 = pVM->rem.s.Env.regs[9];
2769 pCtx->r10 = pVM->rem.s.Env.regs[10];
2770 pCtx->r11 = pVM->rem.s.Env.regs[11];
2771 pCtx->r12 = pVM->rem.s.Env.regs[12];
2772 pCtx->r13 = pVM->rem.s.Env.regs[13];
2773 pCtx->r14 = pVM->rem.s.Env.regs[14];
2774 pCtx->r15 = pVM->rem.s.Env.regs[15];
2775
2776 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2777#else
2778 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2779 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2780 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2781 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2782 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2783 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2784 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2785
2786 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2787#endif
2788
2789 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2790
2791 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2792 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2793 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2794 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2795 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2796
2797#ifdef TARGET_X86_64
2798 pCtx->rip = pVM->rem.s.Env.eip;
2799 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2800#else
2801 pCtx->eip = pVM->rem.s.Env.eip;
2802 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2803#endif
2804
2805 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2806 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2807 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2808 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2809 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2810 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2811
2812 for (i = 0; i < 8; i++)
2813 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2814
2815 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2816 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2817 {
2818 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2819 STAM_COUNTER_INC(&gStatREMGDTChange);
2820 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2821 }
2822
2823 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2824 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2825 {
2826 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2827 STAM_COUNTER_INC(&gStatREMIDTChange);
2828 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2829 }
2830
2831 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2832 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2833 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2834 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2835 {
2836 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2837 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2838 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2839 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2840 STAM_COUNTER_INC(&gStatREMLDTRChange);
2841 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2842 }
2843
2844 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2845 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2846 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2847 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2848 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2849 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2850 : 0) )
2851 {
2852 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2853 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2854 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2855 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2856 pCtx->tr = pVM->rem.s.Env.tr.selector;
2857 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2858 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2859 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2860 if (pCtx->trHid.Attr.u)
2861 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2862 STAM_COUNTER_INC(&gStatREMTRChange);
2863 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2864 }
2865
2866 /** @todo These values could still be out of sync! */
2867 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2868 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2869 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2870 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2871
2872 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2873 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2874 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2875
2876 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2877 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2878 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2879
2880 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2881 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2882 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2883
2884 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2885 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2886 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2887
2888 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2889 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2890 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2891
2892 /* Sysenter MSR */
2893 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2894 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2895 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2896
2897 /* System MSRs. */
2898 pCtx->msrEFER = pVM->rem.s.Env.efer;
2899 pCtx->msrSTAR = pVM->rem.s.Env.star;
2900 pCtx->msrPAT = pVM->rem.s.Env.pat;
2901#ifdef TARGET_X86_64
2902 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2903 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2904 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2905 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2906#endif
2907
2908}
2909
2910
2911/**
2912 * Update the VMM state information if we're currently in REM.
2913 *
2914 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2915 * we're currently executing in REM and the VMM state is invalid. This method will of
2916 * course check that we're executing in REM before syncing any data over to the VMM.
2917 *
2918 * @param pVM The VM handle.
2919 * @param pVCpu The VMCPU handle.
2920 */
2921REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2922{
2923 if (pVM->rem.s.fInREM)
2924 remR3StateUpdate(pVM, pVCpu);
2925}
2926
2927
2928#undef LOG_GROUP
2929#define LOG_GROUP LOG_GROUP_REM
2930
2931
2932/**
2933 * Notify the recompiler about Address Gate 20 state change.
2934 *
2935 * This notification is required since A20 gate changes are
2936 * initialized from a device driver and the VM might just as
2937 * well be in REM mode as in RAW mode.
2938 *
2939 * @param pVM VM handle.
2940 * @param pVCpu VMCPU handle.
2941 * @param fEnable True if the gate should be enabled.
2942 * False if the gate should be disabled.
2943 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bracket the qemu call with cIgnoreAll, the same pattern used around
       every cpu_register_* call in this file — presumably so notifications
       generated while qemu reacts to the A20 change are ignored. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2953
2954
2955/**
2956 * Replays the handler notification changes
2957 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2958 *
2959 * @param pVM VM handle.
2960 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t idxNext;
        uint32_t idxRevHead;
        uint32_t idxHead;
#ifdef VBOX_STRICT
        int32_t c = 0;                  /* Records taken off the pending list; cross-checked against the free list below. */
#endif

        /* Lockless purging of pending notifications. The whole pending list is
           detached atomically; producers keep pushing onto a now-empty list. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;                     /* Another EMT beat us to it; nothing left to replay. */
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         * (Producers push onto the head, so the detached list is LIFO.)
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch to the matching remR3NotifyHandlerPhysical* worker. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (Standard lockless push: retry the CAS until the head hasn't moved.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3078
3079
3080/**
3081 * Notify REM about changed code page.
3082 *
3083 * @returns VBox status code.
3084 * @param pVM VM handle.
3085 * @param pVCpu VMCPU handle.
3086 * @param pvCodePage Code page address
3087 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
    /* Note: a failed guest page translation is silently ignored (best-effort). */
#endif
    return VINF_SUCCESS;    /* Always succeeds, also when the notification is compiled out. */
}
3118
3119
3120/**
3121 * Notification about a successful MMR3PhysRegister() call.
3122 *
3123 * @param pVM VM handle.
3124 * @param GCPhys The physical address the RAM.
3125 * @param cb Size of the memory.
3126 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3127 */
3128REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3129{
3130 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3131 VM_ASSERT_EMT(pVM);
3132
3133 /*
3134 * Validate input - we trust the caller.
3135 */
3136 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3137 Assert(cb);
3138 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3139 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3140
3141 /*
3142 * Base ram? Update GCPhysLastRam.
3143 */
3144 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3145 {
3146 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3147 {
3148 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3149 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3150 }
3151 }
3152
3153 /*
3154 * Register the ram.
3155 */
3156 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3157
3158 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3159 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3160 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3161
3162 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3163}
3164
3165
3166/**
3167 * Notification about a successful MMR3PhysRomRegister() call.
3168 *
3169 * @param pVM VM handle.
3170 * @param GCPhys The physical address of the ROM.
3171 * @param cb The size of the ROM.
3172 * @param pvCopy Pointer to the ROM copy.
3173 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3174 * This function will be called when ever the protection of the
3175 * shadow ROM changes (at reset and end of POST).
3176 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     * Note: pvCopy is currently unused here.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Register the rom.  Writable shadow ROM is registered as plain RAM;
     * protected ROM gets the IO_MEM_ROM type so qemu treats it read-only.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3200
3201
3202/**
3203 * Notification about a successful memory deregistration or reservation.
3204 *
3205 * @param pVM VM Handle.
3206 * @param GCPhys Start physical address.
3207 * @param cb The size of the range.
3208 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory.  The range is re-registered as IO_MEM_UNASSIGNED
     * so qemu faults on any further access to it.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3232
3233
3234/**
3235 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3236 *
3237 * @param pVM VM Handle.
3238 * @param enmType Handler type.
3239 * @param GCPhys Handler range address.
3240 * @param cb Size of the handler range.
3241 * @param fHasHCHandler Set if the handler has a HC callback function.
3242 *
3243 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3244 * Handler memory type to memory which has no HC handler.
3245 */
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));

    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /* MMIO gets the MMIO memory type; other handler types only need the
       handler memory type when there is an HC callback to dispatch to. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
    else if (fHasHCHandler)
        cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
    /* else: left untouched by design, see the MMR3PhysRomRegister remark above. */
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3267
3268/**
3269 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3270 *
3271 * @param pVM VM Handle.
3272 * @param enmType Handler type.
3273 * @param GCPhys Handler range address.
3274 * @param cb Size of the handler range.
3275 * @param fHasHCHandler Set if the handler has a HC callback function.
3276 *
3277 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3278 * Handler memory type to memory which has no HC handler.
3279 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    /* Flush any queued notifications first so ordering is preserved,
       then apply this registration directly. */
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
}
3286
3287/**
3288 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3289 *
3290 * @param pVM VM Handle.
3291 * @param enmType Handler type.
3292 * @param GCPhys Handler range address.
3293 * @param cb Size of the handler range.
3294 * @param fHasHCHandler Set if the handler has a HC callback function.
3295 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3296 */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Expected only above the RAM range; unassign it. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
        }
        else
        {
            /* Re-register the range as plain RAM. */
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
        }
    }
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3328
3329/**
3330 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3331 *
3332 * @param pVM VM Handle.
3333 * @param enmType Handler type.
3334 * @param GCPhys Handler range address.
3335 * @param cb Size of the handler range.
3336 * @param fHasHCHandler Set if the handler has a HC callback function.
3337 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3338 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    /* Replay queued notifications first to keep the ordering intact. */
    REMR3ReplayHandlerNotifications(pVM);
    remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
}
3344
3345
3346/**
3347 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3348 *
3349 * @param pVM VM Handle.
3350 * @param enmType Handler type.
3351 * @param GCPhysOld Old handler range address.
3352 * @param GCPhysNew New handler range address.
3353 * @param cb Size of the handler range.
3354 * @param fHasHCHandler Set if the handler has a HC callback function.
3355 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3356 */
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* Moving MMIO ranges is not supported here. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    if (fHasHCHandler)
    {
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

        /*
         * Reset the old page.
         */
        PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
        if (!fRestoreAsRAM)
            cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
        PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }
}
3393
3394/**
3395 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3396 *
3397 * @param pVM VM Handle.
3398 * @param enmType Handler type.
3399 * @param GCPhysOld Old handler range address.
3400 * @param GCPhysNew New handler range address.
3401 * @param cb Size of the handler range.
3402 * @param fHasHCHandler Set if the handler has a HC callback function.
3403 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3404 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    /* Replay queued notifications first to keep the ordering intact. */
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
}
3411
3412/**
3413 * Checks if we're handling access to this page or not.
3414 *
3415 * @returns true if we're trapping access.
3416 * @returns false if we aren't.
3417 * @param pVM The VM handle.
3418 * @param GCPhys The physical address.
3419 *
3420 * @remark This function will only work correctly in VBOX_STRICT builds!
3421 */
REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
{
#ifdef VBOX_STRICT
    unsigned long off;
    REMR3ReplayHandlerNotifications(pVM);

    /* The page's registered memory type is compared against the handler,
       MMIO and ROM types — presumably encoded in the offset's low bits;
       confirm against get_phys_page_offset. */
    off = get_phys_page_offset(GCPhys);
    return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
        || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
        || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
#else
    /* Non-strict builds always report "not handled" (see function docs). */
    return false;
#endif
}
3436
3437
3438/**
3439 * Deals with a rare case in get_phys_addr_code where the code
3440 * is being monitored.
3441 *
3442 * It could also be an MMIO page, in which case we will raise a fatal error.
3443 *
3444 * @returns The physical address corresponding to addr.
3445 * @param env The cpu environment.
3446 * @param addr The virtual address.
3447 * @param pTLBEntry The TLB entry.
3448 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
                                             target_ulong addr,
                                             CPUTLBEntry *pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Any other memory type here (e.g. MMIO) is fatal: dump diagnostic info
       to the release log and abort. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();    /* cpu_abort does not return; keeps the compiler quiet about the missing return. */
}
3476
3477/**
3478 * Read guest RAM and ROM.
3479 *
3480 * @param SrcGCPhys The source address (guest physical).
3481 * @param pvDst The destination address.
3482 * @param cb Number of bytes
3483 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    /* Bulk read of guest physical memory via PGM, with profiling brackets. */
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3494
3495
3496/**
3497 * Read guest RAM and ROM, unsigned 8-bit.
3498 *
3499 * @param SrcGCPhys The source address (guest physical).
3500 */
3501RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3502{
3503 uint8_t val;
3504 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3505 VBOX_CHECK_ADDR(SrcGCPhys);
3506 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3507 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3508#ifdef VBOX_DEBUG_PHYS
3509 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3510#endif
3511 return val;
3512}
3513
3514
3515/**
3516 * Read guest RAM and ROM, signed 8-bit.
3517 *
3518 * @param SrcGCPhys The source address (guest physical).
3519 */
3520RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3521{
3522 int8_t val;
3523 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3524 VBOX_CHECK_ADDR(SrcGCPhys);
3525 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3526 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3527#ifdef VBOX_DEBUG_PHYS
3528 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3529#endif
3530 return val;
3531}
3532
3533
3534/**
3535 * Read guest RAM and ROM, unsigned 16-bit.
3536 *
3537 * @param SrcGCPhys The source address (guest physical).
3538 */
3539RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3540{
3541 uint16_t val;
3542 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3543 VBOX_CHECK_ADDR(SrcGCPhys);
3544 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3545 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3546#ifdef VBOX_DEBUG_PHYS
3547 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3548#endif
3549 return val;
3550}
3551
3552
3553/**
3554 * Read guest RAM and ROM, signed 16-bit.
3555 *
3556 * @param SrcGCPhys The source address (guest physical).
3557 */
3558RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3559{
3560 int16_t val;
3561 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3562 VBOX_CHECK_ADDR(SrcGCPhys);
3563 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3564 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3565#ifdef VBOX_DEBUG_PHYS
3566 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3567#endif
3568 return val;
3569}
3570
3571
3572/**
3573 * Read guest RAM and ROM, unsigned 32-bit.
3574 *
3575 * @param SrcGCPhys The source address (guest physical).
3576 */
3577RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3578{
3579 uint32_t val;
3580 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3581 VBOX_CHECK_ADDR(SrcGCPhys);
3582 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3583 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3584#ifdef VBOX_DEBUG_PHYS
3585 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3586#endif
3587 return val;
3588}
3589
3590
3591/**
3592 * Read guest RAM and ROM, signed 32-bit.
3593 *
3594 * @param SrcGCPhys The source address (guest physical).
3595 */
3596RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3597{
3598 int32_t val;
3599 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3600 VBOX_CHECK_ADDR(SrcGCPhys);
3601 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3602 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3603#ifdef VBOX_DEBUG_PHYS
3604 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3605#endif
3606 return val;
3607}
3608
3609
3610/**
3611 * Read guest RAM and ROM, unsigned 64-bit.
3612 *
3613 * @param SrcGCPhys The source address (guest physical).
3614 */
3615uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3616{
3617 uint64_t val;
3618 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3619 VBOX_CHECK_ADDR(SrcGCPhys);
3620 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3621 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3622#ifdef VBOX_DEBUG_PHYS
3623 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3624#endif
3625 return val;
3626}
3627
3628
3629/**
3630 * Read guest RAM and ROM, signed 64-bit.
3631 *
3632 * @param SrcGCPhys The source address (guest physical).
3633 */
3634int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3635{
3636 int64_t val;
3637 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3638 VBOX_CHECK_ADDR(SrcGCPhys);
3639 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3640 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3641#ifdef VBOX_DEBUG_PHYS
3642 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3643#endif
3644 return val;
3645}
3646
3647
3648/**
3649 * Write guest RAM.
3650 *
3651 * @param DstGCPhys The destination address (guest physical).
3652 * @param pvSrc The source address.
3653 * @param cb Number of bytes to write
3654 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    /* Bulk write to guest physical memory via PGM, with profiling brackets. */
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3665
3666
3667/**
3668 * Write guest RAM, unsigned 8-bit.
3669 *
3670 * @param DstGCPhys The destination address (guest physical).
3671 * @param val Value
3672 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    /* Single-byte guest physical write, profiled. */
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3683
3684
3685/**
3686 * Write guest RAM, unsigned 8-bit.
3687 *
3688 * @param DstGCPhys The destination address (guest physical).
3689 * @param val Value
3690 */
3691void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3692{
3693 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3694 VBOX_CHECK_ADDR(DstGCPhys);
3695 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3696 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3697#ifdef VBOX_DEBUG_PHYS
3698 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3699#endif
3700}
3701
3702
3703/**
3704 * Write guest RAM, unsigned 32-bit.
3705 *
3706 * @param DstGCPhys The destination address (guest physical).
3707 * @param val Value
3708 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    /* 32-bit guest physical write, profiled. */
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3719
3720
3721/**
3722 * Write guest RAM, unsigned 64-bit.
3723 *
3724 * @param DstGCPhys The destination address (guest physical).
3725 * @param val Value
3726 */
3727void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3728{
3729 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3730 VBOX_CHECK_ADDR(DstGCPhys);
3731 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3732 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3733#ifdef VBOX_DEBUG_PHYS
3734 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3735#endif
3736}
3737
3738#undef LOG_GROUP
3739#define LOG_GROUP LOG_GROUP_REM_MMIO
3740
3741/** Read MMIO memory. */
3742static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3743{
3744 uint32_t u32 = 0;
3745 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3746 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3747 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3748 return u32;
3749}
3750
3751/** Read MMIO memory. */
3752static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3753{
3754 uint32_t u32 = 0;
3755 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3756 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3757 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3758 return u32;
3759}
3760
3761/** Read MMIO memory. */
3762static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3763{
3764 uint32_t u32 = 0;
3765 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3766 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3767 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3768 return u32;
3769}
3770
3771/** Write to MMIO memory. */
3772static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3773{
3774 int rc;
3775 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3776 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3777 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3778}
3779
3780/** Write to MMIO memory. */
3781static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3782{
3783 int rc;
3784 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3785 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3786 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3787}
3788
3789/** Write to MMIO memory. */
3790static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3791{
3792 int rc;
3793 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3794 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3795 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3796}
3797
3798
3799#undef LOG_GROUP
3800#define LOG_GROUP LOG_GROUP_REM_HANDLER
3801
3802/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3803
3804static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3805{
3806 uint8_t u8;
3807 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3808 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3809 return u8;
3810}
3811
3812static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3813{
3814 uint16_t u16;
3815 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3816 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3817 return u16;
3818}
3819
3820static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3821{
3822 uint32_t u32;
3823 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3824 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3825 return u32;
3826}
3827
/** 8-bit write callback for access-handler protected pages; goes through PGM.
 *  NOTE(review): writes the first byte of u32 via &u32 — correct only for the
 *  low byte on a little-endian host, which matches the x86 hosts REM targets. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3833
/** 16-bit write callback for access-handler protected pages; goes through PGM.
 *  NOTE(review): writes the first two bytes of u32 via &u32 — little-endian
 *  host assumption, same as the U8 variant. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3839
/** 32-bit write callback for access-handler protected pages; goes through PGM. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3845
3846/* -+- disassembly -+- */
3847
3848#undef LOG_GROUP
3849#define LOG_GROUP LOG_GROUP_REM_DISAS
3850
3851
3852/**
3853 * Enables or disables singled stepped disassembly.
3854 *
3855 * @returns VBox status code.
3856 * @param pVM VM handle.
3857 * @param fEnable To enable set this flag, to disable clear it.
3858 */
3859static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3860{
3861 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3862 VM_ASSERT_EMT(pVM);
3863
3864 if (fEnable)
3865 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3866 else
3867 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3868#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3869 cpu_single_step(&pVM->rem.s.Env, fEnable);
3870#endif
3871 return VINF_SUCCESS;
3872}
3873
3874
3875/**
3876 * Enables or disables singled stepped disassembly.
3877 *
3878 * @returns VBox status code.
3879 * @param pVM VM handle.
3880 * @param fEnable To enable set this flag, to disable clear it.
3881 */
3882REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3883{
3884 int rc;
3885
3886 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3887 if (VM_IS_EMT(pVM))
3888 return remR3DisasEnableStepping(pVM, fEnable);
3889
3890 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3891 AssertRC(rc);
3892 return rc;
3893}
3894
3895
3896#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3897/**
3898 * External Debugger Command: .remstep [on|off|1|0]
3899 */
3900static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3901{
3902 int rc;
3903
3904 if (cArgs == 0)
3905 /*
3906 * Print the current status.
3907 */
3908 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3909 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3910 else
3911 {
3912 /*
3913 * Convert the argument and change the mode.
3914 */
3915 bool fEnable;
3916 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3917 if (RT_SUCCESS(rc))
3918 {
3919 rc = REMR3DisasEnableStepping(pVM, fEnable);
3920 if (RT_SUCCESS(rc))
3921 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3922 else
3923 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3924 }
3925 else
3926 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3927 }
3928 return rc;
3929}
3930#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3931
3932
3933/**
3934 * Disassembles one instruction and prints it to the log.
3935 *
3936 * @returns Success indicator.
3937 * @param env Pointer to the recompiler CPU structure.
3938 * @param f32BitCode Indicates that whether or not the code should
3939 * be disassembled as 16 or 32 bit. If -1 the CS
3940 * selector will be inspected.
3941 * @param pszPrefix
3942 */
3943bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3944{
3945 PVM pVM = env->pVM;
3946 const bool fLog = LogIsEnabled();
3947 const bool fLog2 = LogIs2Enabled();
3948 int rc = VINF_SUCCESS;
3949
3950 /*
3951 * Don't bother if there ain't any log output to do.
3952 */
3953 if (!fLog && !fLog2)
3954 return true;
3955
3956 /*
3957 * Update the state so DBGF reads the correct register values.
3958 */
3959 remR3StateUpdate(pVM, env->pVCpu);
3960
3961 /*
3962 * Log registers if requested.
3963 */
3964 if (fLog2)
3965 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3966
3967 /*
3968 * Disassemble to log.
3969 */
3970 if (fLog)
3971 {
3972 PVMCPU pVCpu = VMMGetCpu(pVM);
3973 char szBuf[256];
3974 szBuf[0] = '\0';
3975 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3976 pVCpu->idCpu,
3977 0, /* Sel */
3978 0, /* GCPtr */
3979 DBGF_DISAS_FLAGS_CURRENT_GUEST
3980 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3981 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3982 szBuf,
3983 sizeof(szBuf),
3984 NULL);
3985 if (RT_FAILURE(rc))
3986 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3987 if (pszPrefix && *pszPrefix)
3988 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3989 else
3990 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3991 }
3992
3993 return RT_SUCCESS(rc);
3994}
3995
3996
3997/**
3998 * Disassemble recompiled code.
3999 *
4000 * @param phFileIgnored Ignored, logfile usually.
4001 * @param pvCode Pointer to the code block.
4002 * @param cb Size of the code block.
4003 */
4004void disas(FILE *phFile, void *pvCode, unsigned long cb)
4005{
4006 if (LogIs2Enabled())
4007 {
4008 unsigned off = 0;
4009 char szOutput[256];
4010 DISCPUSTATE Cpu;
4011
4012 memset(&Cpu, 0, sizeof(Cpu));
4013#ifdef RT_ARCH_X86
4014 Cpu.mode = CPUMODE_32BIT;
4015#else
4016 Cpu.mode = CPUMODE_64BIT;
4017#endif
4018
4019 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4020 while (off < cb)
4021 {
4022 uint32_t cbInstr;
4023 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4024 RTLogPrintf("%s", szOutput);
4025 else
4026 {
4027 RTLogPrintf("disas error\n");
4028 cbInstr = 1;
4029#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4030 break;
4031#endif
4032 }
4033 off += cbInstr;
4034 }
4035 }
4036}
4037
4038
4039/**
4040 * Disassemble guest code.
4041 *
4042 * @param phFileIgnored Ignored, logfile usually.
4043 * @param uCode The guest address of the code to disassemble. (flat?)
4044 * @param cb Number of bytes to disassemble.
4045 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4046 */
4047void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4048{
4049 if (LogIs2Enabled())
4050 {
4051 PVM pVM = cpu_single_env->pVM;
4052 PVMCPU pVCpu = cpu_single_env->pVCpu;
4053 RTSEL cs;
4054 RTGCUINTPTR eip;
4055
4056 Assert(pVCpu);
4057
4058 /*
4059 * Update the state so DBGF reads the correct register values (flags).
4060 */
4061 remR3StateUpdate(pVM, pVCpu);
4062
4063 /*
4064 * Do the disassembling.
4065 */
4066 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4067 cs = cpu_single_env->segs[R_CS].selector;
4068 eip = uCode - cpu_single_env->segs[R_CS].base;
4069 for (;;)
4070 {
4071 char szBuf[256];
4072 uint32_t cbInstr;
4073 int rc = DBGFR3DisasInstrEx(pVM,
4074 pVCpu->idCpu,
4075 cs,
4076 eip,
4077 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4078 szBuf, sizeof(szBuf),
4079 &cbInstr);
4080 if (RT_SUCCESS(rc))
4081 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4082 else
4083 {
4084 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4085 cbInstr = 1;
4086 }
4087
4088 /* next */
4089 if (cb <= cbInstr)
4090 break;
4091 cb -= cbInstr;
4092 uCode += cbInstr;
4093 eip += cbInstr;
4094 }
4095 }
4096}
4097
4098
4099/**
4100 * Looks up a guest symbol.
4101 *
4102 * @returns Pointer to symbol name. This is a static buffer.
4103 * @param orig_addr The address in question.
4104 */
4105const char *lookup_symbol(target_ulong orig_addr)
4106{
4107 PVM pVM = cpu_single_env->pVM;
4108 RTGCINTPTR off = 0;
4109 RTDBGSYMBOL Sym;
4110 DBGFADDRESS Addr;
4111
4112 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4113 if (RT_SUCCESS(rc))
4114 {
4115 static char szSym[sizeof(Sym.szName) + 48];
4116 if (!off)
4117 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4118 else if (off > 0)
4119 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4120 else
4121 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4122 return szSym;
4123 }
4124 return "<N/A>";
4125}
4126
4127
4128#undef LOG_GROUP
4129#define LOG_GROUP LOG_GROUP_REM
4130
4131
4132/* -+- FF notifications -+- */
4133
4134
4135/**
4136 * Notification about a pending interrupt.
4137 *
4138 * @param pVM VM Handle.
4139 * @param pVCpu VMCPU Handle.
4140 * @param u8Interrupt Interrupt
4141 * @thread The emulation thread.
4142 */
4143REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4144{
4145 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4146 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4147}
4148
4149/**
4150 * Notification about a pending interrupt.
4151 *
4152 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4153 * @param pVM VM Handle.
4154 * @param pVCpu VMCPU Handle.
4155 * @thread The emulation thread.
4156 */
4157REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4158{
4159 return pVM->rem.s.u32PendingInterrupt;
4160}
4161
4162/**
4163 * Notification about the interrupt FF being set.
4164 *
4165 * @param pVM VM Handle.
4166 * @param pVCpu VMCPU Handle.
4167 * @thread The emulation thread.
4168 */
4169REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4170{
4171#ifndef IEM_VERIFICATION_MODE
4172 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4173 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4174 if (pVM->rem.s.fInREM)
4175 {
4176 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4177 CPU_INTERRUPT_EXTERNAL_HARD);
4178 }
4179#endif
4180}
4181
4182
4183/**
4184 * Notification about the interrupt FF being set.
4185 *
4186 * @param pVM VM Handle.
4187 * @param pVCpu VMCPU Handle.
4188 * @thread Any.
4189 */
4190REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4191{
4192 LogFlow(("REMR3NotifyInterruptClear:\n"));
4193 if (pVM->rem.s.fInREM)
4194 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4195}
4196
4197
4198/**
4199 * Notification about pending timer(s).
4200 *
4201 * @param pVM VM Handle.
4202 * @param pVCpuDst The target cpu for this notification.
4203 * TM will not broadcast pending timer events, but use
4204 * a dedicated EMT for them. So, only interrupt REM
4205 * execution if the given CPU is executing in REM.
4206 * @thread Any.
4207 */
4208REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4209{
4210#ifndef IEM_VERIFICATION_MODE
4211#ifndef DEBUG_bird
4212 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4213#endif
4214 if (pVM->rem.s.fInREM)
4215 {
4216 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4217 {
4218 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4219 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4220 CPU_INTERRUPT_EXTERNAL_TIMER);
4221 }
4222 else
4223 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4224 }
4225 else
4226 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4227#endif
4228}
4229
4230
4231/**
4232 * Notification about pending DMA transfers.
4233 *
4234 * @param pVM VM Handle.
4235 * @thread Any.
4236 */
4237REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4238{
4239#ifndef IEM_VERIFICATION_MODE
4240 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4241 if (pVM->rem.s.fInREM)
4242 {
4243 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4244 CPU_INTERRUPT_EXTERNAL_DMA);
4245 }
4246#endif
4247}
4248
4249
4250/**
4251 * Notification about pending timer(s).
4252 *
4253 * @param pVM VM Handle.
4254 * @thread Any.
4255 */
4256REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4257{
4258#ifndef IEM_VERIFICATION_MODE
4259 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4260 if (pVM->rem.s.fInREM)
4261 {
4262 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4263 CPU_INTERRUPT_EXTERNAL_EXIT);
4264 }
4265#endif
4266}
4267
4268
4269/**
4270 * Notification about pending FF set by an external thread.
4271 *
4272 * @param pVM VM handle.
4273 * @thread Any.
4274 */
4275REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4276{
4277#ifndef IEM_VERIFICATION_MODE
4278 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4279 if (pVM->rem.s.fInREM)
4280 {
4281 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4282 CPU_INTERRUPT_EXTERNAL_EXIT);
4283 }
4284#endif
4285}
4286
4287
4288#ifdef VBOX_WITH_STATISTICS
/**
 * Starts the advanced profiling sample identified by @a statcode.
 *
 * Maps the STATS_* code onto the matching global STAMPROFILEADV and starts
 * it; unknown codes trigger an assertion and are otherwise ignored.
 * Must be paired with remR3ProfileStop() for the same code.
 */
void remR3ProfileStart(int statcode)
{
    STAMPROFILEADV *pStat;
    switch(statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:
            pStat = &gStatExecuteSingleInstr;
            break;
        case STATS_QEMU_COMPILATION:
            pStat = &gStatCompilationQEmu;
            break;
        case STATS_QEMU_RUN_EMULATED_CODE:
            pStat = &gStatRunCodeQEmu;
            break;
        case STATS_QEMU_TOTAL:
            pStat = &gStatTotalTimeQEmu;
            break;
        case STATS_QEMU_RUN_TIMERS:
            pStat = &gStatTimers;
            break;
        case STATS_TLB_LOOKUP:
            pStat= &gStatTBLookup;
            break;
        case STATS_IRQ_HANDLING:
            pStat= &gStatIRQ;
            break;
        case STATS_RAW_CHECK:
            pStat = &gStatRawCheck;
            break;

        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return;
    }
    STAM_PROFILE_ADV_START(pStat, a);
}
4325
4326
/**
 * Stops the advanced profiling sample identified by @a statcode.
 *
 * Counterpart of remR3ProfileStart(); uses the same STATS_* to global
 * STAMPROFILEADV mapping. Unknown codes assert and are ignored.
 */
void remR3ProfileStop(int statcode)
{
    STAMPROFILEADV *pStat;
    switch(statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:
            pStat = &gStatExecuteSingleInstr;
            break;
        case STATS_QEMU_COMPILATION:
            pStat = &gStatCompilationQEmu;
            break;
        case STATS_QEMU_RUN_EMULATED_CODE:
            pStat = &gStatRunCodeQEmu;
            break;
        case STATS_QEMU_TOTAL:
            pStat = &gStatTotalTimeQEmu;
            break;
        case STATS_QEMU_RUN_TIMERS:
            pStat = &gStatTimers;
            break;
        case STATS_TLB_LOOKUP:
            pStat= &gStatTBLookup;
            break;
        case STATS_IRQ_HANDLING:
            pStat= &gStatIRQ;
            break;
        case STATS_RAW_CHECK:
            pStat = &gStatRawCheck;
            break;
        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return;
    }
    STAM_PROFILE_ADV_STOP(pStat, a);
}
4362#endif
4363
4364/**
4365 * Raise an RC, force rem exit.
4366 *
4367 * @param pVM VM handle.
4368 * @param rc The rc.
4369 */
4370void remR3RaiseRC(PVM pVM, int rc)
4371{
4372 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4373 Assert(pVM->rem.s.fInREM);
4374 VM_ASSERT_EMT(pVM);
4375 pVM->rem.s.rc = rc;
4376 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4377}
4378
4379
4380/* -+- timers -+- */
4381
/** QEMU callback: returns the virtual CPU's current TSC value via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4387
4388
4389/* -+- interrupts -+- */
4390
/** QEMU callback: asserts the FPU error line (legacy ISA IRQ 13). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4396
/**
 * QEMU callback: fetches the next interrupt vector to deliver.
 *
 * Prefers a vector previously latched via REMR3NotifyPendingInterrupt (the
 * raw-mode fallback case, see the kludge note below), otherwise asks PDM.
 *
 * @returns The interrupt vector, or -1 if none is available.
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     *        if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     *        remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD set while more interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4430
4431
4432/* -+- local apic -+- */
4433
4434#if 0 /* CPUMSetGuestMsr does this now. */
4435void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4436{
4437 int rc = PDMApicSetBase(env->pVM, val);
4438 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4439}
4440#endif
4441
/** QEMU callback: reads the APIC base MSR via PDM; returns 0 on failure. */
uint64_t cpu_get_apic_base(CPUX86State *env)
{
    uint64_t u64;
    int rc = PDMApicGetBase(env->pVM, &u64);
    if (RT_SUCCESS(rc))
    {
        LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
        return u64;
    }
    LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
    return 0;
}
4454
/** QEMU callback: writes the task priority (CR8) to the APIC via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4460
/** QEMU callback: reads the task priority (CR8) from the APIC via PDM;
 *  returns 0 on failure. */
uint8_t cpu_get_apic_tpr(CPUX86State *env)
{
    uint8_t u8;
    int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
    if (RT_SUCCESS(rc))
    {
        LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
        return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    }
    LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
    return 0;
}
4473
4474/**
4475 * Read an MSR.
4476 *
4477 * @retval 0 success.
4478 * @retval -1 failure, raise \#GP(0).
4479 * @param env The cpu state.
4480 * @param idMsr The MSR to read.
4481 * @param puValue Where to return the value.
4482 */
4483int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4484{
4485 Assert(env->pVCpu);
4486 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4487}
4488
4489/**
4490 * Write to an MSR.
4491 *
4492 * @retval 0 success.
4493 * @retval -1 failure, raise \#GP(0).
4494 * @param env The cpu state.
4495 * @param idMsr The MSR to read.
4496 * @param puValue Where to return the value.
4497 */
4498int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4499{
4500 Assert(env->pVCpu);
4501 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4502}
4503
4504/* -+- I/O Ports -+- */
4505
4506#undef LOG_GROUP
4507#define LOG_GROUP LOG_GROUP_REM_IOPORT
4508
/**
 * QEMU callback: 8-bit I/O port write, forwarded to IOM.
 *
 * EM status codes returned by IOM are raised via remR3RaiseRC to force a
 * recompiler exit; any other failure aborts the VM.
 */
void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
{
    int rc;

    /* Skip logging for the very chatty POST/CMOS/system-control ports. */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4527
/**
 * QEMU callback: 16-bit I/O port write, forwarded to IOM.
 *
 * Same error handling strategy as cpu_outb.
 */
void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
{
    //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
    int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4542
/**
 * QEMU callback: 32-bit I/O port write, forwarded to IOM.
 *
 * Same error handling strategy as cpu_outb.
 */
void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
{
    int rc;
    Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4558
/**
 * QEMU callback: 8-bit I/O port read, forwarded to IOM.
 *
 * EM status codes are raised via remR3RaiseRC (the partially read value is
 * still returned); other failures abort the VM and 0xff is the fallback.
 */
uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Skip logging for the noisy CMOS data port. */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (uint8_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint8_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT8_C(0xff);
}
4578
/**
 * QEMU callback: 16-bit I/O port read, forwarded to IOM.
 *
 * Same error handling strategy as cpu_inb; 0xffff is the abort fallback.
 */
uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
        return (uint16_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint16_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT16_C(0xffff);
}
4597
/**
 * QEMU callback: 32-bit I/O port read, forwarded to IOM.
 *
 * Same error handling strategy as cpu_inb; 0xffffffff is the abort fallback.
 */
uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
/* Disabled IDE-debugging aid kept for reference: */
//if (addr==0x01f0 && u32 == 0x6b6d)
//        loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT32_C(0xffffffff);
}
4618
4619#undef LOG_GROUP
4620#define LOG_GROUP LOG_GROUP_REM
4621
4622
4623/* -+- helpers and misc other interfaces -+- */
4624
4625/**
4626 * Perform the CPUID instruction.
4627 *
4628 * @param env Pointer to the recompiler CPU structure.
4629 * @param idx The CPUID leaf (eax).
4630 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4631 * @param pvEAX Where to store eax.
4632 * @param pvEBX Where to store ebx.
4633 * @param pvECX Where to store ecx.
4634 * @param pvEDX Where to store edx.
4635 */
4636void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4637 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4638{
4639 NOREF(idxSub);
4640 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4641}
4642
4643
4644#if 0 /* not used */
4645/**
4646 * Interface for qemu hardware to report back fatal errors.
4647 */
4648void hw_error(const char *pszFormat, ...)
4649{
4650 /*
4651 * Bitch about it.
4652 */
4653 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4654 * this in my Odin32 tree at home! */
4655 va_list args;
4656 va_start(args, pszFormat);
4657 RTLogPrintf("fatal error in virtual hardware:");
4658 RTLogPrintfV(pszFormat, args);
4659 va_end(args);
4660 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4661
4662 /*
4663 * If we're in REM context we'll sync back the state before 'jumping' to
4664 * the EMs failure handling.
4665 */
4666 PVM pVM = cpu_single_env->pVM;
4667 if (pVM->rem.s.fInREM)
4668 REMR3StateBack(pVM);
4669 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4670 AssertMsgFailed(("EMR3FatalError returned!\n"));
4671}
4672#endif
4673
4674/**
4675 * Interface for the qemu cpu to report unhandled situation
4676 * raising a fatal VM error.
4677 */
4678void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4679{
4680 va_list va;
4681 PVM pVM;
4682 PVMCPU pVCpu;
4683 char szMsg[256];
4684
4685 /*
4686 * Bitch about it.
4687 */
4688 RTLogFlags(NULL, "nodisabled nobuffered");
4689 RTLogFlush(NULL);
4690
4691 va_start(va, pszFormat);
4692#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4693 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4694 unsigned cArgs = 0;
4695 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4696 const char *psz = strchr(pszFormat, '%');
4697 while (psz && cArgs < 6)
4698 {
4699 auArgs[cArgs++] = va_arg(va, uintptr_t);
4700 psz = strchr(psz + 1, '%');
4701 }
4702 switch (cArgs)
4703 {
4704 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4705 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4706 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4707 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4708 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4709 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4710 default:
4711 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4712 }
4713#else
4714 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4715#endif
4716 va_end(va);
4717
4718 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4719 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4720
4721 /*
4722 * If we're in REM context we'll sync back the state before 'jumping' to
4723 * the EMs failure handling.
4724 */
4725 pVM = cpu_single_env->pVM;
4726 pVCpu = cpu_single_env->pVCpu;
4727 Assert(pVCpu);
4728
4729 if (pVM->rem.s.fInREM)
4730 REMR3StateBack(pVM, pVCpu);
4731 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4732 AssertMsgFailed(("EMR3FatalError returned!\n"));
4733}
4734
4735
4736/**
4737 * Aborts the VM.
4738 *
4739 * @param rc VBox error code.
4740 * @param pszTip Hint about why/when this happened.
4741 */
4742void remAbort(int rc, const char *pszTip)
4743{
4744 PVM pVM;
4745 PVMCPU pVCpu;
4746
4747 /*
4748 * Bitch about it.
4749 */
4750 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4751 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4752
4753 /*
4754 * Jump back to where we entered the recompiler.
4755 */
4756 pVM = cpu_single_env->pVM;
4757 pVCpu = cpu_single_env->pVCpu;
4758 Assert(pVCpu);
4759
4760 if (pVM->rem.s.fInREM)
4761 REMR3StateBack(pVM, pVCpu);
4762
4763 EMR3FatalError(pVCpu, rc);
4764 AssertMsgFailed(("EMR3FatalError returned!\n"));
4765}
4766
4767
4768/**
4769 * Dumps a linux system call.
4770 * @param pVCpu VMCPU handle.
4771 */
4772void remR3DumpLnxSyscall(PVMCPU pVCpu)
4773{
4774 static const char *apsz[] =
4775 {
4776 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4777 "sys_exit",
4778 "sys_fork",
4779 "sys_read",
4780 "sys_write",
4781 "sys_open", /* 5 */
4782 "sys_close",
4783 "sys_waitpid",
4784 "sys_creat",
4785 "sys_link",
4786 "sys_unlink", /* 10 */
4787 "sys_execve",
4788 "sys_chdir",
4789 "sys_time",
4790 "sys_mknod",
4791 "sys_chmod", /* 15 */
4792 "sys_lchown16",
4793 "sys_ni_syscall", /* old break syscall holder */
4794 "sys_stat",
4795 "sys_lseek",
4796 "sys_getpid", /* 20 */
4797 "sys_mount",
4798 "sys_oldumount",
4799 "sys_setuid16",
4800 "sys_getuid16",
4801 "sys_stime", /* 25 */
4802 "sys_ptrace",
4803 "sys_alarm",
4804 "sys_fstat",
4805 "sys_pause",
4806 "sys_utime", /* 30 */
4807 "sys_ni_syscall", /* old stty syscall holder */
4808 "sys_ni_syscall", /* old gtty syscall holder */
4809 "sys_access",
4810 "sys_nice",
4811 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4812 "sys_sync",
4813 "sys_kill",
4814 "sys_rename",
4815 "sys_mkdir",
4816 "sys_rmdir", /* 40 */
4817 "sys_dup",
4818 "sys_pipe",
4819 "sys_times",
4820 "sys_ni_syscall", /* old prof syscall holder */
4821 "sys_brk", /* 45 */
4822 "sys_setgid16",
4823 "sys_getgid16",
4824 "sys_signal",
4825 "sys_geteuid16",
4826 "sys_getegid16", /* 50 */
4827 "sys_acct",
4828 "sys_umount", /* recycled never used phys() */
4829 "sys_ni_syscall", /* old lock syscall holder */
4830 "sys_ioctl",
4831 "sys_fcntl", /* 55 */
4832 "sys_ni_syscall", /* old mpx syscall holder */
4833 "sys_setpgid",
4834 "sys_ni_syscall", /* old ulimit syscall holder */
4835 "sys_olduname",
4836 "sys_umask", /* 60 */
4837 "sys_chroot",
4838 "sys_ustat",
4839 "sys_dup2",
4840 "sys_getppid",
4841 "sys_getpgrp", /* 65 */
4842 "sys_setsid",
4843 "sys_sigaction",
4844 "sys_sgetmask",
4845 "sys_ssetmask",
4846 "sys_setreuid16", /* 70 */
4847 "sys_setregid16",
4848 "sys_sigsuspend",
4849 "sys_sigpending",
4850 "sys_sethostname",
4851 "sys_setrlimit", /* 75 */
4852 "sys_old_getrlimit",
4853 "sys_getrusage",
4854 "sys_gettimeofday",
4855 "sys_settimeofday",
4856 "sys_getgroups16", /* 80 */
4857 "sys_setgroups16",
4858 "old_select",
4859 "sys_symlink",
4860 "sys_lstat",
4861 "sys_readlink", /* 85 */
4862 "sys_uselib",
4863 "sys_swapon",
4864 "sys_reboot",
4865 "old_readdir",
4866 "old_mmap", /* 90 */
4867 "sys_munmap",
4868 "sys_truncate",
4869 "sys_ftruncate",
4870 "sys_fchmod",
4871 "sys_fchown16", /* 95 */
4872 "sys_getpriority",
4873 "sys_setpriority",
4874 "sys_ni_syscall", /* old profil syscall holder */
4875 "sys_statfs",
4876 "sys_fstatfs", /* 100 */
4877 "sys_ioperm",
4878 "sys_socketcall",
4879 "sys_syslog",
4880 "sys_setitimer",
4881 "sys_getitimer", /* 105 */
4882 "sys_newstat",
4883 "sys_newlstat",
4884 "sys_newfstat",
4885 "sys_uname",
4886 "sys_iopl", /* 110 */
4887 "sys_vhangup",
4888 "sys_ni_syscall", /* old "idle" system call */
4889 "sys_vm86old",
4890 "sys_wait4",
4891 "sys_swapoff", /* 115 */
4892 "sys_sysinfo",
4893 "sys_ipc",
4894 "sys_fsync",
4895 "sys_sigreturn",
4896 "sys_clone", /* 120 */
4897 "sys_setdomainname",
4898 "sys_newuname",
4899 "sys_modify_ldt",
4900 "sys_adjtimex",
4901 "sys_mprotect", /* 125 */
4902 "sys_sigprocmask",
4903 "sys_ni_syscall", /* old "create_module" */
4904 "sys_init_module",
4905 "sys_delete_module",
4906 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4907 "sys_quotactl",
4908 "sys_getpgid",
4909 "sys_fchdir",
4910 "sys_bdflush",
4911 "sys_sysfs", /* 135 */
4912 "sys_personality",
4913 "sys_ni_syscall", /* reserved for afs_syscall */
4914 "sys_setfsuid16",
4915 "sys_setfsgid16",
4916 "sys_llseek", /* 140 */
4917 "sys_getdents",
4918 "sys_select",
4919 "sys_flock",
4920 "sys_msync",
4921 "sys_readv", /* 145 */
4922 "sys_writev",
4923 "sys_getsid",
4924 "sys_fdatasync",
4925 "sys_sysctl",
4926 "sys_mlock", /* 150 */
4927 "sys_munlock",
4928 "sys_mlockall",
4929 "sys_munlockall",
4930 "sys_sched_setparam",
4931 "sys_sched_getparam", /* 155 */
4932 "sys_sched_setscheduler",
4933 "sys_sched_getscheduler",
4934 "sys_sched_yield",
4935 "sys_sched_get_priority_max",
4936 "sys_sched_get_priority_min", /* 160 */
4937 "sys_sched_rr_get_interval",
4938 "sys_nanosleep",
4939 "sys_mremap",
4940 "sys_setresuid16",
4941 "sys_getresuid16", /* 165 */
4942 "sys_vm86",
4943 "sys_ni_syscall", /* Old sys_query_module */
4944 "sys_poll",
4945 "sys_nfsservctl",
4946 "sys_setresgid16", /* 170 */
4947 "sys_getresgid16",
4948 "sys_prctl",
4949 "sys_rt_sigreturn",
4950 "sys_rt_sigaction",
4951 "sys_rt_sigprocmask", /* 175 */
4952 "sys_rt_sigpending",
4953 "sys_rt_sigtimedwait",
4954 "sys_rt_sigqueueinfo",
4955 "sys_rt_sigsuspend",
4956 "sys_pread64", /* 180 */
4957 "sys_pwrite64",
4958 "sys_chown16",
4959 "sys_getcwd",
4960 "sys_capget",
4961 "sys_capset", /* 185 */
4962 "sys_sigaltstack",
4963 "sys_sendfile",
4964 "sys_ni_syscall", /* reserved for streams1 */
4965 "sys_ni_syscall", /* reserved for streams2 */
4966 "sys_vfork", /* 190 */
4967 "sys_getrlimit",
4968 "sys_mmap2",
4969 "sys_truncate64",
4970 "sys_ftruncate64",
4971 "sys_stat64", /* 195 */
4972 "sys_lstat64",
4973 "sys_fstat64",
4974 "sys_lchown",
4975 "sys_getuid",
4976 "sys_getgid", /* 200 */
4977 "sys_geteuid",
4978 "sys_getegid",
4979 "sys_setreuid",
4980 "sys_setregid",
4981 "sys_getgroups", /* 205 */
4982 "sys_setgroups",
4983 "sys_fchown",
4984 "sys_setresuid",
4985 "sys_getresuid",
4986 "sys_setresgid", /* 210 */
4987 "sys_getresgid",
4988 "sys_chown",
4989 "sys_setuid",
4990 "sys_setgid",
4991 "sys_setfsuid", /* 215 */
4992 "sys_setfsgid",
4993 "sys_pivot_root",
4994 "sys_mincore",
4995 "sys_madvise",
4996 "sys_getdents64", /* 220 */
4997 "sys_fcntl64",
4998 "sys_ni_syscall", /* reserved for TUX */
4999 "sys_ni_syscall",
5000 "sys_gettid",
5001 "sys_readahead", /* 225 */
5002 "sys_setxattr",
5003 "sys_lsetxattr",
5004 "sys_fsetxattr",
5005 "sys_getxattr",
5006 "sys_lgetxattr", /* 230 */
5007 "sys_fgetxattr",
5008 "sys_listxattr",
5009 "sys_llistxattr",
5010 "sys_flistxattr",
5011 "sys_removexattr", /* 235 */
5012 "sys_lremovexattr",
5013 "sys_fremovexattr",
5014 "sys_tkill",
5015 "sys_sendfile64",
5016 "sys_futex", /* 240 */
5017 "sys_sched_setaffinity",
5018 "sys_sched_getaffinity",
5019 "sys_set_thread_area",
5020 "sys_get_thread_area",
5021 "sys_io_setup", /* 245 */
5022 "sys_io_destroy",
5023 "sys_io_getevents",
5024 "sys_io_submit",
5025 "sys_io_cancel",
5026 "sys_fadvise64", /* 250 */
5027 "sys_ni_syscall",
5028 "sys_exit_group",
5029 "sys_lookup_dcookie",
5030 "sys_epoll_create",
5031 "sys_epoll_ctl", /* 255 */
5032 "sys_epoll_wait",
5033 "sys_remap_file_pages",
5034 "sys_set_tid_address",
5035 "sys_timer_create",
5036 "sys_timer_settime", /* 260 */
5037 "sys_timer_gettime",
5038 "sys_timer_getoverrun",
5039 "sys_timer_delete",
5040 "sys_clock_settime",
5041 "sys_clock_gettime", /* 265 */
5042 "sys_clock_getres",
5043 "sys_clock_nanosleep",
5044 "sys_statfs64",
5045 "sys_fstatfs64",
5046 "sys_tgkill", /* 270 */
5047 "sys_utimes",
5048 "sys_fadvise64_64",
5049 "sys_ni_syscall" /* sys_vserver */
5050 };
5051
5052 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5053 switch (uEAX)
5054 {
5055 default:
5056 if (uEAX < RT_ELEMENTS(apsz))
5057 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5058 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5059 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5060 else
5061 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5062 break;
5063
5064 }
5065}
5066
5067
5068/**
5069 * Dumps an OpenBSD system call.
5070 * @param pVCpu VMCPU handle.
5071 */
5072void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5073{
5074 static const char *apsz[] =
5075 {
5076 "SYS_syscall", //0
5077 "SYS_exit", //1
5078 "SYS_fork", //2
5079 "SYS_read", //3
5080 "SYS_write", //4
5081 "SYS_open", //5
5082 "SYS_close", //6
5083 "SYS_wait4", //7
5084 "SYS_8",
5085 "SYS_link", //9
5086 "SYS_unlink", //10
5087 "SYS_11",
5088 "SYS_chdir", //12
5089 "SYS_fchdir", //13
5090 "SYS_mknod", //14
5091 "SYS_chmod", //15
5092 "SYS_chown", //16
5093 "SYS_break", //17
5094 "SYS_18",
5095 "SYS_19",
5096 "SYS_getpid", //20
5097 "SYS_mount", //21
5098 "SYS_unmount", //22
5099 "SYS_setuid", //23
5100 "SYS_getuid", //24
5101 "SYS_geteuid", //25
5102 "SYS_ptrace", //26
5103 "SYS_recvmsg", //27
5104 "SYS_sendmsg", //28
5105 "SYS_recvfrom", //29
5106 "SYS_accept", //30
5107 "SYS_getpeername", //31
5108 "SYS_getsockname", //32
5109 "SYS_access", //33
5110 "SYS_chflags", //34
5111 "SYS_fchflags", //35
5112 "SYS_sync", //36
5113 "SYS_kill", //37
5114 "SYS_38",
5115 "SYS_getppid", //39
5116 "SYS_40",
5117 "SYS_dup", //41
5118 "SYS_opipe", //42
5119 "SYS_getegid", //43
5120 "SYS_profil", //44
5121 "SYS_ktrace", //45
5122 "SYS_sigaction", //46
5123 "SYS_getgid", //47
5124 "SYS_sigprocmask", //48
5125 "SYS_getlogin", //49
5126 "SYS_setlogin", //50
5127 "SYS_acct", //51
5128 "SYS_sigpending", //52
5129 "SYS_osigaltstack", //53
5130 "SYS_ioctl", //54
5131 "SYS_reboot", //55
5132 "SYS_revoke", //56
5133 "SYS_symlink", //57
5134 "SYS_readlink", //58
5135 "SYS_execve", //59
5136 "SYS_umask", //60
5137 "SYS_chroot", //61
5138 "SYS_62",
5139 "SYS_63",
5140 "SYS_64",
5141 "SYS_65",
5142 "SYS_vfork", //66
5143 "SYS_67",
5144 "SYS_68",
5145 "SYS_sbrk", //69
5146 "SYS_sstk", //70
5147 "SYS_61",
5148 "SYS_vadvise", //72
5149 "SYS_munmap", //73
5150 "SYS_mprotect", //74
5151 "SYS_madvise", //75
5152 "SYS_76",
5153 "SYS_77",
5154 "SYS_mincore", //78
5155 "SYS_getgroups", //79
5156 "SYS_setgroups", //80
5157 "SYS_getpgrp", //81
5158 "SYS_setpgid", //82
5159 "SYS_setitimer", //83
5160 "SYS_84",
5161 "SYS_85",
5162 "SYS_getitimer", //86
5163 "SYS_87",
5164 "SYS_88",
5165 "SYS_89",
5166 "SYS_dup2", //90
5167 "SYS_91",
5168 "SYS_fcntl", //92
5169 "SYS_select", //93
5170 "SYS_94",
5171 "SYS_fsync", //95
5172 "SYS_setpriority", //96
5173 "SYS_socket", //97
5174 "SYS_connect", //98
5175 "SYS_99",
5176 "SYS_getpriority", //100
5177 "SYS_101",
5178 "SYS_102",
5179 "SYS_sigreturn", //103
5180 "SYS_bind", //104
5181 "SYS_setsockopt", //105
5182 "SYS_listen", //106
5183 "SYS_107",
5184 "SYS_108",
5185 "SYS_109",
5186 "SYS_110",
5187 "SYS_sigsuspend", //111
5188 "SYS_112",
5189 "SYS_113",
5190 "SYS_114",
5191 "SYS_115",
5192 "SYS_gettimeofday", //116
5193 "SYS_getrusage", //117
5194 "SYS_getsockopt", //118
5195 "SYS_119",
5196 "SYS_readv", //120
5197 "SYS_writev", //121
5198 "SYS_settimeofday", //122
5199 "SYS_fchown", //123
5200 "SYS_fchmod", //124
5201 "SYS_125",
5202 "SYS_setreuid", //126
5203 "SYS_setregid", //127
5204 "SYS_rename", //128
5205 "SYS_129",
5206 "SYS_130",
5207 "SYS_flock", //131
5208 "SYS_mkfifo", //132
5209 "SYS_sendto", //133
5210 "SYS_shutdown", //134
5211 "SYS_socketpair", //135
5212 "SYS_mkdir", //136
5213 "SYS_rmdir", //137
5214 "SYS_utimes", //138
5215 "SYS_139",
5216 "SYS_adjtime", //140
5217 "SYS_141",
5218 "SYS_142",
5219 "SYS_143",
5220 "SYS_144",
5221 "SYS_145",
5222 "SYS_146",
5223 "SYS_setsid", //147
5224 "SYS_quotactl", //148
5225 "SYS_149",
5226 "SYS_150",
5227 "SYS_151",
5228 "SYS_152",
5229 "SYS_153",
5230 "SYS_154",
5231 "SYS_nfssvc", //155
5232 "SYS_156",
5233 "SYS_157",
5234 "SYS_158",
5235 "SYS_159",
5236 "SYS_160",
5237 "SYS_getfh", //161
5238 "SYS_162",
5239 "SYS_163",
5240 "SYS_164",
5241 "SYS_sysarch", //165
5242 "SYS_166",
5243 "SYS_167",
5244 "SYS_168",
5245 "SYS_169",
5246 "SYS_170",
5247 "SYS_171",
5248 "SYS_172",
5249 "SYS_pread", //173
5250 "SYS_pwrite", //174
5251 "SYS_175",
5252 "SYS_176",
5253 "SYS_177",
5254 "SYS_178",
5255 "SYS_179",
5256 "SYS_180",
5257 "SYS_setgid", //181
5258 "SYS_setegid", //182
5259 "SYS_seteuid", //183
5260 "SYS_lfs_bmapv", //184
5261 "SYS_lfs_markv", //185
5262 "SYS_lfs_segclean", //186
5263 "SYS_lfs_segwait", //187
5264 "SYS_188",
5265 "SYS_189",
5266 "SYS_190",
5267 "SYS_pathconf", //191
5268 "SYS_fpathconf", //192
5269 "SYS_swapctl", //193
5270 "SYS_getrlimit", //194
5271 "SYS_setrlimit", //195
5272 "SYS_getdirentries", //196
5273 "SYS_mmap", //197
5274 "SYS___syscall", //198
5275 "SYS_lseek", //199
5276 "SYS_truncate", //200
5277 "SYS_ftruncate", //201
5278 "SYS___sysctl", //202
5279 "SYS_mlock", //203
5280 "SYS_munlock", //204
5281 "SYS_205",
5282 "SYS_futimes", //206
5283 "SYS_getpgid", //207
5284 "SYS_xfspioctl", //208
5285 "SYS_209",
5286 "SYS_210",
5287 "SYS_211",
5288 "SYS_212",
5289 "SYS_213",
5290 "SYS_214",
5291 "SYS_215",
5292 "SYS_216",
5293 "SYS_217",
5294 "SYS_218",
5295 "SYS_219",
5296 "SYS_220",
5297 "SYS_semget", //221
5298 "SYS_222",
5299 "SYS_223",
5300 "SYS_224",
5301 "SYS_msgget", //225
5302 "SYS_msgsnd", //226
5303 "SYS_msgrcv", //227
5304 "SYS_shmat", //228
5305 "SYS_229",
5306 "SYS_shmdt", //230
5307 "SYS_231",
5308 "SYS_clock_gettime", //232
5309 "SYS_clock_settime", //233
5310 "SYS_clock_getres", //234
5311 "SYS_235",
5312 "SYS_236",
5313 "SYS_237",
5314 "SYS_238",
5315 "SYS_239",
5316 "SYS_nanosleep", //240
5317 "SYS_241",
5318 "SYS_242",
5319 "SYS_243",
5320 "SYS_244",
5321 "SYS_245",
5322 "SYS_246",
5323 "SYS_247",
5324 "SYS_248",
5325 "SYS_249",
5326 "SYS_minherit", //250
5327 "SYS_rfork", //251
5328 "SYS_poll", //252
5329 "SYS_issetugid", //253
5330 "SYS_lchown", //254
5331 "SYS_getsid", //255
5332 "SYS_msync", //256
5333 "SYS_257",
5334 "SYS_258",
5335 "SYS_259",
5336 "SYS_getfsstat", //260
5337 "SYS_statfs", //261
5338 "SYS_fstatfs", //262
5339 "SYS_pipe", //263
5340 "SYS_fhopen", //264
5341 "SYS_265",
5342 "SYS_fhstatfs", //266
5343 "SYS_preadv", //267
5344 "SYS_pwritev", //268
5345 "SYS_kqueue", //269
5346 "SYS_kevent", //270
5347 "SYS_mlockall", //271
5348 "SYS_munlockall", //272
5349 "SYS_getpeereid", //273
5350 "SYS_274",
5351 "SYS_275",
5352 "SYS_276",
5353 "SYS_277",
5354 "SYS_278",
5355 "SYS_279",
5356 "SYS_280",
5357 "SYS_getresuid", //281
5358 "SYS_setresuid", //282
5359 "SYS_getresgid", //283
5360 "SYS_setresgid", //284
5361 "SYS_285",
5362 "SYS_mquery", //286
5363 "SYS_closefrom", //287
5364 "SYS_sigaltstack", //288
5365 "SYS_shmget", //289
5366 "SYS_semop", //290
5367 "SYS_stat", //291
5368 "SYS_fstat", //292
5369 "SYS_lstat", //293
5370 "SYS_fhstat", //294
5371 "SYS___semctl", //295
5372 "SYS_shmctl", //296
5373 "SYS_msgctl", //297
5374 "SYS_MAXSYSCALL", //298
5375 //299
5376 //300
5377 };
5378 uint32_t uEAX;
5379 if (!LogIsEnabled())
5380 return;
5381 uEAX = CPUMGetGuestEAX(pVCpu);
5382 switch (uEAX)
5383 {
5384 default:
5385 if (uEAX < RT_ELEMENTS(apsz))
5386 {
5387 uint32_t au32Args[8] = {0};
5388 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5389 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5390 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5391 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5392 }
5393 else
5394 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5395 break;
5396 }
5397}
5398
5399
5400#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The DLL main entry point (stub).
 *
 * Replaces the CRT-supplied entry point for the no-CRT Windows/x86 build
 * (see the enclosing IPRT_NO_CRT #if); no per-process/per-thread
 * initialization is needed, so it unconditionally reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
5408
/**
 * Minimal memcpy replacement for the no-CRT Windows/x86 build.
 *
 * Simple byte-by-byte forward copy; per the standard memcpy contract the
 * regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     Where to copy from.  (Now read through a const pointer;
 *                  the original dropped the const qualifier, a C constraint
 *                  violation.)
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5416
5417#endif
5418
/*
 * No-op implementation of the QEMU cpu_smm_update() hook — presumably
 * SMM-related state propagation in upstream QEMU; the VBox recompiler
 * deliberately leaves it empty (NOTE(review): confirm against upstream
 * QEMU if SMM support is ever needed).
 */
void cpu_smm_update(CPUX86State *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette