VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 36148

Last change on this file since 36148 was 36148, checked in by vboxsync, 14 years ago

rem: stdio.h build fix, 3rd try.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 178.8 KB
Line 
1/* $Id: VBoxRecompiler.c 36148 2011-03-03 14:20:31Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "exec-all.h"
26#include "config.h"
27#include "cpu-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
/** Copy the 80-bit fpu/mmx register at pSrc to pDst.
 * Implemented as a single aggregate assignment on the assumption that this
 * is probably faster than *calling* memcpy for a fixed-size copy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates: when defined, QEMU's own single-step
 *  facility is used for the logged stepping. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
83
84
85/*******************************************************************************
86* Internal Functions *
87*******************************************************************************/
88static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
91static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
108static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
109static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
110
111/*******************************************************************************
112* Global Variables *
113*******************************************************************************/
114
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the major recompiler code paths (registered in REMR3Init,
   deregistered in REMR3Term). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE gStatGCPhys2HCVirt;
static STAMPROFILE gStatHCVirt2GCPhys;
static STAMCOUNTER gStatCpuGetTSC;
/* Counters for the various reasons raw-mode execution was refused. */
static STAMCOUNTER gStatRefuseTFInhibit;
static STAMCOUNTER gStatRefuseVM86;
static STAMCOUNTER gStatRefusePaging;
static STAMCOUNTER gStatRefusePAE;
static STAMCOUNTER gStatRefuseIOPLNot0;
static STAMCOUNTER gStatRefuseIF0;
static STAMCOUNTER gStatRefuseCode16;
static STAMCOUNTER gStatRefuseWP0;
static STAMCOUNTER gStatRefuseRing1or2;
static STAMCOUNTER gStatRefuseCanExecute;
/* Descriptor table / task register change counters. */
static STAMCOUNTER gStatREMGDTChange;
static STAMCOUNTER gStatREMIDTChange;
static STAMCOUNTER gStatREMLDTRChange;
static STAMCOUNTER gStatREMTRChange;
/* Indexed ES/CS/SS/DS/FS/GS - see the STAM_REG calls in REMR3Init. */
static STAMCOUNTER gStatSelOutOfSync[6];
static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
152
153/*
154 * Global stuff.
155 */
156
157/** MMIO read callbacks. */
158CPUReadMemoryFunc *g_apfnMMIORead[3] =
159{
160 remR3MMIOReadU8,
161 remR3MMIOReadU16,
162 remR3MMIOReadU32
163};
164
165/** MMIO write callbacks. */
166CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
167{
168 remR3MMIOWriteU8,
169 remR3MMIOWriteU16,
170 remR3MMIOWriteU32
171};
172
173/** Handler read callbacks. */
174CPUReadMemoryFunc *g_apfnHandlerRead[3] =
175{
176 remR3HandlerReadU8,
177 remR3HandlerReadU16,
178 remR3HandlerReadU32
179};
180
181/** Handler write callbacks. */
182CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
183{
184 remR3HandlerWriteU8,
185 remR3HandlerWriteU16,
186 remR3HandlerWriteU32
187};
188
189
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors - registered once per process in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 *  Allocated with RTMemExecAlloc() in REMR3Init. */
uint8_t *code_gen_prologue;
223
224
225/*******************************************************************************
226* Internal Functions *
227*******************************************************************************/
228void remAbort(int rc, const char *pszTip);
229extern int testmath(void);
230
231/* Put them here to avoid unused variable warning. */
232AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
233#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
234//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
235/* Why did this have to be identical?? */
236AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
237#else
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#endif
240
241
242/**
243 * Initializes the REM.
244 *
245 * @returns VBox status code.
246 * @param pVM The VM to operate on.
247 */
248REMR3DECL(int) REMR3Init(PVM pVM)
249{
250 PREMHANDLERNOTIFICATION pCur;
251 uint32_t u32Dummy;
252 int rc;
253 unsigned i;
254
255#ifdef VBOX_ENABLE_VBOXREM64
256 LogRel(("Using 64-bit aware REM\n"));
257#endif
258
259 /*
260 * Assert sanity.
261 */
262 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
263 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
264 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
265#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
266 Assert(!testmath());
267#endif
268
269 /*
270 * Init some internal data members.
271 */
272 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
273 pVM->rem.s.Env.pVM = pVM;
274#ifdef CPU_RAW_MODE_INIT
275 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
276#endif
277
278 /*
279 * Initialize the REM critical section.
280 *
281 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
282 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
283 * deadlocks. (mostly pgm vs rem locking)
284 */
285 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
286 AssertRCReturn(rc, rc);
287
288 /* ctx. */
289 pVM->rem.s.pCtx = NULL; /* set when executing code. */
290 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
291
292 /* ignore all notifications */
293 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
294
295 code_gen_prologue = RTMemExecAlloc(_1K);
296 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
297
298 cpu_exec_init_all(0);
299
300 /*
301 * Init the recompiler.
302 */
303 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
304 {
305 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
306 return VERR_GENERAL_FAILURE;
307 }
308 PVMCPU pVCpu = VMMGetCpu(pVM);
309 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
310 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
311
312 /* allocate code buffer for single instruction emulation. */
313 pVM->rem.s.Env.cbCodeBuffer = 4096;
314 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
315 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
316
317 /* finally, set the cpu_single_env global. */
318 cpu_single_env = &pVM->rem.s.Env;
319
320 /* Nothing is pending by default */
321 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
322
323 /*
324 * Register ram types.
325 */
326 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
327 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
328 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
329 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
330 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
331
332 /* stop ignoring. */
333 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
334
335 /*
336 * Register the saved state data unit.
337 */
338 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
339 NULL, NULL, NULL,
340 NULL, remR3Save, NULL,
341 NULL, remR3Load, NULL);
342 if (RT_FAILURE(rc))
343 return rc;
344
345#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
346 /*
347 * Debugger commands.
348 */
349 static bool fRegisteredCmds = false;
350 if (!fRegisteredCmds)
351 {
352 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
353 if (RT_SUCCESS(rc))
354 fRegisteredCmds = true;
355 }
356#endif
357
358#ifdef VBOX_WITH_STATISTICS
359 /*
360 * Statistics.
361 */
362 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
363 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
364 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
365 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
366 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
367 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
368 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
369 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
370 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
371 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
372 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
373 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
374
375 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
376
377 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
378 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
379 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
380 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
381 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
382 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
383 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
384 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
385 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
386 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
387 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
388
389 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
390 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
391 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
392 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
393
394 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
395 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
396 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
397 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
398 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
399 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
400
401 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
402 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
403 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
405 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
406 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
407
408 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
409#endif /* VBOX_WITH_STATISTICS */
410
411 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
412 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
413 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
414
415
416#ifdef DEBUG_ALL_LOGGING
417 loglevel = ~0;
418# ifdef DEBUG_TMP_LOGGING
419 logfile = fopen("/tmp/vbox-qemu.log", "w");
420# endif
421#endif
422
423 /*
424 * Init the handler notification lists.
425 */
426 pVM->rem.s.idxPendingList = UINT32_MAX;
427 pVM->rem.s.idxFreeList = 0;
428
429 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
430 {
431 pCur = &pVM->rem.s.aHandlerNotifications[i];
432 pCur->idxNext = i + 1;
433 pCur->idxSelf = i;
434 }
435 pCur->idxNext = UINT32_MAX; /* the last record. */
436
437 return rc;
438}
439
440
441/**
442 * Finalizes the REM initialization.
443 *
444 * This is called after all components, devices and drivers has
445 * been initialized. Its main purpose it to finish the RAM related
446 * initialization.
447 *
448 * @returns VBox status code.
449 *
450 * @param pVM The VM handle.
451 */
452REMR3DECL(int) REMR3InitFinalize(PVM pVM)
453{
454 int rc;
455
456 /*
457 * Ram size & dirty bit map.
458 */
459 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
460 pVM->rem.s.fGCPhysLastRamFixed = true;
461#ifdef RT_STRICT
462 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
463#else
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
465#endif
466 return rc;
467}
468
469
470/**
471 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
472 *
473 * @returns VBox status code.
474 * @param pVM The VM handle.
475 * @param fGuarded Whether to guard the map.
476 */
477static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
478{
479 int rc = VINF_SUCCESS;
480 RTGCPHYS cb;
481
482 cb = pVM->rem.s.GCPhysLastRam + 1;
483 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
484 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
485 VERR_OUT_OF_RANGE);
486 phys_ram_size = cb;
487 phys_ram_dirty_size = cb >> PAGE_SHIFT;
488 AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
489
490 if (!fGuarded)
491 {
492 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
493 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
494 }
495 else
496 {
497 /*
498 * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
499 */
500 uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
501 uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
502 if (cbBitmapFull == cbBitmapAligned)
503 cbBitmapFull += _4G >> PAGE_SHIFT;
504 else if (cbBitmapFull - cbBitmapAligned < _64K)
505 cbBitmapFull += _64K;
506
507 phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
508 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
509
510 rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
511 if (RT_FAILURE(rc))
512 {
513 RTMemPageFree(phys_ram_dirty, cbBitmapFull);
514 AssertLogRelRCReturn(rc, rc);
515 }
516
517 phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
518 }
519
520 /* initialize it. */
521 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
522 return rc;
523}
524
525
526/**
527 * Terminates the REM.
528 *
529 * Termination means cleaning up and freeing all resources,
530 * the VM it self is at this point powered off or suspended.
531 *
532 * @returns VBox status code.
533 * @param pVM The VM to operate on.
534 */
535REMR3DECL(int) REMR3Term(PVM pVM)
536{
537#ifdef VBOX_WITH_STATISTICS
538 /*
539 * Statistics.
540 */
541 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
542 STAM_DEREG(pVM, &gStatCompilationQEmu);
543 STAM_DEREG(pVM, &gStatRunCodeQEmu);
544 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
545 STAM_DEREG(pVM, &gStatTimers);
546 STAM_DEREG(pVM, &gStatTBLookup);
547 STAM_DEREG(pVM, &gStatIRQ);
548 STAM_DEREG(pVM, &gStatRawCheck);
549 STAM_DEREG(pVM, &gStatMemRead);
550 STAM_DEREG(pVM, &gStatMemWrite);
551 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
552 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
553
554 STAM_DEREG(pVM, &gStatCpuGetTSC);
555
556 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
557 STAM_DEREG(pVM, &gStatRefuseVM86);
558 STAM_DEREG(pVM, &gStatRefusePaging);
559 STAM_DEREG(pVM, &gStatRefusePAE);
560 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
561 STAM_DEREG(pVM, &gStatRefuseIF0);
562 STAM_DEREG(pVM, &gStatRefuseCode16);
563 STAM_DEREG(pVM, &gStatRefuseWP0);
564 STAM_DEREG(pVM, &gStatRefuseRing1or2);
565 STAM_DEREG(pVM, &gStatRefuseCanExecute);
566 STAM_DEREG(pVM, &gStatFlushTBs);
567
568 STAM_DEREG(pVM, &gStatREMGDTChange);
569 STAM_DEREG(pVM, &gStatREMLDTRChange);
570 STAM_DEREG(pVM, &gStatREMIDTChange);
571 STAM_DEREG(pVM, &gStatREMTRChange);
572
573 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
574 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
575 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
576 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
577 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
578 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
579
580 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
581 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
582 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
583 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
584 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
585 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
586
587 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
588#endif /* VBOX_WITH_STATISTICS */
589
590 STAM_REL_DEREG(pVM, &tb_flush_count);
591 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
592 STAM_REL_DEREG(pVM, &tlb_flush_count);
593
594 return VINF_SUCCESS;
595}
596
597
598/**
599 * The VM is being reset.
600 *
601 * For the REM component this means to call the cpu_reset() and
602 * reinitialize some state variables.
603 *
604 * @param pVM VM handle.
605 */
606REMR3DECL(void) REMR3Reset(PVM pVM)
607{
608 /*
609 * Reset the REM cpu.
610 */
611 Assert(pVM->rem.s.cIgnoreAll == 0);
612 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
613 cpu_reset(&pVM->rem.s.Env);
614 pVM->rem.s.cInvalidatedPages = 0;
615 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
616 Assert(pVM->rem.s.cIgnoreAll == 0);
617
618 /* Clear raw ring 0 init state */
619 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
620
621 /* Flush the TBs the next time we execute code here. */
622 pVM->rem.s.fFlushTBs = true;
623}
624
625
626/**
627 * Execute state save operation.
628 *
629 * @returns VBox status code.
630 * @param pVM VM Handle.
631 * @param pSSM SSM operation handle.
632 */
633static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
634{
635 PREM pRem = &pVM->rem.s;
636
637 /*
638 * Save the required CPU Env bits.
639 * (Not much because we're never in REM when doing the save.)
640 */
641 LogFlow(("remR3Save:\n"));
642 Assert(!pRem->fInREM);
643 SSMR3PutU32(pSSM, pRem->Env.hflags);
644 SSMR3PutU32(pSSM, ~0); /* separator */
645
646 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
647 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
648 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
649
650 return SSMR3PutU32(pSSM, ~0); /* terminator */
651}
652
653
654/**
655 * Execute state load operation.
656 *
657 * @returns VBox status code.
658 * @param pVM VM Handle.
659 * @param pSSM SSM operation handle.
660 * @param uVersion Data layout version.
661 * @param uPass The data pass.
662 */
663static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
664{
665 uint32_t u32Dummy;
666 uint32_t fRawRing0 = false;
667 uint32_t u32Sep;
668 uint32_t i;
669 int rc;
670 PREM pRem;
671
672 LogFlow(("remR3Load:\n"));
673 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
674
675 /*
676 * Validate version.
677 */
678 if ( uVersion != REM_SAVED_STATE_VERSION
679 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
680 {
681 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
682 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
683 }
684
685 /*
686 * Do a reset to be on the safe side...
687 */
688 REMR3Reset(pVM);
689
690 /*
691 * Ignore all ignorable notifications.
692 * (Not doing this will cause serious trouble.)
693 */
694 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
695
696 /*
697 * Load the required CPU Env bits.
698 * (Not much because we're never in REM when doing the save.)
699 */
700 pRem = &pVM->rem.s;
701 Assert(!pRem->fInREM);
702 SSMR3GetU32(pSSM, &pRem->Env.hflags);
703 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
704 {
705 /* Redundant REM CPU state has to be loaded, but can be ignored. */
706 CPUX86State_Ver16 temp;
707 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
708 }
709
710 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
711 if (RT_FAILURE(rc))
712 return rc;
713 if (u32Sep != ~0U)
714 {
715 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
716 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
717 }
718
719 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
720 SSMR3GetUInt(pSSM, &fRawRing0);
721 if (fRawRing0)
722 pRem->Env.state |= CPU_RAW_RING0;
723
724 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
725 {
726 /*
727 * Load the REM stuff.
728 */
729 /** @todo r=bird: We should just drop all these items, restoring doesn't make
730 * sense. */
731 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
732 if (RT_FAILURE(rc))
733 return rc;
734 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
735 {
736 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
737 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
738 }
739 for (i = 0; i < pRem->cInvalidatedPages; i++)
740 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
741 }
742
743 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
744 if (RT_FAILURE(rc))
745 return rc;
746
747 /* check the terminator. */
748 rc = SSMR3GetU32(pSSM, &u32Sep);
749 if (RT_FAILURE(rc))
750 return rc;
751 if (u32Sep != ~0U)
752 {
753 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
754 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
755 }
756
757 /*
758 * Get the CPUID features.
759 */
760 PVMCPU pVCpu = VMMGetCpu(pVM);
761 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
762 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
763
764 /*
765 * Sync the Load Flush the TLB
766 */
767 tlb_flush(&pRem->Env, 1);
768
769 /*
770 * Stop ignoring ignorable notifications.
771 */
772 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
773
774 /*
775 * Sync the whole CPU state when executing code in the recompiler.
776 */
777 for (i = 0; i < pVM->cCpus; i++)
778 {
779 PVMCPU pVCpu = &pVM->aCpus[i];
780 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
781 }
782 return VINF_SUCCESS;
783}
784
785
786
787#undef LOG_GROUP
788#define LOG_GROUP LOG_GROUP_REM_RUN
789
790/**
791 * Single steps an instruction in recompiled mode.
792 *
793 * Before calling this function the REM state needs to be in sync with
794 * the VM. Call REMR3State() to perform the sync. It's only necessary
795 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
796 * and after calling REMR3StateBack().
797 *
798 * @returns VBox status code.
799 *
800 * @param pVM VM Handle.
801 * @param pVCpu VMCPU Handle.
802 */
803REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
804{
805 int rc, interrupt_request;
806 RTGCPTR GCPtrPC;
807 bool fBp;
808
809 /*
810 * Lock the REM - we don't wanna have anyone interrupting us
811 * while stepping - and enabled single stepping. We also ignore
812 * pending interrupts and suchlike.
813 */
814 interrupt_request = pVM->rem.s.Env.interrupt_request;
815 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
816 pVM->rem.s.Env.interrupt_request = 0;
817 cpu_single_step(&pVM->rem.s.Env, 1);
818
819 /*
820 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
821 */
822 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
823 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
824
825 /*
826 * Execute and handle the return code.
827 * We execute without enabling the cpu tick, so on success we'll
828 * just flip it on and off to make sure it moves
829 */
830 rc = cpu_exec(&pVM->rem.s.Env);
831 if (rc == EXCP_DEBUG)
832 {
833 TMR3NotifyResume(pVM, pVCpu);
834 TMR3NotifySuspend(pVM, pVCpu);
835 rc = VINF_EM_DBG_STEPPED;
836 }
837 else
838 {
839 switch (rc)
840 {
841 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
842 case EXCP_HLT:
843 case EXCP_HALTED: rc = VINF_EM_HALT; break;
844 case EXCP_RC:
845 rc = pVM->rem.s.rc;
846 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
847 break;
848 case EXCP_EXECUTE_RAW:
849 case EXCP_EXECUTE_HWACC:
850 /** @todo: is it correct? No! */
851 rc = VINF_SUCCESS;
852 break;
853 default:
854 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
855 rc = VERR_INTERNAL_ERROR;
856 break;
857 }
858 }
859
860 /*
861 * Restore the stuff we changed to prevent interruption.
862 * Unlock the REM.
863 */
864 if (fBp)
865 {
866 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
867 Assert(rc2 == 0); NOREF(rc2);
868 }
869 cpu_single_step(&pVM->rem.s.Env, 0);
870 pVM->rem.s.Env.interrupt_request = interrupt_request;
871
872 return rc;
873}
874
875
876/**
877 * Set a breakpoint using the REM facilities.
878 *
879 * @returns VBox status code.
880 * @param pVM The VM handle.
881 * @param Address The breakpoint address.
882 * @thread The emulation thread.
883 */
884REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
885{
886 VM_ASSERT_EMT(pVM);
887 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
888 {
889 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
890 return VINF_SUCCESS;
891 }
892 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
893 return VERR_REM_NO_MORE_BP_SLOTS;
894}
895
896
897/**
898 * Clears a breakpoint set by REMR3BreakpointSet().
899 *
900 * @returns VBox status code.
901 * @param pVM The VM handle.
902 * @param Address The breakpoint address.
903 * @thread The emulation thread.
904 */
905REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
906{
907 VM_ASSERT_EMT(pVM);
908 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
909 {
910 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
911 return VINF_SUCCESS;
912 }
913 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
914 return VERR_REM_BP_NOT_FOUND;
915}
916
917
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code (typically a VINF_EM_* scheduling status).
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * (fFlushTBs is restored immediately after the sync, whatever the outcome.)
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save interrupt_request so it can be restored after the single-instruction run. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the QEMU breakpoint table for the current PC. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1070
1071
1072/**
1073 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1074 *
1075 * @returns VBox status code.
1076 *
1077 * @param pVM The VM handle.
1078 * @param pVCpu The Virtual CPU handle.
1079 */
1080static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1081{
1082 int rc;
1083
1084 Assert(pVM->rem.s.fInREM);
1085#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1086 cpu_single_step(&pVM->rem.s.Env, 1);
1087#else
1088 Assert(!pVM->rem.s.Env.singlestep_enabled);
1089#endif
1090
1091 /*
1092 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1093 */
1094 for (;;)
1095 {
1096 char szBuf[256];
1097
1098 /*
1099 * Log the current registers state and instruction.
1100 */
1101 remR3StateUpdate(pVM, pVCpu);
1102 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1103 szBuf[0] = '\0';
1104 rc = DBGFR3DisasInstrEx(pVM,
1105 pVCpu->idCpu,
1106 0, /* Sel */
1107 0, /* GCPtr */
1108 DBGF_DISAS_FLAGS_CURRENT_GUEST
1109 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1110 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1111 szBuf,
1112 sizeof(szBuf),
1113 NULL);
1114 if (RT_FAILURE(rc))
1115 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1116 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1117
1118 /*
1119 * Execute the instruction.
1120 */
1121 TMNotifyStartOfExecution(pVCpu);
1122
1123 if ( pVM->rem.s.Env.exception_index < 0
1124 || pVM->rem.s.Env.exception_index > 256)
1125 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1126
1127#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1128 pVM->rem.s.Env.interrupt_request = 0;
1129#else
1130 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1131#endif
1132 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1133 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1134 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1135 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1136 pVM->rem.s.Env.interrupt_request,
1137 pVM->rem.s.Env.halted,
1138 pVM->rem.s.Env.exception_index
1139 );
1140
1141 rc = cpu_exec(&pVM->rem.s.Env);
1142
1143 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1144 pVM->rem.s.Env.interrupt_request,
1145 pVM->rem.s.Env.halted,
1146 pVM->rem.s.Env.exception_index
1147 );
1148
1149 TMNotifyEndOfExecution(pVCpu);
1150
1151 switch (rc)
1152 {
1153#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1154 /*
1155 * The normal exit.
1156 */
1157 case EXCP_SINGLE_INSTR:
1158 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1159 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1160 continue;
1161 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1162 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1163 rc = VINF_SUCCESS;
1164 break;
1165
1166#else
1167 /*
1168 * The normal exit, check for breakpoints at PC just to be sure.
1169 */
1170#endif
1171 case EXCP_DEBUG:
1172 rc = VINF_EM_DBG_STEPPED;
1173 if (pVM->rem.s.Env.nb_breakpoints > 0)
1174 {
1175 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1176 int iBP;
1177 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1178 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1179 {
1180 rc = VINF_EM_DBG_BREAKPOINT;
1181 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC);
1182 break;
1183 }
1184 }
1185#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1186 if (rc == VINF_EM_DBG_STEPPED)
1187 {
1188 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1189 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1190 continue;
1191
1192 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1193 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1194 rc = VINF_SUCCESS;
1195 }
1196#endif
1197 break;
1198
1199 /*
1200 * If we take a trap or start servicing a pending interrupt, we might end up here.
1201 * (Timer thread or some other thread wishing EMT's attention.)
1202 */
1203 case EXCP_INTERRUPT:
1204 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1205 rc = VINF_SUCCESS;
1206 break;
1207
1208 /*
1209 * hlt instruction.
1210 */
1211 case EXCP_HLT:
1212 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1213 rc = VINF_EM_HALT;
1214 break;
1215
1216 /*
1217 * The VM has halted.
1218 */
1219 case EXCP_HALTED:
1220 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1221 rc = VINF_EM_HALT;
1222 break;
1223
1224 /*
1225 * Switch to RAW-mode.
1226 */
1227 case EXCP_EXECUTE_RAW:
1228 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1229 rc = VINF_EM_RESCHEDULE_RAW;
1230 break;
1231
1232 /*
1233 * Switch to hardware accelerated RAW-mode.
1234 */
1235 case EXCP_EXECUTE_HWACC:
1236 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1237 rc = VINF_EM_RESCHEDULE_HWACC;
1238 break;
1239
1240 /*
1241 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1242 */
1243 case EXCP_RC:
1244 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1245 rc = pVM->rem.s.rc;
1246 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1247 break;
1248
1249 /*
1250 * Figure out the rest when they arrive....
1251 */
1252 default:
1253 AssertMsgFailed(("rc=%d\n", rc));
1254 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1255 rc = VINF_EM_RESCHEDULE;
1256 break;
1257 }
1258 break;
1259 }
1260
1261#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1262// cpu_single_step(&pVM->rem.s.Env, 0);
1263#else
1264 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1265#endif
1266 return rc;
1267}
1268
1269
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code (typically VINF_SUCCESS or a VINF_EM_* status).
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* Divert to the verbose single-step loop when instruction logging is enabled. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            /* Disabled developer debugging aid - left in place intentionally. */
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the QEMU breakpoint table for the current PC. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1401
1402
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields HWACCM inspects are copied from the QEMU env;
         * hidden attributes are repacked from QEMU's flags layout (>> 8, masked).
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr             = env->ldt.selector;
        Ctx.ldtrHid.u64Base  = env->ldt.base;
        Ctx.ldtrHid.u32Limit = env->ldt.limit;
        Ctx.ldtrHid.Attr.u   = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires both paging and protected mode to be enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable if the (virtual) CPU advertises it. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* PATM patch code is always executed in raw ring-0 mode. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1647
1648
1649/**
1650 * Fetches a code byte.
1651 *
1652 * @returns Success indicator (bool) for ease of use.
1653 * @param env The CPU environment structure.
1654 * @param GCPtrInstr Where to fetch code.
1655 * @param pu8Byte Where to store the byte on success
1656 */
1657bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1658{
1659 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1660 if (RT_SUCCESS(rc))
1661 return true;
1662 return false;
1663}
1664
1665
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env         Pointer to cpu environment.
 * @param   GCPtr       The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     * PGM needs current CR0/CR3/CR4 values; a CR4.VME change additionally
     * requires a TSS resync, so flag that before overwriting pCtx->cr4.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Fall back to a full CR3 sync if the single-page invalidation failed. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1714
1715
#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Encodes the lookup status in the two low bits of the returned pointer
 * (which is why the address must be at least 4-byte aligned):
 *  - (void *)1 : lookup failed (catch-all / unassigned).
 *  - bit 1 set : page is write-monitored (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 *
 * NOTE(review): the fWritable parameter is currently ignored - the call below
 * always requests a writable mapping; confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1738
1739
1740/**
1741 * Called from tlb_protect_code in order to write monitor a code page.
1742 *
1743 * @param env Pointer to the CPU environment.
1744 * @param GCPtr Code page to monitor
1745 */
1746void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1747{
1748#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1749 Assert(env->pVM->rem.s.fInREM);
1750 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1751 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1752 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1753 && !(env->eflags & VM_MASK) /* no V86 mode */
1754 && !HWACCMIsEnabled(env->pVM))
1755 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1756#endif
1757}
1758
1759
1760/**
1761 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1762 *
1763 * @param env Pointer to the CPU environment.
1764 * @param GCPtr Code page to monitor
1765 */
1766void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1767{
1768 Assert(env->pVM->rem.s.fInREM);
1769#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1770 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1771 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1772 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1773 && !(env->eflags & VM_MASK) /* no V86 mode */
1774 && !HWACCMIsEnabled(env->pVM))
1775 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1776#endif
1777}
1778
1779
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env             Pointer to the CPU environment.
 * @param   fGlobal         Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so any flush is a global flush.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     * A CR4.VME change additionally requires a TSS resync, so flag that
     * before overwriting pCtx->cr4.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1824
1825
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     * A CR4.VME change additionally requires a TSS resync.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    /* 32-bit target: no EFER register to propagate. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* An EM scheduling status: propagate it via the pending-RC mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1876
1877
/**
 * Called from compiled code to run dma.
 *
 * The emulated-code profiling bracket is suspended around the PDM call so
 * DMA time isn't accounted as guest execution time.
 *
 * @param   env             Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1889
1890
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * The emulated-code profiling bracket is suspended and a timer bracket
 * opened around TMR3TimerQueuesDo so the two are accounted separately.
 *
 * @param   env             Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1906
1907
/**
 * Record trap occurrence
 *
 * Tracks repeated occurrences of the same CPU exception so a guest stuck in
 * a trap loop can be detected (raises VERR_REM_TOO_MANY_TRAPS after 512
 * repeats of the same trap at the same EIP/CR2).
 *
 * @returns VBox status code
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-trap counters (index 0..254). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (0..0x1f) in protected, non-V86 mode participate in
       the repeat-detection below; everything else just resets the counter. */
    if(   uTrap < 0x20
       && (env->cr[0] & X86_CR0_PE)
       && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap as last time: bump the repeat counter and bail out hard
           once it exceeds 512 (guest is most likely wedged). */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Different trap or different location: restart the repeat count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not an exception we track for loops; just record it. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1967
1968
1969/*
1970 * Clear current active trap
1971 *
1972 * @param pVM VM Handle.
1973 */
1974void remR3TrapClear(PVM pVM)
1975{
1976 pVM->rem.s.cPendingExceptions = 0;
1977 pVM->rem.s.uPendingException = 0;
1978 pVM->rem.s.uPendingExcptEIP = 0;
1979 pVM->rem.s.uPendingExcptCR2 = 0;
1980}
1981
1982
1983/*
1984 * Record previous call instruction addresses
1985 *
1986 * @param env Pointer to the CPU environment.
1987 */
1988void remR3RecordCall(CPUState *env)
1989{
1990 CSAMR3RecordCallAddress(env->pVM, env->eip);
1991}
1992
1993
1994/**
1995 * Syncs the internal REM state with the VM.
1996 *
1997 * This must be called before REMR3Run() is invoked whenever when the REM
1998 * state is not up to date. Calling it several times in a row is not
1999 * permitted.
2000 *
2001 * @returns VBox status code.
2002 *
2003 * @param pVM VM Handle.
2004 * @param pVCpu VMCPU Handle.
2005 *
2006 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2007 * no do this since the majority of the callers don't want any unnecessary of events
2008 * pending that would immediately interrupt execution.
2009 */
2010REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2011{
2012 register const CPUMCTX *pCtx;
2013 register unsigned fFlags;
2014 bool fHiddenSelRegsValid;
2015 unsigned i;
2016 TRPMEVENT enmType;
2017 uint8_t u8TrapNo;
2018 uint32_t uCpl;
2019 int rc;
2020
2021 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2022 Log2(("REMR3State:\n"));
2023
2024 pVM->rem.s.Env.pVCpu = pVCpu;
2025 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2026 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
2027
2028 Assert(!pVM->rem.s.fInREM);
2029 pVM->rem.s.fInStateSync = true;
2030
2031 /*
2032 * If we have to flush TBs, do that immediately.
2033 */
2034 if (pVM->rem.s.fFlushTBs)
2035 {
2036 STAM_COUNTER_INC(&gStatFlushTBs);
2037 tb_flush(&pVM->rem.s.Env);
2038 pVM->rem.s.fFlushTBs = false;
2039 }
2040
2041 /*
2042 * Copy the registers which require no special handling.
2043 */
2044#ifdef TARGET_X86_64
2045 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2046 Assert(R_EAX == 0);
2047 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2048 Assert(R_ECX == 1);
2049 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2050 Assert(R_EDX == 2);
2051 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2052 Assert(R_EBX == 3);
2053 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2054 Assert(R_ESP == 4);
2055 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2056 Assert(R_EBP == 5);
2057 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2058 Assert(R_ESI == 6);
2059 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2060 Assert(R_EDI == 7);
2061 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2062 pVM->rem.s.Env.regs[8] = pCtx->r8;
2063 pVM->rem.s.Env.regs[9] = pCtx->r9;
2064 pVM->rem.s.Env.regs[10] = pCtx->r10;
2065 pVM->rem.s.Env.regs[11] = pCtx->r11;
2066 pVM->rem.s.Env.regs[12] = pCtx->r12;
2067 pVM->rem.s.Env.regs[13] = pCtx->r13;
2068 pVM->rem.s.Env.regs[14] = pCtx->r14;
2069 pVM->rem.s.Env.regs[15] = pCtx->r15;
2070
2071 pVM->rem.s.Env.eip = pCtx->rip;
2072
2073 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2074#else
2075 Assert(R_EAX == 0);
2076 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2077 Assert(R_ECX == 1);
2078 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2079 Assert(R_EDX == 2);
2080 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2081 Assert(R_EBX == 3);
2082 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2083 Assert(R_ESP == 4);
2084 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2085 Assert(R_EBP == 5);
2086 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2087 Assert(R_ESI == 6);
2088 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2089 Assert(R_EDI == 7);
2090 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2091 pVM->rem.s.Env.eip = pCtx->eip;
2092
2093 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2094#endif
2095
2096 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2097
2098 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2099 for (i=0;i<8;i++)
2100 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2101
2102#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2103 /*
2104 * Clear the halted hidden flag (the interrupt waking up the CPU can
2105 * have been dispatched in raw mode).
2106 */
2107 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2108#endif
2109
2110 /*
2111 * Replay invlpg?
2112 */
2113 if (pVM->rem.s.cInvalidatedPages)
2114 {
2115 RTUINT i;
2116
2117 pVM->rem.s.fIgnoreInvlPg = true;
2118 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2119 {
2120 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2121 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2122 }
2123 pVM->rem.s.fIgnoreInvlPg = false;
2124 pVM->rem.s.cInvalidatedPages = 0;
2125 }
2126
2127 /* Replay notification changes. */
2128 REMR3ReplayHandlerNotifications(pVM);
2129
2130 /* Update MSRs; before CRx registers! */
2131 pVM->rem.s.Env.efer = pCtx->msrEFER;
2132 pVM->rem.s.Env.star = pCtx->msrSTAR;
2133 pVM->rem.s.Env.pat = pCtx->msrPAT;
2134#ifdef TARGET_X86_64
2135 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2136 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2137 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2138 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2139
2140 /* Update the internal long mode activate flag according to the new EFER value. */
2141 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2142 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2143 else
2144 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2145#endif
2146
2147 /*
2148 * Registers which are rarely changed and require special handling / order when changed.
2149 */
2150 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2151 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2152 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2153 | CPUM_CHANGED_CR4
2154 | CPUM_CHANGED_CR0
2155 | CPUM_CHANGED_CR3
2156 | CPUM_CHANGED_GDTR
2157 | CPUM_CHANGED_IDTR
2158 | CPUM_CHANGED_SYSENTER_MSR
2159 | CPUM_CHANGED_LDTR
2160 | CPUM_CHANGED_CPUID
2161 | CPUM_CHANGED_FPU_REM
2162 )
2163 )
2164 {
2165 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2166 {
2167 pVM->rem.s.fIgnoreCR3Load = true;
2168 tlb_flush(&pVM->rem.s.Env, true);
2169 pVM->rem.s.fIgnoreCR3Load = false;
2170 }
2171
2172 /* CR4 before CR0! */
2173 if (fFlags & CPUM_CHANGED_CR4)
2174 {
2175 pVM->rem.s.fIgnoreCR3Load = true;
2176 pVM->rem.s.fIgnoreCpuMode = true;
2177 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2178 pVM->rem.s.fIgnoreCpuMode = false;
2179 pVM->rem.s.fIgnoreCR3Load = false;
2180 }
2181
2182 if (fFlags & CPUM_CHANGED_CR0)
2183 {
2184 pVM->rem.s.fIgnoreCR3Load = true;
2185 pVM->rem.s.fIgnoreCpuMode = true;
2186 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2187 pVM->rem.s.fIgnoreCpuMode = false;
2188 pVM->rem.s.fIgnoreCR3Load = false;
2189 }
2190
2191 if (fFlags & CPUM_CHANGED_CR3)
2192 {
2193 pVM->rem.s.fIgnoreCR3Load = true;
2194 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2195 pVM->rem.s.fIgnoreCR3Load = false;
2196 }
2197
2198 if (fFlags & CPUM_CHANGED_GDTR)
2199 {
2200 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2201 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2202 }
2203
2204 if (fFlags & CPUM_CHANGED_IDTR)
2205 {
2206 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2207 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2208 }
2209
2210 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2211 {
2212 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2213 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2214 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2215 }
2216
2217 if (fFlags & CPUM_CHANGED_LDTR)
2218 {
2219 if (fHiddenSelRegsValid)
2220 {
2221 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
2222 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
2223 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2224 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2225 }
2226 else
2227 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2228 }
2229
2230 if (fFlags & CPUM_CHANGED_CPUID)
2231 {
2232 uint32_t u32Dummy;
2233
2234 /*
2235 * Get the CPUID features.
2236 */
2237 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2238 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2239 }
2240
2241 /* Sync FPU state after CR4, CPUID and EFER (!). */
2242 if (fFlags & CPUM_CHANGED_FPU_REM)
2243 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2244 }
2245
2246 /*
2247 * Sync TR unconditionally to make life simpler.
2248 */
2249 pVM->rem.s.Env.tr.selector = pCtx->tr;
2250 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2251 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2252 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2253 /* Note! do_interrupt will fault if the busy flag is still set... */
2254 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2255
2256 /*
2257 * Update selector registers.
2258 * This must be done *after* we've synced gdt, ldt and crX registers
2259 * since we're reading the GDT/LDT om sync_seg. This will happen with
2260 * saved state which takes a quick dip into rawmode for instance.
2261 */
2262 /*
2263 * Stack; Note first check this one as the CPL might have changed. The
2264 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2265 */
2266
2267 if (fHiddenSelRegsValid)
2268 {
2269 /* The hidden selector registers are valid in the CPU context. */
2270 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2271
2272 /* Set current CPL */
2273 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2274
2275 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2276 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2277 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2278 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2279 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2280 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2281 }
2282 else
2283 {
2284 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2285 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2286 {
2287 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2288
2289 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2290 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2291#ifdef VBOX_WITH_STATISTICS
2292 if (pVM->rem.s.Env.segs[R_SS].newselector)
2293 {
2294 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2295 }
2296#endif
2297 }
2298 else
2299 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2300
2301 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2302 {
2303 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2304 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2305#ifdef VBOX_WITH_STATISTICS
2306 if (pVM->rem.s.Env.segs[R_ES].newselector)
2307 {
2308 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2309 }
2310#endif
2311 }
2312 else
2313 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2314
2315 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2316 {
2317 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2318 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2319#ifdef VBOX_WITH_STATISTICS
2320 if (pVM->rem.s.Env.segs[R_CS].newselector)
2321 {
2322 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2323 }
2324#endif
2325 }
2326 else
2327 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2328
2329 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2330 {
2331 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2332 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2333#ifdef VBOX_WITH_STATISTICS
2334 if (pVM->rem.s.Env.segs[R_DS].newselector)
2335 {
2336 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2337 }
2338#endif
2339 }
2340 else
2341 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2342
2343 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2344 * be the same but not the base/limit. */
2345 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2346 {
2347 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2348 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2349#ifdef VBOX_WITH_STATISTICS
2350 if (pVM->rem.s.Env.segs[R_FS].newselector)
2351 {
2352 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2353 }
2354#endif
2355 }
2356 else
2357 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2358
2359 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2360 {
2361 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2362 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2363#ifdef VBOX_WITH_STATISTICS
2364 if (pVM->rem.s.Env.segs[R_GS].newselector)
2365 {
2366 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2367 }
2368#endif
2369 }
2370 else
2371 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2372 }
2373
2374 /*
2375 * Check for traps.
2376 */
2377 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2378 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2379 if (RT_SUCCESS(rc))
2380 {
2381#ifdef DEBUG
2382 if (u8TrapNo == 0x80)
2383 {
2384 remR3DumpLnxSyscall(pVCpu);
2385 remR3DumpOBsdSyscall(pVCpu);
2386 }
2387#endif
2388
2389 pVM->rem.s.Env.exception_index = u8TrapNo;
2390 if (enmType != TRPM_SOFTWARE_INT)
2391 {
2392 pVM->rem.s.Env.exception_is_int = 0;
2393 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2394 }
2395 else
2396 {
2397 /*
2398 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2399 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2400 * for int03 and into.
2401 */
2402 pVM->rem.s.Env.exception_is_int = 1;
2403 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2404 /* int 3 may be generated by one-byte 0xcc */
2405 if (u8TrapNo == 3)
2406 {
2407 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2408 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2409 }
2410 /* int 4 may be generated by one-byte 0xce */
2411 else if (u8TrapNo == 4)
2412 {
2413 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2414 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2415 }
2416 }
2417
2418 /* get error code and cr2 if needed. */
2419 switch (u8TrapNo)
2420 {
2421 case 0x0e:
2422 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2423 /* fallthru */
2424 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2425 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2426 break;
2427
2428 case 0x11: case 0x08:
2429 default:
2430 pVM->rem.s.Env.error_code = 0;
2431 break;
2432 }
2433
2434 /*
2435 * We can now reset the active trap since the recompiler is gonna have a go at it.
2436 */
2437 rc = TRPMResetTrap(pVCpu);
2438 AssertRC(rc);
2439 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2440 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2441 }
2442
2443 /*
2444 * Clear old interrupt request flags; Check for pending hardware interrupts.
2445 * (See @remark for why we don't check for other FFs.)
2446 */
2447 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2448 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2449 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2450 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2451
2452 /*
2453 * We're now in REM mode.
2454 */
2455 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2456 pVM->rem.s.fInREM = true;
2457 pVM->rem.s.fInStateSync = false;
2458 pVM->rem.s.cCanExecuteRaw = 0;
2459 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2460 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2461 return VINF_SUCCESS;
2462}
2463
2464
2465/**
2466 * Syncs back changes in the REM state to the the VM state.
2467 *
2468 * This must be called after invoking REMR3Run().
2469 * Calling it several times in a row is not permitted.
2470 *
2471 * @returns VBox status code.
2472 *
2473 * @param pVM VM Handle.
2474 * @param pVCpu VMCPU Handle.
2475 */
2476REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2477{
2478 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2479 Assert(pCtx);
2480 unsigned i;
2481
2482 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2483 Log2(("REMR3StateBack:\n"));
2484 Assert(pVM->rem.s.fInREM);
2485
2486 /*
2487 * Copy back the registers.
2488 * This is done in the order they are declared in the CPUMCTX structure.
2489 */
2490
2491 /** @todo FOP */
2492 /** @todo FPUIP */
2493 /** @todo CS */
2494 /** @todo FPUDP */
2495 /** @todo DS */
2496
2497 /** @todo check if FPU/XMM was actually used in the recompiler */
2498 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2499//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2500
2501#ifdef TARGET_X86_64
2502 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2503 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2504 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2505 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2506 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2507 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2508 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2509 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2510 pCtx->r8 = pVM->rem.s.Env.regs[8];
2511 pCtx->r9 = pVM->rem.s.Env.regs[9];
2512 pCtx->r10 = pVM->rem.s.Env.regs[10];
2513 pCtx->r11 = pVM->rem.s.Env.regs[11];
2514 pCtx->r12 = pVM->rem.s.Env.regs[12];
2515 pCtx->r13 = pVM->rem.s.Env.regs[13];
2516 pCtx->r14 = pVM->rem.s.Env.regs[14];
2517 pCtx->r15 = pVM->rem.s.Env.regs[15];
2518
2519 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2520
2521#else
2522 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2523 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2524 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2525 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2526 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2527 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2528 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2529
2530 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2531#endif
2532
2533 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2534
2535#ifdef VBOX_WITH_STATISTICS
2536 if (pVM->rem.s.Env.segs[R_SS].newselector)
2537 {
2538 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2539 }
2540 if (pVM->rem.s.Env.segs[R_GS].newselector)
2541 {
2542 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2543 }
2544 if (pVM->rem.s.Env.segs[R_FS].newselector)
2545 {
2546 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2547 }
2548 if (pVM->rem.s.Env.segs[R_ES].newselector)
2549 {
2550 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2551 }
2552 if (pVM->rem.s.Env.segs[R_DS].newselector)
2553 {
2554 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2555 }
2556 if (pVM->rem.s.Env.segs[R_CS].newselector)
2557 {
2558 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2559 }
2560#endif
2561 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2562 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2563 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2564 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2565 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2566
2567#ifdef TARGET_X86_64
2568 pCtx->rip = pVM->rem.s.Env.eip;
2569 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2570#else
2571 pCtx->eip = pVM->rem.s.Env.eip;
2572 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2573#endif
2574
2575 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2576 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2577 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2578 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2579 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2580 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2581
2582 for (i = 0; i < 8; i++)
2583 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2584
2585 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2586 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2587 {
2588 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2589 STAM_COUNTER_INC(&gStatREMGDTChange);
2590 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2591 }
2592
2593 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2594 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2595 {
2596 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2597 STAM_COUNTER_INC(&gStatREMIDTChange);
2598 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2599 }
2600
2601 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2602 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2603 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2604 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2605 {
2606 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2607 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2608 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2609 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2610 STAM_COUNTER_INC(&gStatREMLDTRChange);
2611 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2612 }
2613
2614 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2615 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2616 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2617 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2618 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2619 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2620 : 0) )
2621 {
2622 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2623 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2624 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2625 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2626 pCtx->tr = pVM->rem.s.Env.tr.selector;
2627 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2628 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2629 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2630 if (pCtx->trHid.Attr.u)
2631 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2632 STAM_COUNTER_INC(&gStatREMTRChange);
2633 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2634 }
2635
2636 /** @todo These values could still be out of sync! */
2637 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2638 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2639 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2640 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2641
2642 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2643 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2644 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2645
2646 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2647 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2648 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2649
2650 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2651 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2652 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2653
2654 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2655 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2656 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2657
2658 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2659 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2660 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2661
2662 /* Sysenter MSR */
2663 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2664 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2665 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2666
2667 /* System MSRs. */
2668 pCtx->msrEFER = pVM->rem.s.Env.efer;
2669 pCtx->msrSTAR = pVM->rem.s.Env.star;
2670 pCtx->msrPAT = pVM->rem.s.Env.pat;
2671#ifdef TARGET_X86_64
2672 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2673 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2674 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2675 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2676#endif
2677
2678 remR3TrapClear(pVM);
2679
2680 /*
2681 * Check for traps.
2682 */
2683 if ( pVM->rem.s.Env.exception_index >= 0
2684 && pVM->rem.s.Env.exception_index < 256)
2685 {
2686 int rc;
2687
2688 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2689 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2690 AssertRC(rc);
2691 switch (pVM->rem.s.Env.exception_index)
2692 {
2693 case 0x0e:
2694 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2695 /* fallthru */
2696 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2697 case 0x11: case 0x08: /* 0 */
2698 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2699 break;
2700 }
2701
2702 }
2703
2704 /*
2705 * We're not longer in REM mode.
2706 */
2707 CPUMR3RemLeave(pVCpu,
2708 HWACCMIsEnabled(pVM)
2709 || ( pVM->rem.s.Env.segs[R_SS].newselector
2710 | pVM->rem.s.Env.segs[R_GS].newselector
2711 | pVM->rem.s.Env.segs[R_FS].newselector
2712 | pVM->rem.s.Env.segs[R_ES].newselector
2713 | pVM->rem.s.Env.segs[R_DS].newselector
2714 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2715 );
2716 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2717 pVM->rem.s.fInREM = false;
2718 pVM->rem.s.pCtx = NULL;
2719 pVM->rem.s.Env.pVCpu = NULL;
2720 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2721 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2722 return VINF_SUCCESS;
2723}
2724
2725
2726/**
2727 * This is called by the disassembler when it wants to update the cpu state
2728 * before for instance doing a register dump.
2729 */
2730static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2731{
2732 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2733 unsigned i;
2734
2735 Assert(pVM->rem.s.fInREM);
2736
2737 /*
2738 * Copy back the registers.
2739 * This is done in the order they are declared in the CPUMCTX structure.
2740 */
2741
2742 /** @todo FOP */
2743 /** @todo FPUIP */
2744 /** @todo CS */
2745 /** @todo FPUDP */
2746 /** @todo DS */
2747 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2748 pCtx->fpu.MXCSR = 0;
2749 pCtx->fpu.MXCSR_MASK = 0;
2750
2751 /** @todo check if FPU/XMM was actually used in the recompiler */
2752 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2753//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2754
2755#ifdef TARGET_X86_64
2756 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2757 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2758 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2759 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2760 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2761 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2762 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2763 pCtx->r8 = pVM->rem.s.Env.regs[8];
2764 pCtx->r9 = pVM->rem.s.Env.regs[9];
2765 pCtx->r10 = pVM->rem.s.Env.regs[10];
2766 pCtx->r11 = pVM->rem.s.Env.regs[11];
2767 pCtx->r12 = pVM->rem.s.Env.regs[12];
2768 pCtx->r13 = pVM->rem.s.Env.regs[13];
2769 pCtx->r14 = pVM->rem.s.Env.regs[14];
2770 pCtx->r15 = pVM->rem.s.Env.regs[15];
2771
2772 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2773#else
2774 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2775 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2776 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2777 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2778 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2779 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2780 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2781
2782 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2783#endif
2784
2785 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2786
2787 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2788 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2789 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2790 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2791 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2792
2793#ifdef TARGET_X86_64
2794 pCtx->rip = pVM->rem.s.Env.eip;
2795 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2796#else
2797 pCtx->eip = pVM->rem.s.Env.eip;
2798 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2799#endif
2800
2801 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2802 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2803 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2804 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2805 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2806 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2807
2808 for (i = 0; i < 8; i++)
2809 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2810
2811 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2812 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2813 {
2814 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2815 STAM_COUNTER_INC(&gStatREMGDTChange);
2816 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2817 }
2818
2819 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2820 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2821 {
2822 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2823 STAM_COUNTER_INC(&gStatREMIDTChange);
2824 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2825 }
2826
2827 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2828 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2829 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2830 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2831 {
2832 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2833 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2834 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2835 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2836 STAM_COUNTER_INC(&gStatREMLDTRChange);
2837 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2838 }
2839
2840 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2841 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2842 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2843 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2844 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2845 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2846 : 0) )
2847 {
2848 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2849 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2850 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2851 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2852 pCtx->tr = pVM->rem.s.Env.tr.selector;
2853 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2854 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2855 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2856 if (pCtx->trHid.Attr.u)
2857 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2858 STAM_COUNTER_INC(&gStatREMTRChange);
2859 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2860 }
2861
2862 /** @todo These values could still be out of sync! */
2863 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2864 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2865 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2866 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2867
2868 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2869 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2870 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2871
2872 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2873 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2874 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2875
2876 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2877 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2878 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2879
2880 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2881 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2882 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2883
2884 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2885 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2886 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2887
2888 /* Sysenter MSR */
2889 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2890 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2891 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2892
2893 /* System MSRs. */
2894 pCtx->msrEFER = pVM->rem.s.Env.efer;
2895 pCtx->msrSTAR = pVM->rem.s.Env.star;
2896 pCtx->msrPAT = pVM->rem.s.Env.pat;
2897#ifdef TARGET_X86_64
2898 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2899 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2900 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2901 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2902#endif
2903
2904}
2905
2906
2907/**
2908 * Update the VMM state information if we're currently in REM.
2909 *
2910 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2911 * we're currently executing in REM and the VMM state is invalid. This method will of
2912 * course check that we're executing in REM before syncing any data over to the VMM.
2913 *
2914 * @param pVM The VM handle.
2915 * @param pVCpu The VMCPU handle.
2916 */
2917REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2918{
2919 if (pVM->rem.s.fInREM)
2920 remR3StateUpdate(pVM, pVCpu);
2921}
2922
2923
/* Restore the default REM log group for the notification APIs below. */
#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM
2926
2927
2928/**
2929 * Notify the recompiler about Address Gate 20 state change.
2930 *
2931 * This notification is required since A20 gate changes are
2932 * initialized from a device driver and the VM might just as
2933 * well be in REM mode as in RAW mode.
2934 *
2935 * @param pVM VM handle.
2936 * @param pVCpu VMCPU handle.
2937 * @param fEnable True if the gate should be enabled.
2938 * False if the gate should be disabled.
2939 */
2940REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2941{
2942 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2943 VM_ASSERT_EMT(pVM);
2944
2945 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2946 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2947 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2948}
2949
2950
2951/**
2952 * Replays the handler notification changes
2953 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2954 *
2955 * @param pVM VM handle.
2956 */
2957REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2958{
2959 /*
2960 * Replay the flushes.
2961 */
2962 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2963 VM_ASSERT_EMT(pVM);
2964
2965 /** @todo this isn't ensuring correct replay order. */
2966 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2967 {
2968 uint32_t idxNext;
2969 uint32_t idxRevHead;
2970 uint32_t idxHead;
2971#ifdef VBOX_STRICT
2972 int32_t c = 0;
2973#endif
2974
2975 /* Lockless purging of pending notifications. */
2976 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2977 if (idxHead == UINT32_MAX)
2978 return;
2979 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2980
2981 /*
2982 * Reverse the list to process it in FIFO order.
2983 */
2984 idxRevHead = UINT32_MAX;
2985 do
2986 {
2987 /* Save the index of the next rec. */
2988 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
2989 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
2990 /* Push the record onto the reversed list. */
2991 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
2992 idxRevHead = idxHead;
2993 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2994 /* Advance. */
2995 idxHead = idxNext;
2996 } while (idxHead != UINT32_MAX);
2997
2998 /*
2999 * Loop thru the list, reinserting the record into the free list as they are
3000 * processed to avoid having other EMTs running out of entries while we're flushing.
3001 */
3002 idxHead = idxRevHead;
3003 do
3004 {
3005 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3006 uint32_t idxCur;
3007 Assert(--c >= 0);
3008
3009 switch (pCur->enmKind)
3010 {
3011 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3012 remR3NotifyHandlerPhysicalRegister(pVM,
3013 pCur->u.PhysicalRegister.enmType,
3014 pCur->u.PhysicalRegister.GCPhys,
3015 pCur->u.PhysicalRegister.cb,
3016 pCur->u.PhysicalRegister.fHasHCHandler);
3017 break;
3018
3019 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3020 remR3NotifyHandlerPhysicalDeregister(pVM,
3021 pCur->u.PhysicalDeregister.enmType,
3022 pCur->u.PhysicalDeregister.GCPhys,
3023 pCur->u.PhysicalDeregister.cb,
3024 pCur->u.PhysicalDeregister.fHasHCHandler,
3025 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3026 break;
3027
3028 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3029 remR3NotifyHandlerPhysicalModify(pVM,
3030 pCur->u.PhysicalModify.enmType,
3031 pCur->u.PhysicalModify.GCPhysOld,
3032 pCur->u.PhysicalModify.GCPhysNew,
3033 pCur->u.PhysicalModify.cb,
3034 pCur->u.PhysicalModify.fHasHCHandler,
3035 pCur->u.PhysicalModify.fRestoreAsRAM);
3036 break;
3037
3038 default:
3039 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3040 break;
3041 }
3042
3043 /*
3044 * Advance idxHead.
3045 */
3046 idxCur = idxHead;
3047 idxHead = pCur->idxNext;
3048 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3049
3050 /*
3051 * Put the record back into the free list.
3052 */
3053 do
3054 {
3055 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3056 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3057 ASMCompilerBarrier();
3058 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3059 } while (idxHead != UINT32_MAX);
3060
3061#ifdef VBOX_STRICT
3062 if (pVM->cCpus == 1)
3063 {
3064 unsigned c;
3065 /* Check that all records are now on the free list. */
3066 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3067 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3068 c++;
3069 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3070 }
3071#endif
3072 }
3073}
3074
3075
3076/**
3077 * Notify REM about changed code page.
3078 *
3079 * @returns VBox status code.
3080 * @param pVM VM handle.
3081 * @param pVCpu VMCPU handle.
3082 * @param pvCodePage Code page address
3083 */
3084REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3085{
3086#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3087 int rc;
3088 RTGCPHYS PhysGC;
3089 uint64_t flags;
3090
3091 VM_ASSERT_EMT(pVM);
3092
3093 /*
3094 * Get the physical page address.
3095 */
3096 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3097 if (rc == VINF_SUCCESS)
3098 {
3099 /*
3100 * Sync the required registers and flush the whole page.
3101 * (Easier to do the whole page than notifying it about each physical
3102 * byte that was changed.
3103 */
3104 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3105 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3106 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3107 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3108
3109 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3110 }
3111#endif
3112 return VINF_SUCCESS;
3113}
3114
3115
3116/**
3117 * Notification about a successful MMR3PhysRegister() call.
3118 *
3119 * @param pVM VM handle.
3120 * @param GCPhys The physical address the RAM.
3121 * @param cb Size of the memory.
3122 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3123 */
3124REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3125{
3126 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3127 VM_ASSERT_EMT(pVM);
3128
3129 /*
3130 * Validate input - we trust the caller.
3131 */
3132 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3133 Assert(cb);
3134 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3135 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3136
3137 /*
3138 * Base ram? Update GCPhysLastRam.
3139 */
3140 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3141 {
3142 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3143 {
3144 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3145 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3146 }
3147 }
3148
3149 /*
3150 * Register the ram.
3151 */
3152 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3153
3154 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3155 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3156 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3157
3158 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3159}
3160
3161
3162/**
3163 * Notification about a successful MMR3PhysRomRegister() call.
3164 *
3165 * @param pVM VM handle.
3166 * @param GCPhys The physical address of the ROM.
3167 * @param cb The size of the ROM.
3168 * @param pvCopy Pointer to the ROM copy.
3169 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3170 * This function will be called when ever the protection of the
3171 * shadow ROM changes (at reset and end of POST).
3172 */
3173REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3174{
3175 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3176 VM_ASSERT_EMT(pVM);
3177
3178 /*
3179 * Validate input - we trust the caller.
3180 */
3181 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3182 Assert(cb);
3183 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3184
3185 /*
3186 * Register the rom.
3187 */
3188 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3189
3190 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3191 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
3192 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3193
3194 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3195}
3196
3197
3198/**
3199 * Notification about a successful memory deregistration or reservation.
3200 *
3201 * @param pVM VM Handle.
3202 * @param GCPhys Start physical address.
3203 * @param cb The size of the range.
3204 */
3205REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3206{
3207 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3208 VM_ASSERT_EMT(pVM);
3209
3210 /*
3211 * Validate input - we trust the caller.
3212 */
3213 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3214 Assert(cb);
3215 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3216
3217 /*
3218 * Unassigning the memory.
3219 */
3220 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3221
3222 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3223 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3224 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3225
3226 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3227}
3228
3229
3230/**
3231 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3232 *
3233 * @param pVM VM Handle.
3234 * @param enmType Handler type.
3235 * @param GCPhys Handler range address.
3236 * @param cb Size of the handler range.
3237 * @param fHasHCHandler Set if the handler has a HC callback function.
3238 *
3239 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3240 * Handler memory type to memory which has no HC handler.
3241 */
3242static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3243{
3244 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3245 enmType, GCPhys, cb, fHasHCHandler));
3246
3247 VM_ASSERT_EMT(pVM);
3248 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3249 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3250
3251
3252 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3253
3254 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3255 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3256 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3257 else if (fHasHCHandler)
3258 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3259 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3260
3261 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3262}
3263
3264/**
3265 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3266 *
3267 * @param pVM VM Handle.
3268 * @param enmType Handler type.
3269 * @param GCPhys Handler range address.
3270 * @param cb Size of the handler range.
3271 * @param fHasHCHandler Set if the handler has a HC callback function.
3272 *
3273 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3274 * Handler memory type to memory which has no HC handler.
3275 */
3276REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3277{
3278 REMR3ReplayHandlerNotifications(pVM);
3279
3280 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3281}
3282
3283/**
3284 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3285 *
3286 * @param pVM VM Handle.
3287 * @param enmType Handler type.
3288 * @param GCPhys Handler range address.
3289 * @param cb Size of the handler range.
3290 * @param fHasHCHandler Set if the handler has a HC callback function.
3291 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3292 */
3293static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3294{
3295 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3296 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3297 VM_ASSERT_EMT(pVM);
3298
3299
3300 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3301
3302 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3303 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3304 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3305 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3306 else if (fHasHCHandler)
3307 {
3308 if (!fRestoreAsRAM)
3309 {
3310 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3311 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3312 }
3313 else
3314 {
3315 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3316 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3317 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3318 }
3319 }
3320 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3321
3322 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3323}
3324
3325/**
3326 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3327 *
3328 * @param pVM VM Handle.
3329 * @param enmType Handler type.
3330 * @param GCPhys Handler range address.
3331 * @param cb Size of the handler range.
3332 * @param fHasHCHandler Set if the handler has a HC callback function.
3333 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3334 */
3335REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3336{
3337 REMR3ReplayHandlerNotifications(pVM);
3338 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3339}
3340
3341
3342/**
3343 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3344 *
3345 * @param pVM VM Handle.
3346 * @param enmType Handler type.
3347 * @param GCPhysOld Old handler range address.
3348 * @param GCPhysNew New handler range address.
3349 * @param cb Size of the handler range.
3350 * @param fHasHCHandler Set if the handler has a HC callback function.
3351 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3352 */
3353static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3354{
3355 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3356 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3357 VM_ASSERT_EMT(pVM);
3358 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3359
3360 if (fHasHCHandler)
3361 {
3362 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3363
3364 /*
3365 * Reset the old page.
3366 */
3367 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3368 if (!fRestoreAsRAM)
3369 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3370 else
3371 {
3372 /* This is not perfect, but it'll do for PD monitoring... */
3373 Assert(cb == PAGE_SIZE);
3374 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3375 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3376 }
3377
3378 /*
3379 * Update the new page.
3380 */
3381 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3382 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3383 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3384 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3385
3386 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3387 }
3388}
3389
3390/**
3391 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3392 *
3393 * @param pVM VM Handle.
3394 * @param enmType Handler type.
3395 * @param GCPhysOld Old handler range address.
3396 * @param GCPhysNew New handler range address.
3397 * @param cb Size of the handler range.
3398 * @param fHasHCHandler Set if the handler has a HC callback function.
3399 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3400 */
3401REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3402{
3403 REMR3ReplayHandlerNotifications(pVM);
3404
3405 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3406}
3407
3408/**
3409 * Checks if we're handling access to this page or not.
3410 *
3411 * @returns true if we're trapping access.
3412 * @returns false if we aren't.
3413 * @param pVM The VM handle.
3414 * @param GCPhys The physical address.
3415 *
3416 * @remark This function will only work correctly in VBOX_STRICT builds!
3417 */
3418REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3419{
3420#ifdef VBOX_STRICT
3421 unsigned long off;
3422 REMR3ReplayHandlerNotifications(pVM);
3423
3424 off = get_phys_page_offset(GCPhys);
3425 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3426 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3427 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3428#else
3429 return false;
3430#endif
3431}
3432
3433
3434/**
3435 * Deals with a rare case in get_phys_addr_code where the code
3436 * is being monitored.
3437 *
3438 * It could also be an MMIO page, in which case we will raise a fatal error.
3439 *
3440 * @returns The physical address corresponding to addr.
3441 * @param env The cpu environment.
3442 * @param addr The virtual address.
3443 * @param pTLBEntry The TLB entry.
3444 */
3445target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3446 target_ulong addr,
3447 CPUTLBEntry* pTLBEntry,
3448 target_phys_addr_t ioTLBEntry)
3449{
3450 PVM pVM = env->pVM;
3451
3452 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3453 {
3454 /* If code memory is being monitored, appropriate IOTLB entry will have
3455 handler IO type, and addend will provide real physical address, no
3456 matter if we store VA in TLB or not, as handlers are always passed PA */
3457 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3458 return ret;
3459 }
3460 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3461 "*** handlers\n",
3462 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3463 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3464 LogRel(("*** mmio\n"));
3465 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3466 LogRel(("*** phys\n"));
3467 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3468 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3469 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3470 AssertFatalFailed();
3471}
3472
3473/**
3474 * Read guest RAM and ROM.
3475 *
3476 * @param SrcGCPhys The source address (guest physical).
3477 * @param pvDst The destination address.
3478 * @param cb Number of bytes
3479 */
3480void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3481{
3482 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3483 VBOX_CHECK_ADDR(SrcGCPhys);
3484 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3485#ifdef VBOX_DEBUG_PHYS
3486 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3487#endif
3488 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3489}
3490
3491
3492/**
3493 * Read guest RAM and ROM, unsigned 8-bit.
3494 *
3495 * @param SrcGCPhys The source address (guest physical).
3496 */
3497RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3498{
3499 uint8_t val;
3500 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3501 VBOX_CHECK_ADDR(SrcGCPhys);
3502 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3503 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3504#ifdef VBOX_DEBUG_PHYS
3505 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3506#endif
3507 return val;
3508}
3509
3510
3511/**
3512 * Read guest RAM and ROM, signed 8-bit.
3513 *
3514 * @param SrcGCPhys The source address (guest physical).
3515 */
3516RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3517{
3518 int8_t val;
3519 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3520 VBOX_CHECK_ADDR(SrcGCPhys);
3521 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3522 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3523#ifdef VBOX_DEBUG_PHYS
3524 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3525#endif
3526 return val;
3527}
3528
3529
3530/**
3531 * Read guest RAM and ROM, unsigned 16-bit.
3532 *
3533 * @param SrcGCPhys The source address (guest physical).
3534 */
3535RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3536{
3537 uint16_t val;
3538 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3539 VBOX_CHECK_ADDR(SrcGCPhys);
3540 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3541 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3542#ifdef VBOX_DEBUG_PHYS
3543 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3544#endif
3545 return val;
3546}
3547
3548
3549/**
3550 * Read guest RAM and ROM, signed 16-bit.
3551 *
3552 * @param SrcGCPhys The source address (guest physical).
3553 */
3554RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3555{
3556 int16_t val;
3557 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3558 VBOX_CHECK_ADDR(SrcGCPhys);
3559 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3560 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3561#ifdef VBOX_DEBUG_PHYS
3562 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3563#endif
3564 return val;
3565}
3566
3567
3568/**
3569 * Read guest RAM and ROM, unsigned 32-bit.
3570 *
3571 * @param SrcGCPhys The source address (guest physical).
3572 */
3573RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3574{
3575 uint32_t val;
3576 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3577 VBOX_CHECK_ADDR(SrcGCPhys);
3578 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3579 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3580#ifdef VBOX_DEBUG_PHYS
3581 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3582#endif
3583 return val;
3584}
3585
3586
3587/**
3588 * Read guest RAM and ROM, signed 32-bit.
3589 *
3590 * @param SrcGCPhys The source address (guest physical).
3591 */
3592RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3593{
3594 int32_t val;
3595 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3596 VBOX_CHECK_ADDR(SrcGCPhys);
3597 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3598 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3599#ifdef VBOX_DEBUG_PHYS
3600 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3601#endif
3602 return val;
3603}
3604
3605
3606/**
3607 * Read guest RAM and ROM, unsigned 64-bit.
3608 *
3609 * @param SrcGCPhys The source address (guest physical).
3610 */
3611uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3612{
3613 uint64_t val;
3614 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3615 VBOX_CHECK_ADDR(SrcGCPhys);
3616 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3617 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3618#ifdef VBOX_DEBUG_PHYS
3619 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3620#endif
3621 return val;
3622}
3623
3624
3625/**
3626 * Read guest RAM and ROM, signed 64-bit.
3627 *
3628 * @param SrcGCPhys The source address (guest physical).
3629 */
3630int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3631{
3632 int64_t val;
3633 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3634 VBOX_CHECK_ADDR(SrcGCPhys);
3635 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3636 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3637#ifdef VBOX_DEBUG_PHYS
3638 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3639#endif
3640 return val;
3641}
3642
3643
3644/**
3645 * Write guest RAM.
3646 *
3647 * @param DstGCPhys The destination address (guest physical).
3648 * @param pvSrc The source address.
3649 * @param cb Number of bytes to write
3650 */
3651void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3652{
3653 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3654 VBOX_CHECK_ADDR(DstGCPhys);
3655 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3656 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3657#ifdef VBOX_DEBUG_PHYS
3658 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3659#endif
3660}
3661
3662
3663/**
3664 * Write guest RAM, unsigned 8-bit.
3665 *
3666 * @param DstGCPhys The destination address (guest physical).
3667 * @param val Value
3668 */
3669void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3670{
3671 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3672 VBOX_CHECK_ADDR(DstGCPhys);
3673 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3674 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3675#ifdef VBOX_DEBUG_PHYS
3676 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3677#endif
3678}
3679
3680
3681/**
3682 * Write guest RAM, unsigned 8-bit.
3683 *
3684 * @param DstGCPhys The destination address (guest physical).
3685 * @param val Value
3686 */
3687void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3688{
3689 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3690 VBOX_CHECK_ADDR(DstGCPhys);
3691 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3692 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3693#ifdef VBOX_DEBUG_PHYS
3694 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3695#endif
3696}
3697
3698
3699/**
3700 * Write guest RAM, unsigned 32-bit.
3701 *
3702 * @param DstGCPhys The destination address (guest physical).
3703 * @param val Value
3704 */
3705void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3706{
3707 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3708 VBOX_CHECK_ADDR(DstGCPhys);
3709 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3710 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3711#ifdef VBOX_DEBUG_PHYS
3712 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3713#endif
3714}
3715
3716
3717/**
3718 * Write guest RAM, unsigned 64-bit.
3719 *
3720 * @param DstGCPhys The destination address (guest physical).
3721 * @param val Value
3722 */
3723void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3724{
3725 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3726 VBOX_CHECK_ADDR(DstGCPhys);
3727 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3728 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3729#ifdef VBOX_DEBUG_PHYS
3730 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3731#endif
3732}
3733
/* The MMIO accessors below log under the REM_MMIO group. */
#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM_MMIO
3736
3737/** Read MMIO memory. */
3738static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3739{
3740 uint32_t u32 = 0;
3741 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3742 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3743 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3744 return u32;
3745}
3746
3747/** Read MMIO memory. */
3748static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3749{
3750 uint32_t u32 = 0;
3751 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3752 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3753 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3754 return u32;
3755}
3756
3757/** Read MMIO memory. */
3758static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3759{
3760 uint32_t u32 = 0;
3761 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3762 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3763 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3764 return u32;
3765}
3766
3767/** Write to MMIO memory. */
3768static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3769{
3770 int rc;
3771 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3772 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3773 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3774}
3775
3776/** Write to MMIO memory. */
3777static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3778{
3779 int rc;
3780 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3781 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3782 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3783}
3784
3785/** Write to MMIO memory. */
3786static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3787{
3788 int rc;
3789 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3790 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3791 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3792}
3793
3794
3795#undef LOG_GROUP
3796#define LOG_GROUP LOG_GROUP_REM_HANDLER
3797
3798/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3799
3800static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3801{
3802 uint8_t u8;
3803 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3804 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3805 return u8;
3806}
3807
3808static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3809{
3810 uint16_t u16;
3811 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3812 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3813 return u16;
3814}
3815
3816static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3817{
3818 uint32_t u32;
3819 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3820 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3821 return u32;
3822}
3823
/** Write handled RAM (see the LFB warning above), unsigned 8-bit. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    /* NOTE(review): passes &u32 with sizeof(uint8_t), i.e. writes the first
       byte of u32, which is the low byte only on little-endian hosts --
       confirm big-endian hosts are out of scope. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3829
/** Write handled RAM (see the LFB warning above), unsigned 16-bit. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    /* NOTE(review): writes the first two bytes of u32 -- the low word only on
       little-endian hosts; see the 8-bit variant. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3835
/** Write handled RAM (see the LFB warning above), unsigned 32-bit. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3841
3842/* -+- disassembly -+- */
3843
3844#undef LOG_GROUP
3845#define LOG_GROUP LOG_GROUP_REM_DISAS
3846
3847
3848/**
3849 * Enables or disables singled stepped disassembly.
3850 *
3851 * @returns VBox status code.
3852 * @param pVM VM handle.
3853 * @param fEnable To enable set this flag, to disable clear it.
3854 */
3855static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3856{
3857 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3858 VM_ASSERT_EMT(pVM);
3859
3860 if (fEnable)
3861 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3862 else
3863 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3864#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3865 cpu_single_step(&pVM->rem.s.Env, fEnable);
3866#endif
3867 return VINF_SUCCESS;
3868}
3869
3870
3871/**
3872 * Enables or disables singled stepped disassembly.
3873 *
3874 * @returns VBox status code.
3875 * @param pVM VM handle.
3876 * @param fEnable To enable set this flag, to disable clear it.
3877 */
3878REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3879{
3880 int rc;
3881
3882 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3883 if (VM_IS_EMT(pVM))
3884 return remR3DisasEnableStepping(pVM, fEnable);
3885
3886 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3887 AssertRC(rc);
3888 return rc;
3889}
3890
3891
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument it reports the current state; with one boolean argument it
 * switches single stepped disassembly on or off.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    int  rc;
    bool fEnable;

    /* No arguments: just print the current status. */
    if (cArgs == 0)
        return DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
                                pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* Convert the argument and change the mode. */
    rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");

    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
    return DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
}
#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3927
3928
3929/**
3930 * Disassembles one instruction and prints it to the log.
3931 *
3932 * @returns Success indicator.
3933 * @param env Pointer to the recompiler CPU structure.
3934 * @param f32BitCode Indicates that whether or not the code should
3935 * be disassembled as 16 or 32 bit. If -1 the CS
3936 * selector will be inspected.
3937 * @param pszPrefix
3938 */
3939bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3940{
3941 PVM pVM = env->pVM;
3942 const bool fLog = LogIsEnabled();
3943 const bool fLog2 = LogIs2Enabled();
3944 int rc = VINF_SUCCESS;
3945
3946 /*
3947 * Don't bother if there ain't any log output to do.
3948 */
3949 if (!fLog && !fLog2)
3950 return true;
3951
3952 /*
3953 * Update the state so DBGF reads the correct register values.
3954 */
3955 remR3StateUpdate(pVM, env->pVCpu);
3956
3957 /*
3958 * Log registers if requested.
3959 */
3960 if (fLog2)
3961 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3962
3963 /*
3964 * Disassemble to log.
3965 */
3966 if (fLog)
3967 {
3968 PVMCPU pVCpu = VMMGetCpu(pVM);
3969 char szBuf[256];
3970 szBuf[0] = '\0';
3971 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3972 pVCpu->idCpu,
3973 0, /* Sel */
3974 0, /* GCPtr */
3975 DBGF_DISAS_FLAGS_CURRENT_GUEST
3976 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3977 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3978 szBuf,
3979 sizeof(szBuf),
3980 NULL);
3981 if (RT_FAILURE(rc))
3982 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3983 if (pszPrefix && *pszPrefix)
3984 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3985 else
3986 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3987 }
3988
3989 return RT_SUCCESS(rc);
3990}
3991
3992
3993/**
3994 * Disassemble recompiled code.
3995 *
3996 * @param phFileIgnored Ignored, logfile usually.
3997 * @param pvCode Pointer to the code block.
3998 * @param cb Size of the code block.
3999 */
4000void disas(FILE *phFile, void *pvCode, unsigned long cb)
4001{
4002#ifdef DEBUG_TMP_LOGGING
4003# define DISAS_PRINTF(x...) fprintf(phFile, x)
4004#else
4005# define DISAS_PRINTF(x...) RTLogPrintf(x)
4006 if (LogIs2Enabled())
4007#endif
4008 {
4009 unsigned off = 0;
4010 char szOutput[256];
4011 DISCPUSTATE Cpu;
4012
4013 memset(&Cpu, 0, sizeof(Cpu));
4014#ifdef RT_ARCH_X86
4015 Cpu.mode = CPUMODE_32BIT;
4016#else
4017 Cpu.mode = CPUMODE_64BIT;
4018#endif
4019
4020 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4021 while (off < cb)
4022 {
4023 uint32_t cbInstr;
4024 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4025 DISAS_PRINTF("%s", szOutput);
4026 else
4027 {
4028 DISAS_PRINTF("disas error\n");
4029 cbInstr = 1;
4030#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4031 break;
4032#endif
4033 }
4034 off += cbInstr;
4035 }
4036 }
4037
4038#undef DISAS_PRINTF
4039}
4040
4041
4042/**
4043 * Disassemble guest code.
4044 *
4045 * @param phFileIgnored Ignored, logfile usually.
4046 * @param uCode The guest address of the code to disassemble. (flat?)
4047 * @param cb Number of bytes to disassemble.
4048 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4049 */
4050void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4051{
4052#ifdef DEBUG_TMP_LOGGING
4053# define DISAS_PRINTF(x...) fprintf(phFile, x)
4054#else
4055# define DISAS_PRINTF(x...) RTLogPrintf(x)
4056 if (LogIs2Enabled())
4057#endif
4058 {
4059 PVM pVM = cpu_single_env->pVM;
4060 PVMCPU pVCpu = cpu_single_env->pVCpu;
4061 RTSEL cs;
4062 RTGCUINTPTR eip;
4063
4064 Assert(pVCpu);
4065
4066 /*
4067 * Update the state so DBGF reads the correct register values (flags).
4068 */
4069 remR3StateUpdate(pVM, pVCpu);
4070
4071 /*
4072 * Do the disassembling.
4073 */
4074 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4075 cs = cpu_single_env->segs[R_CS].selector;
4076 eip = uCode - cpu_single_env->segs[R_CS].base;
4077 for (;;)
4078 {
4079 char szBuf[256];
4080 uint32_t cbInstr;
4081 int rc = DBGFR3DisasInstrEx(pVM,
4082 pVCpu->idCpu,
4083 cs,
4084 eip,
4085 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4086 szBuf, sizeof(szBuf),
4087 &cbInstr);
4088 if (RT_SUCCESS(rc))
4089 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
4090 else
4091 {
4092 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4093 cbInstr = 1;
4094 }
4095
4096 /* next */
4097 if (cb <= cbInstr)
4098 break;
4099 cb -= cbInstr;
4100 uCode += cbInstr;
4101 eip += cbInstr;
4102 }
4103 }
4104#undef DISAS_PRINTF
4105}
4106
4107
4108/**
4109 * Looks up a guest symbol.
4110 *
4111 * @returns Pointer to symbol name. This is a static buffer.
4112 * @param orig_addr The address in question.
4113 */
4114const char *lookup_symbol(target_ulong orig_addr)
4115{
4116 PVM pVM = cpu_single_env->pVM;
4117 RTGCINTPTR off = 0;
4118 RTDBGSYMBOL Sym;
4119 DBGFADDRESS Addr;
4120
4121 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4122 if (RT_SUCCESS(rc))
4123 {
4124 static char szSym[sizeof(Sym.szName) + 48];
4125 if (!off)
4126 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4127 else if (off > 0)
4128 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4129 else
4130 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4131 return szSym;
4132 }
4133 return "<N/A>";
4134}
4135
4136
4137#undef LOG_GROUP
4138#define LOG_GROUP LOG_GROUP_REM
4139
4140
4141/* -+- FF notifications -+- */
4142
4143
4144/**
4145 * Notification about a pending interrupt.
4146 *
4147 * @param pVM VM Handle.
4148 * @param pVCpu VMCPU Handle.
4149 * @param u8Interrupt Interrupt
4150 * @thread The emulation thread.
4151 */
4152REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4153{
4154 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4155 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4156}
4157
4158/**
4159 * Notification about a pending interrupt.
4160 *
4161 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4162 * @param pVM VM Handle.
4163 * @param pVCpu VMCPU Handle.
4164 * @thread The emulation thread.
4165 */
4166REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4167{
4168 return pVM->rem.s.u32PendingInterrupt;
4169}
4170
4171/**
4172 * Notification about the interrupt FF being set.
4173 *
4174 * @param pVM VM Handle.
4175 * @param pVCpu VMCPU Handle.
4176 * @thread The emulation thread.
4177 */
4178REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4179{
4180 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4181 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4182 if (pVM->rem.s.fInREM)
4183 {
4184 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4185 CPU_INTERRUPT_EXTERNAL_HARD);
4186 }
4187}
4188
4189
4190/**
4191 * Notification about the interrupt FF being set.
4192 *
4193 * @param pVM VM Handle.
4194 * @param pVCpu VMCPU Handle.
4195 * @thread Any.
4196 */
4197REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4198{
4199 LogFlow(("REMR3NotifyInterruptClear:\n"));
4200 if (pVM->rem.s.fInREM)
4201 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4202}
4203
4204
4205/**
4206 * Notification about pending timer(s).
4207 *
4208 * @param pVM VM Handle.
4209 * @param pVCpuDst The target cpu for this notification.
4210 * TM will not broadcast pending timer events, but use
4211 * a dedicated EMT for them. So, only interrupt REM
4212 * execution if the given CPU is executing in REM.
4213 * @thread Any.
4214 */
4215REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4216{
4217#ifndef DEBUG_bird
4218 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4219#endif
4220 if (pVM->rem.s.fInREM)
4221 {
4222 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4223 {
4224 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4225 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4226 CPU_INTERRUPT_EXTERNAL_TIMER);
4227 }
4228 else
4229 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4230 }
4231 else
4232 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4233}
4234
4235
4236/**
4237 * Notification about pending DMA transfers.
4238 *
4239 * @param pVM VM Handle.
4240 * @thread Any.
4241 */
4242REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4243{
4244 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4245 if (pVM->rem.s.fInREM)
4246 {
4247 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4248 CPU_INTERRUPT_EXTERNAL_DMA);
4249 }
4250}
4251
4252
4253/**
4254 * Notification about pending timer(s).
4255 *
4256 * @param pVM VM Handle.
4257 * @thread Any.
4258 */
4259REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4260{
4261 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4262 if (pVM->rem.s.fInREM)
4263 {
4264 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4265 CPU_INTERRUPT_EXTERNAL_EXIT);
4266 }
4267}
4268
4269
4270/**
4271 * Notification about pending FF set by an external thread.
4272 *
4273 * @param pVM VM handle.
4274 * @thread Any.
4275 */
4276REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4277{
4278 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4279 if (pVM->rem.s.fInREM)
4280 {
4281 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4282 CPU_INTERRUPT_EXTERNAL_EXIT);
4283 }
4284}
4285
4286
4287#ifdef VBOX_WITH_STATISTICS
4288void remR3ProfileStart(int statcode)
4289{
4290 STAMPROFILEADV *pStat;
4291 switch(statcode)
4292 {
4293 case STATS_EMULATE_SINGLE_INSTR:
4294 pStat = &gStatExecuteSingleInstr;
4295 break;
4296 case STATS_QEMU_COMPILATION:
4297 pStat = &gStatCompilationQEmu;
4298 break;
4299 case STATS_QEMU_RUN_EMULATED_CODE:
4300 pStat = &gStatRunCodeQEmu;
4301 break;
4302 case STATS_QEMU_TOTAL:
4303 pStat = &gStatTotalTimeQEmu;
4304 break;
4305 case STATS_QEMU_RUN_TIMERS:
4306 pStat = &gStatTimers;
4307 break;
4308 case STATS_TLB_LOOKUP:
4309 pStat= &gStatTBLookup;
4310 break;
4311 case STATS_IRQ_HANDLING:
4312 pStat= &gStatIRQ;
4313 break;
4314 case STATS_RAW_CHECK:
4315 pStat = &gStatRawCheck;
4316 break;
4317
4318 default:
4319 AssertMsgFailed(("unknown stat %d\n", statcode));
4320 return;
4321 }
4322 STAM_PROFILE_ADV_START(pStat, a);
4323}
4324
4325
4326void remR3ProfileStop(int statcode)
4327{
4328 STAMPROFILEADV *pStat;
4329 switch(statcode)
4330 {
4331 case STATS_EMULATE_SINGLE_INSTR:
4332 pStat = &gStatExecuteSingleInstr;
4333 break;
4334 case STATS_QEMU_COMPILATION:
4335 pStat = &gStatCompilationQEmu;
4336 break;
4337 case STATS_QEMU_RUN_EMULATED_CODE:
4338 pStat = &gStatRunCodeQEmu;
4339 break;
4340 case STATS_QEMU_TOTAL:
4341 pStat = &gStatTotalTimeQEmu;
4342 break;
4343 case STATS_QEMU_RUN_TIMERS:
4344 pStat = &gStatTimers;
4345 break;
4346 case STATS_TLB_LOOKUP:
4347 pStat= &gStatTBLookup;
4348 break;
4349 case STATS_IRQ_HANDLING:
4350 pStat= &gStatIRQ;
4351 break;
4352 case STATS_RAW_CHECK:
4353 pStat = &gStatRawCheck;
4354 break;
4355 default:
4356 AssertMsgFailed(("unknown stat %d\n", statcode));
4357 return;
4358 }
4359 STAM_PROFILE_ADV_STOP(pStat, a);
4360}
4361#endif
4362
4363/**
4364 * Raise an RC, force rem exit.
4365 *
4366 * @param pVM VM handle.
4367 * @param rc The rc.
4368 */
4369void remR3RaiseRC(PVM pVM, int rc)
4370{
4371 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4372 Assert(pVM->rem.s.fInREM);
4373 VM_ASSERT_EMT(pVM);
4374 pVM->rem.s.rc = rc;
4375 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4376}
4377
4378
4379/* -+- timers -+- */
4380
/** Returns the guest TSC from TM, counting the query for statistics. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4386
4387
4388/* -+- interrupts -+- */
4389
/** Raises the legacy FPU error line (ISA IRQ 13) via PDM. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4395
/**
 * Gets the next pending hardware interrupt vector for the recompiler.
 *
 * @returns The interrupt vector, or -1 when neither the latched REM pending
 *          interrupt nor PDM supplies one.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD raised while more APIC/PIC interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4429
4430
4431/* -+- local apic -+- */
4432
4433#if 0 /* CPUMSetGuestMsr does this now. */
4434void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4435{
4436 int rc = PDMApicSetBase(env->pVM, val);
4437 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4438}
4439#endif
4440
4441uint64_t cpu_get_apic_base(CPUX86State *env)
4442{
4443 uint64_t u64;
4444 int rc = PDMApicGetBase(env->pVM, &u64);
4445 if (RT_SUCCESS(rc))
4446 {
4447 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4448 return u64;
4449 }
4450 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4451 return 0;
4452}
4453
/** Sets the task priority (CR8) via PDM's APIC interface. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4459
4460uint8_t cpu_get_apic_tpr(CPUX86State *env)
4461{
4462 uint8_t u8;
4463 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4464 if (RT_SUCCESS(rc))
4465 {
4466 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4467 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4468 }
4469 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4470 return 0;
4471}
4472
4473/**
4474 * Read an MSR.
4475 *
4476 * @retval 0 success.
4477 * @retval -1 failure, raise \#GP(0).
4478 * @param env The cpu state.
4479 * @param idMsr The MSR to read.
4480 * @param puValue Where to return the value.
4481 */
4482int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4483{
4484 Assert(env->pVCpu);
4485 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4486}
4487
4488/**
4489 * Write to an MSR.
4490 *
4491 * @retval 0 success.
4492 * @retval -1 failure, raise \#GP(0).
4493 * @param env The cpu state.
4494 * @param idMsr The MSR to read.
4495 * @param puValue Where to return the value.
4496 */
4497int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4498{
4499 Assert(env->pVCpu);
4500 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4501}
4502
4503/* -+- I/O Ports -+- */
4504
4505#undef LOG_GROUP
4506#define LOG_GROUP LOG_GROUP_REM_IOPORT
4507
4508void cpu_outb(CPUState *env, int addr, int val)
4509{
4510 int rc;
4511
4512 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4513 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4514
4515 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4516 if (RT_LIKELY(rc == VINF_SUCCESS))
4517 return;
4518 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4519 {
4520 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4521 remR3RaiseRC(env->pVM, rc);
4522 return;
4523 }
4524 remAbort(rc, __FUNCTION__);
4525}
4526
4527void cpu_outw(CPUState *env, int addr, int val)
4528{
4529 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4530 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4531 if (RT_LIKELY(rc == VINF_SUCCESS))
4532 return;
4533 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4534 {
4535 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4536 remR3RaiseRC(env->pVM, rc);
4537 return;
4538 }
4539 remAbort(rc, __FUNCTION__);
4540}
4541
4542void cpu_outl(CPUState *env, int addr, int val)
4543{
4544 int rc;
4545 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4546 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4547 if (RT_LIKELY(rc == VINF_SUCCESS))
4548 return;
4549 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4550 {
4551 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4552 remR3RaiseRC(env->pVM, rc);
4553 return;
4554 }
4555 remAbort(rc, __FUNCTION__);
4556}
4557
4558int cpu_inb(CPUState *env, int addr)
4559{
4560 uint32_t u32 = 0;
4561 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4562 if (RT_LIKELY(rc == VINF_SUCCESS))
4563 {
4564 if (/*addr != 0x61 && */addr != 0x71)
4565 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4566 return (int)u32;
4567 }
4568 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4569 {
4570 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4571 remR3RaiseRC(env->pVM, rc);
4572 return (int)u32;
4573 }
4574 remAbort(rc, __FUNCTION__);
4575 return 0xff;
4576}
4577
4578int cpu_inw(CPUState *env, int addr)
4579{
4580 uint32_t u32 = 0;
4581 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4582 if (RT_LIKELY(rc == VINF_SUCCESS))
4583 {
4584 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4585 return (int)u32;
4586 }
4587 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4588 {
4589 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4590 remR3RaiseRC(env->pVM, rc);
4591 return (int)u32;
4592 }
4593 remAbort(rc, __FUNCTION__);
4594 return 0xffff;
4595}
4596
4597int cpu_inl(CPUState *env, int addr)
4598{
4599 uint32_t u32 = 0;
4600 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4601 if (RT_LIKELY(rc == VINF_SUCCESS))
4602 {
4603//if (addr==0x01f0 && u32 == 0x6b6d)
4604// loglevel = ~0;
4605 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4606 return (int)u32;
4607 }
4608 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4609 {
4610 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4611 remR3RaiseRC(env->pVM, rc);
4612 return (int)u32;
4613 }
4614 remAbort(rc, __FUNCTION__);
4615 return 0xffffffff;
4616}
4617
4618#undef LOG_GROUP
4619#define LOG_GROUP LOG_GROUP_REM
4620
4621
4622/* -+- helpers and misc other interfaces -+- */
4623
4624/**
4625 * Perform the CPUID instruction.
4626 *
4627 * ASMCpuId cannot be invoked from some source files where this is used because of global
4628 * register allocations.
4629 *
4630 * @param env Pointer to the recompiler CPU structure.
4631 * @param uOperator CPUID operation (eax).
4632 * @param pvEAX Where to store eax.
4633 * @param pvEBX Where to store ebx.
4634 * @param pvECX Where to store ecx.
4635 * @param pvEDX Where to store edx.
4636 */
4637void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4638{
4639 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4640}
4641
4642
4643#if 0 /* not used */
4644/**
4645 * Interface for qemu hardware to report back fatal errors.
4646 */
4647void hw_error(const char *pszFormat, ...)
4648{
4649 /*
4650 * Bitch about it.
4651 */
4652 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4653 * this in my Odin32 tree at home! */
4654 va_list args;
4655 va_start(args, pszFormat);
4656 RTLogPrintf("fatal error in virtual hardware:");
4657 RTLogPrintfV(pszFormat, args);
4658 va_end(args);
4659 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4660
4661 /*
4662 * If we're in REM context we'll sync back the state before 'jumping' to
4663 * the EMs failure handling.
4664 */
4665 PVM pVM = cpu_single_env->pVM;
4666 if (pVM->rem.s.fInREM)
4667 REMR3StateBack(pVM);
4668 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4669 AssertMsgFailed(("EMR3FatalError returned!\n"));
4670}
4671#endif
4672
4673/**
4674 * Interface for the qemu cpu to report unhandled situation
4675 * raising a fatal VM error.
4676 */
4677void cpu_abort(CPUState *env, const char *pszFormat, ...)
4678{
4679 va_list va;
4680 PVM pVM;
4681 PVMCPU pVCpu;
4682 char szMsg[256];
4683
4684 /*
4685 * Bitch about it.
4686 */
4687 RTLogFlags(NULL, "nodisabled nobuffered");
4688 RTLogFlush(NULL);
4689
4690 va_start(va, pszFormat);
4691#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4692 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4693 unsigned cArgs = 0;
4694 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4695 const char *psz = strchr(pszFormat, '%');
4696 while (psz && cArgs < 6)
4697 {
4698 auArgs[cArgs++] = va_arg(va, uintptr_t);
4699 psz = strchr(psz + 1, '%');
4700 }
4701 switch (cArgs)
4702 {
4703 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4704 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4705 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4706 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4707 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4708 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4709 default:
4710 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4711 }
4712#else
4713 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4714#endif
4715 va_end(va);
4716
4717 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4718 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4719
4720 /*
4721 * If we're in REM context we'll sync back the state before 'jumping' to
4722 * the EMs failure handling.
4723 */
4724 pVM = cpu_single_env->pVM;
4725 pVCpu = cpu_single_env->pVCpu;
4726 Assert(pVCpu);
4727
4728 if (pVM->rem.s.fInREM)
4729 REMR3StateBack(pVM, pVCpu);
4730 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4731 AssertMsgFailed(("EMR3FatalError returned!\n"));
4732}
4733
4734
4735/**
4736 * Aborts the VM.
4737 *
4738 * @param rc VBox error code.
4739 * @param pszTip Hint about why/when this happened.
4740 */
4741void remAbort(int rc, const char *pszTip)
4742{
4743 PVM pVM;
4744 PVMCPU pVCpu;
4745
4746 /*
4747 * Bitch about it.
4748 */
4749 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4750 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4751
4752 /*
4753 * Jump back to where we entered the recompiler.
4754 */
4755 pVM = cpu_single_env->pVM;
4756 pVCpu = cpu_single_env->pVCpu;
4757 Assert(pVCpu);
4758
4759 if (pVM->rem.s.fInREM)
4760 REMR3StateBack(pVM, pVCpu);
4761
4762 EMR3FatalError(pVCpu, rc);
4763 AssertMsgFailed(("EMR3FatalError returned!\n"));
4764}
4765
4766
4767/**
4768 * Dumps a linux system call.
4769 * @param pVCpu VMCPU handle.
4770 */
4771void remR3DumpLnxSyscall(PVMCPU pVCpu)
4772{
4773 static const char *apsz[] =
4774 {
4775 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4776 "sys_exit",
4777 "sys_fork",
4778 "sys_read",
4779 "sys_write",
4780 "sys_open", /* 5 */
4781 "sys_close",
4782 "sys_waitpid",
4783 "sys_creat",
4784 "sys_link",
4785 "sys_unlink", /* 10 */
4786 "sys_execve",
4787 "sys_chdir",
4788 "sys_time",
4789 "sys_mknod",
4790 "sys_chmod", /* 15 */
4791 "sys_lchown16",
4792 "sys_ni_syscall", /* old break syscall holder */
4793 "sys_stat",
4794 "sys_lseek",
4795 "sys_getpid", /* 20 */
4796 "sys_mount",
4797 "sys_oldumount",
4798 "sys_setuid16",
4799 "sys_getuid16",
4800 "sys_stime", /* 25 */
4801 "sys_ptrace",
4802 "sys_alarm",
4803 "sys_fstat",
4804 "sys_pause",
4805 "sys_utime", /* 30 */
4806 "sys_ni_syscall", /* old stty syscall holder */
4807 "sys_ni_syscall", /* old gtty syscall holder */
4808 "sys_access",
4809 "sys_nice",
4810 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4811 "sys_sync",
4812 "sys_kill",
4813 "sys_rename",
4814 "sys_mkdir",
4815 "sys_rmdir", /* 40 */
4816 "sys_dup",
4817 "sys_pipe",
4818 "sys_times",
4819 "sys_ni_syscall", /* old prof syscall holder */
4820 "sys_brk", /* 45 */
4821 "sys_setgid16",
4822 "sys_getgid16",
4823 "sys_signal",
4824 "sys_geteuid16",
4825 "sys_getegid16", /* 50 */
4826 "sys_acct",
4827 "sys_umount", /* recycled never used phys() */
4828 "sys_ni_syscall", /* old lock syscall holder */
4829 "sys_ioctl",
4830 "sys_fcntl", /* 55 */
4831 "sys_ni_syscall", /* old mpx syscall holder */
4832 "sys_setpgid",
4833 "sys_ni_syscall", /* old ulimit syscall holder */
4834 "sys_olduname",
4835 "sys_umask", /* 60 */
4836 "sys_chroot",
4837 "sys_ustat",
4838 "sys_dup2",
4839 "sys_getppid",
4840 "sys_getpgrp", /* 65 */
4841 "sys_setsid",
4842 "sys_sigaction",
4843 "sys_sgetmask",
4844 "sys_ssetmask",
4845 "sys_setreuid16", /* 70 */
4846 "sys_setregid16",
4847 "sys_sigsuspend",
4848 "sys_sigpending",
4849 "sys_sethostname",
4850 "sys_setrlimit", /* 75 */
4851 "sys_old_getrlimit",
4852 "sys_getrusage",
4853 "sys_gettimeofday",
4854 "sys_settimeofday",
4855 "sys_getgroups16", /* 80 */
4856 "sys_setgroups16",
4857 "old_select",
4858 "sys_symlink",
4859 "sys_lstat",
4860 "sys_readlink", /* 85 */
4861 "sys_uselib",
4862 "sys_swapon",
4863 "sys_reboot",
4864 "old_readdir",
4865 "old_mmap", /* 90 */
4866 "sys_munmap",
4867 "sys_truncate",
4868 "sys_ftruncate",
4869 "sys_fchmod",
4870 "sys_fchown16", /* 95 */
4871 "sys_getpriority",
4872 "sys_setpriority",
4873 "sys_ni_syscall", /* old profil syscall holder */
4874 "sys_statfs",
4875 "sys_fstatfs", /* 100 */
4876 "sys_ioperm",
4877 "sys_socketcall",
4878 "sys_syslog",
4879 "sys_setitimer",
4880 "sys_getitimer", /* 105 */
4881 "sys_newstat",
4882 "sys_newlstat",
4883 "sys_newfstat",
4884 "sys_uname",
4885 "sys_iopl", /* 110 */
4886 "sys_vhangup",
4887 "sys_ni_syscall", /* old "idle" system call */
4888 "sys_vm86old",
4889 "sys_wait4",
4890 "sys_swapoff", /* 115 */
4891 "sys_sysinfo",
4892 "sys_ipc",
4893 "sys_fsync",
4894 "sys_sigreturn",
4895 "sys_clone", /* 120 */
4896 "sys_setdomainname",
4897 "sys_newuname",
4898 "sys_modify_ldt",
4899 "sys_adjtimex",
4900 "sys_mprotect", /* 125 */
4901 "sys_sigprocmask",
4902 "sys_ni_syscall", /* old "create_module" */
4903 "sys_init_module",
4904 "sys_delete_module",
4905 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4906 "sys_quotactl",
4907 "sys_getpgid",
4908 "sys_fchdir",
4909 "sys_bdflush",
4910 "sys_sysfs", /* 135 */
4911 "sys_personality",
4912 "sys_ni_syscall", /* reserved for afs_syscall */
4913 "sys_setfsuid16",
4914 "sys_setfsgid16",
4915 "sys_llseek", /* 140 */
4916 "sys_getdents",
4917 "sys_select",
4918 "sys_flock",
4919 "sys_msync",
4920 "sys_readv", /* 145 */
4921 "sys_writev",
4922 "sys_getsid",
4923 "sys_fdatasync",
4924 "sys_sysctl",
4925 "sys_mlock", /* 150 */
4926 "sys_munlock",
4927 "sys_mlockall",
4928 "sys_munlockall",
4929 "sys_sched_setparam",
4930 "sys_sched_getparam", /* 155 */
4931 "sys_sched_setscheduler",
4932 "sys_sched_getscheduler",
4933 "sys_sched_yield",
4934 "sys_sched_get_priority_max",
4935 "sys_sched_get_priority_min", /* 160 */
4936 "sys_sched_rr_get_interval",
4937 "sys_nanosleep",
4938 "sys_mremap",
4939 "sys_setresuid16",
4940 "sys_getresuid16", /* 165 */
4941 "sys_vm86",
4942 "sys_ni_syscall", /* Old sys_query_module */
4943 "sys_poll",
4944 "sys_nfsservctl",
4945 "sys_setresgid16", /* 170 */
4946 "sys_getresgid16",
4947 "sys_prctl",
4948 "sys_rt_sigreturn",
4949 "sys_rt_sigaction",
4950 "sys_rt_sigprocmask", /* 175 */
4951 "sys_rt_sigpending",
4952 "sys_rt_sigtimedwait",
4953 "sys_rt_sigqueueinfo",
4954 "sys_rt_sigsuspend",
4955 "sys_pread64", /* 180 */
4956 "sys_pwrite64",
4957 "sys_chown16",
4958 "sys_getcwd",
4959 "sys_capget",
4960 "sys_capset", /* 185 */
4961 "sys_sigaltstack",
4962 "sys_sendfile",
4963 "sys_ni_syscall", /* reserved for streams1 */
4964 "sys_ni_syscall", /* reserved for streams2 */
4965 "sys_vfork", /* 190 */
4966 "sys_getrlimit",
4967 "sys_mmap2",
4968 "sys_truncate64",
4969 "sys_ftruncate64",
4970 "sys_stat64", /* 195 */
4971 "sys_lstat64",
4972 "sys_fstat64",
4973 "sys_lchown",
4974 "sys_getuid",
4975 "sys_getgid", /* 200 */
4976 "sys_geteuid",
4977 "sys_getegid",
4978 "sys_setreuid",
4979 "sys_setregid",
4980 "sys_getgroups", /* 205 */
4981 "sys_setgroups",
4982 "sys_fchown",
4983 "sys_setresuid",
4984 "sys_getresuid",
4985 "sys_setresgid", /* 210 */
4986 "sys_getresgid",
4987 "sys_chown",
4988 "sys_setuid",
4989 "sys_setgid",
4990 "sys_setfsuid", /* 215 */
4991 "sys_setfsgid",
4992 "sys_pivot_root",
4993 "sys_mincore",
4994 "sys_madvise",
4995 "sys_getdents64", /* 220 */
4996 "sys_fcntl64",
4997 "sys_ni_syscall", /* reserved for TUX */
4998 "sys_ni_syscall",
4999 "sys_gettid",
5000 "sys_readahead", /* 225 */
5001 "sys_setxattr",
5002 "sys_lsetxattr",
5003 "sys_fsetxattr",
5004 "sys_getxattr",
5005 "sys_lgetxattr", /* 230 */
5006 "sys_fgetxattr",
5007 "sys_listxattr",
5008 "sys_llistxattr",
5009 "sys_flistxattr",
5010 "sys_removexattr", /* 235 */
5011 "sys_lremovexattr",
5012 "sys_fremovexattr",
5013 "sys_tkill",
5014 "sys_sendfile64",
5015 "sys_futex", /* 240 */
5016 "sys_sched_setaffinity",
5017 "sys_sched_getaffinity",
5018 "sys_set_thread_area",
5019 "sys_get_thread_area",
5020 "sys_io_setup", /* 245 */
5021 "sys_io_destroy",
5022 "sys_io_getevents",
5023 "sys_io_submit",
5024 "sys_io_cancel",
5025 "sys_fadvise64", /* 250 */
5026 "sys_ni_syscall",
5027 "sys_exit_group",
5028 "sys_lookup_dcookie",
5029 "sys_epoll_create",
5030 "sys_epoll_ctl", /* 255 */
5031 "sys_epoll_wait",
5032 "sys_remap_file_pages",
5033 "sys_set_tid_address",
5034 "sys_timer_create",
5035 "sys_timer_settime", /* 260 */
5036 "sys_timer_gettime",
5037 "sys_timer_getoverrun",
5038 "sys_timer_delete",
5039 "sys_clock_settime",
5040 "sys_clock_gettime", /* 265 */
5041 "sys_clock_getres",
5042 "sys_clock_nanosleep",
5043 "sys_statfs64",
5044 "sys_fstatfs64",
5045 "sys_tgkill", /* 270 */
5046 "sys_utimes",
5047 "sys_fadvise64_64",
5048 "sys_ni_syscall" /* sys_vserver */
5049 };
5050
5051 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5052 switch (uEAX)
5053 {
5054 default:
5055 if (uEAX < RT_ELEMENTS(apsz))
5056 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5057 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5058 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5059 else
5060 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5061 break;
5062
5063 }
5064}
5065
5066
5067/**
5068 * Dumps an OpenBSD system call.
5069 * @param pVCpu VMCPU handle.
5070 */
5071void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5072{
5073 static const char *apsz[] =
5074 {
5075 "SYS_syscall", //0
5076 "SYS_exit", //1
5077 "SYS_fork", //2
5078 "SYS_read", //3
5079 "SYS_write", //4
5080 "SYS_open", //5
5081 "SYS_close", //6
5082 "SYS_wait4", //7
5083 "SYS_8",
5084 "SYS_link", //9
5085 "SYS_unlink", //10
5086 "SYS_11",
5087 "SYS_chdir", //12
5088 "SYS_fchdir", //13
5089 "SYS_mknod", //14
5090 "SYS_chmod", //15
5091 "SYS_chown", //16
5092 "SYS_break", //17
5093 "SYS_18",
5094 "SYS_19",
5095 "SYS_getpid", //20
5096 "SYS_mount", //21
5097 "SYS_unmount", //22
5098 "SYS_setuid", //23
5099 "SYS_getuid", //24
5100 "SYS_geteuid", //25
5101 "SYS_ptrace", //26
5102 "SYS_recvmsg", //27
5103 "SYS_sendmsg", //28
5104 "SYS_recvfrom", //29
5105 "SYS_accept", //30
5106 "SYS_getpeername", //31
5107 "SYS_getsockname", //32
5108 "SYS_access", //33
5109 "SYS_chflags", //34
5110 "SYS_fchflags", //35
5111 "SYS_sync", //36
5112 "SYS_kill", //37
5113 "SYS_38",
5114 "SYS_getppid", //39
5115 "SYS_40",
5116 "SYS_dup", //41
5117 "SYS_opipe", //42
5118 "SYS_getegid", //43
5119 "SYS_profil", //44
5120 "SYS_ktrace", //45
5121 "SYS_sigaction", //46
5122 "SYS_getgid", //47
5123 "SYS_sigprocmask", //48
5124 "SYS_getlogin", //49
5125 "SYS_setlogin", //50
5126 "SYS_acct", //51
5127 "SYS_sigpending", //52
5128 "SYS_osigaltstack", //53
5129 "SYS_ioctl", //54
5130 "SYS_reboot", //55
5131 "SYS_revoke", //56
5132 "SYS_symlink", //57
5133 "SYS_readlink", //58
5134 "SYS_execve", //59
5135 "SYS_umask", //60
5136 "SYS_chroot", //61
5137 "SYS_62",
5138 "SYS_63",
5139 "SYS_64",
5140 "SYS_65",
5141 "SYS_vfork", //66
5142 "SYS_67",
5143 "SYS_68",
5144 "SYS_sbrk", //69
5145 "SYS_sstk", //70
5146 "SYS_61",
5147 "SYS_vadvise", //72
5148 "SYS_munmap", //73
5149 "SYS_mprotect", //74
5150 "SYS_madvise", //75
5151 "SYS_76",
5152 "SYS_77",
5153 "SYS_mincore", //78
5154 "SYS_getgroups", //79
5155 "SYS_setgroups", //80
5156 "SYS_getpgrp", //81
5157 "SYS_setpgid", //82
5158 "SYS_setitimer", //83
5159 "SYS_84",
5160 "SYS_85",
5161 "SYS_getitimer", //86
5162 "SYS_87",
5163 "SYS_88",
5164 "SYS_89",
5165 "SYS_dup2", //90
5166 "SYS_91",
5167 "SYS_fcntl", //92
5168 "SYS_select", //93
5169 "SYS_94",
5170 "SYS_fsync", //95
5171 "SYS_setpriority", //96
5172 "SYS_socket", //97
5173 "SYS_connect", //98
5174 "SYS_99",
5175 "SYS_getpriority", //100
5176 "SYS_101",
5177 "SYS_102",
5178 "SYS_sigreturn", //103
5179 "SYS_bind", //104
5180 "SYS_setsockopt", //105
5181 "SYS_listen", //106
5182 "SYS_107",
5183 "SYS_108",
5184 "SYS_109",
5185 "SYS_110",
5186 "SYS_sigsuspend", //111
5187 "SYS_112",
5188 "SYS_113",
5189 "SYS_114",
5190 "SYS_115",
5191 "SYS_gettimeofday", //116
5192 "SYS_getrusage", //117
5193 "SYS_getsockopt", //118
5194 "SYS_119",
5195 "SYS_readv", //120
5196 "SYS_writev", //121
5197 "SYS_settimeofday", //122
5198 "SYS_fchown", //123
5199 "SYS_fchmod", //124
5200 "SYS_125",
5201 "SYS_setreuid", //126
5202 "SYS_setregid", //127
5203 "SYS_rename", //128
5204 "SYS_129",
5205 "SYS_130",
5206 "SYS_flock", //131
5207 "SYS_mkfifo", //132
5208 "SYS_sendto", //133
5209 "SYS_shutdown", //134
5210 "SYS_socketpair", //135
5211 "SYS_mkdir", //136
5212 "SYS_rmdir", //137
5213 "SYS_utimes", //138
5214 "SYS_139",
5215 "SYS_adjtime", //140
5216 "SYS_141",
5217 "SYS_142",
5218 "SYS_143",
5219 "SYS_144",
5220 "SYS_145",
5221 "SYS_146",
5222 "SYS_setsid", //147
5223 "SYS_quotactl", //148
5224 "SYS_149",
5225 "SYS_150",
5226 "SYS_151",
5227 "SYS_152",
5228 "SYS_153",
5229 "SYS_154",
5230 "SYS_nfssvc", //155
5231 "SYS_156",
5232 "SYS_157",
5233 "SYS_158",
5234 "SYS_159",
5235 "SYS_160",
5236 "SYS_getfh", //161
5237 "SYS_162",
5238 "SYS_163",
5239 "SYS_164",
5240 "SYS_sysarch", //165
5241 "SYS_166",
5242 "SYS_167",
5243 "SYS_168",
5244 "SYS_169",
5245 "SYS_170",
5246 "SYS_171",
5247 "SYS_172",
5248 "SYS_pread", //173
5249 "SYS_pwrite", //174
5250 "SYS_175",
5251 "SYS_176",
5252 "SYS_177",
5253 "SYS_178",
5254 "SYS_179",
5255 "SYS_180",
5256 "SYS_setgid", //181
5257 "SYS_setegid", //182
5258 "SYS_seteuid", //183
5259 "SYS_lfs_bmapv", //184
5260 "SYS_lfs_markv", //185
5261 "SYS_lfs_segclean", //186
5262 "SYS_lfs_segwait", //187
5263 "SYS_188",
5264 "SYS_189",
5265 "SYS_190",
5266 "SYS_pathconf", //191
5267 "SYS_fpathconf", //192
5268 "SYS_swapctl", //193
5269 "SYS_getrlimit", //194
5270 "SYS_setrlimit", //195
5271 "SYS_getdirentries", //196
5272 "SYS_mmap", //197
5273 "SYS___syscall", //198
5274 "SYS_lseek", //199
5275 "SYS_truncate", //200
5276 "SYS_ftruncate", //201
5277 "SYS___sysctl", //202
5278 "SYS_mlock", //203
5279 "SYS_munlock", //204
5280 "SYS_205",
5281 "SYS_futimes", //206
5282 "SYS_getpgid", //207
5283 "SYS_xfspioctl", //208
5284 "SYS_209",
5285 "SYS_210",
5286 "SYS_211",
5287 "SYS_212",
5288 "SYS_213",
5289 "SYS_214",
5290 "SYS_215",
5291 "SYS_216",
5292 "SYS_217",
5293 "SYS_218",
5294 "SYS_219",
5295 "SYS_220",
5296 "SYS_semget", //221
5297 "SYS_222",
5298 "SYS_223",
5299 "SYS_224",
5300 "SYS_msgget", //225
5301 "SYS_msgsnd", //226
5302 "SYS_msgrcv", //227
5303 "SYS_shmat", //228
5304 "SYS_229",
5305 "SYS_shmdt", //230
5306 "SYS_231",
5307 "SYS_clock_gettime", //232
5308 "SYS_clock_settime", //233
5309 "SYS_clock_getres", //234
5310 "SYS_235",
5311 "SYS_236",
5312 "SYS_237",
5313 "SYS_238",
5314 "SYS_239",
5315 "SYS_nanosleep", //240
5316 "SYS_241",
5317 "SYS_242",
5318 "SYS_243",
5319 "SYS_244",
5320 "SYS_245",
5321 "SYS_246",
5322 "SYS_247",
5323 "SYS_248",
5324 "SYS_249",
5325 "SYS_minherit", //250
5326 "SYS_rfork", //251
5327 "SYS_poll", //252
5328 "SYS_issetugid", //253
5329 "SYS_lchown", //254
5330 "SYS_getsid", //255
5331 "SYS_msync", //256
5332 "SYS_257",
5333 "SYS_258",
5334 "SYS_259",
5335 "SYS_getfsstat", //260
5336 "SYS_statfs", //261
5337 "SYS_fstatfs", //262
5338 "SYS_pipe", //263
5339 "SYS_fhopen", //264
5340 "SYS_265",
5341 "SYS_fhstatfs", //266
5342 "SYS_preadv", //267
5343 "SYS_pwritev", //268
5344 "SYS_kqueue", //269
5345 "SYS_kevent", //270
5346 "SYS_mlockall", //271
5347 "SYS_munlockall", //272
5348 "SYS_getpeereid", //273
5349 "SYS_274",
5350 "SYS_275",
5351 "SYS_276",
5352 "SYS_277",
5353 "SYS_278",
5354 "SYS_279",
5355 "SYS_280",
5356 "SYS_getresuid", //281
5357 "SYS_setresuid", //282
5358 "SYS_getresgid", //283
5359 "SYS_setresgid", //284
5360 "SYS_285",
5361 "SYS_mquery", //286
5362 "SYS_closefrom", //287
5363 "SYS_sigaltstack", //288
5364 "SYS_shmget", //289
5365 "SYS_semop", //290
5366 "SYS_stat", //291
5367 "SYS_fstat", //292
5368 "SYS_lstat", //293
5369 "SYS_fhstat", //294
5370 "SYS___semctl", //295
5371 "SYS_shmctl", //296
5372 "SYS_msgctl", //297
5373 "SYS_MAXSYSCALL", //298
5374 //299
5375 //300
5376 };
5377 uint32_t uEAX;
5378 if (!LogIsEnabled())
5379 return;
5380 uEAX = CPUMGetGuestEAX(pVCpu);
5381 switch (uEAX)
5382 {
5383 default:
5384 if (uEAX < RT_ELEMENTS(apsz))
5385 {
5386 uint32_t au32Args[8] = {0};
5387 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5388 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5389 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5390 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5391 }
5392 else
5393 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5394 break;
5395 }
5396}
5397
5398
5399#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5400/**
5401 * The Dll main entry point (stub).
5402 */
5403bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5404{
5405 return true;
5406}
5407
/**
 * Minimal memcpy replacement for the no-CRT build (no C runtime available).
 *
 * Simple forward byte-by-byte copy; as with the standard memcpy, the source
 * and destination regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Pointer to the destination buffer.
 * @param   src     Pointer to the source buffer (read only).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* keep const - the original silently discarded the qualifier from src */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5415
5416#endif
5417
/**
 * qemu callback invoked when the CPU's SMM state is updated.
 *
 * Intentionally an empty stub in the VBox recompiler glue - presumably SMM
 * transitions require no extra handling here (NOTE(review): confirm against
 * the qemu core's expectations for this hook).
 *
 * @param   env     The qemu CPU state.  Unused.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette