VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 13708

Last change on this file since 13708 was 13708, checked in by vboxsync, 16 years ago

Skip the TB flush as that's rather expensive and not necessary for single instruction emulation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 157.4 KB
Line 
1/* $Id: VBoxRecompiler.c 13708 2008-10-31 10:26:14Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "exec-all.h"
29
30#include <VBox/rem.h>
31#include <VBox/vmapi.h>
32#include <VBox/tm.h>
33#include <VBox/ssm.h>
34#include <VBox/em.h>
35#include <VBox/trpm.h>
36#include <VBox/iom.h>
37#include <VBox/mm.h>
38#include <VBox/pgm.h>
39#include <VBox/pdm.h>
40#include <VBox/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/hwaccm.h>
43#include <VBox/patm.h>
44#include <VBox/csam.h>
45#include "REMInternal.h"
46#include <VBox/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65extern int sync_tr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
/** Copy the 80-bit fpu register at pSrc to pDst.
 * Done as a single struct assignment; this is probably faster than
 * *calling* memcpy for such a small, fixed-size copy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
81
82
83/*******************************************************************************
84* Internal Functions *
85*******************************************************************************/
86static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
88static void remR3StateUpdate(PVM pVM);
89
90static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
91static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
92static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
93static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
94static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
95static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
96
97static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
98static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
99static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
100static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
101static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
102static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
103
104
105/*******************************************************************************
106* Global Variables *
107*******************************************************************************/
108
/** @todo Move stats to REM::s some rainy day we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling counters for the major REM code paths; registered in REMR3Init. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Reasons raw-mode execution was refused (see remR3CanExecuteRaw and friends). */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor table / task register change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Indexed by segment register (ES, CS, SS, DS, FS, GS - see registration). */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
#endif
146
147/*
148 * Global stuff.
149 */
150
/** MMIO read callbacks.
 * One entry per access size: 8, 16 and 32 bit (order required by
 * cpu_register_io_memory, see REMR3Init). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * One entry per access size: 8, 16 and 32 bit. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks.
 * One entry per access size: 8, 16 and 32 bit. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks.
 * One entry per access size: 8, 16 and 32 bit. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
182
183
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands ('.remstep'); registered in REMR3Init.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments - a single optional boolean. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
215
216
217/* Instantiate the structure signatures. */
218#define REM_STRUCT_OP 0
219#include "Sun/structs.h"
220
221
222
223/*******************************************************************************
224* Internal Functions *
225*******************************************************************************/
226static void remAbort(int rc, const char *pszTip);
227extern int testmath(void);
228
229/* Put them here to avoid unused variable warning. */
230AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
231#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
232//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
233/* Why did this have to be identical?? */
234AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
235#else
236AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
237#endif
238
239
240/**
241 * Initializes the REM.
242 *
243 * @returns VBox status code.
244 * @param pVM The VM to operate on.
245 */
246REMR3DECL(int) REMR3Init(PVM pVM)
247{
248 uint32_t u32Dummy;
249 unsigned i;
250
251 /*
252 * Assert sanity.
253 */
254 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
255 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
256 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
257#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
258 Assert(!testmath());
259#endif
260 ASSERT_STRUCT_TABLE(Misc);
261 ASSERT_STRUCT_TABLE(TLB);
262 ASSERT_STRUCT_TABLE(SegmentCache);
263 ASSERT_STRUCT_TABLE(XMMReg);
264 ASSERT_STRUCT_TABLE(MMXReg);
265 ASSERT_STRUCT_TABLE(float_status);
266 ASSERT_STRUCT_TABLE(float32u);
267 ASSERT_STRUCT_TABLE(float64u);
268 ASSERT_STRUCT_TABLE(floatx80u);
269 ASSERT_STRUCT_TABLE(CPUState);
270
271 /*
272 * Init some internal data members.
273 */
274 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
275 pVM->rem.s.Env.pVM = pVM;
276#ifdef CPU_RAW_MODE_INIT
277 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
278#endif
279
280 /* ctx. */
281 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
282 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
283
284 /* ignore all notifications */
285 pVM->rem.s.fIgnoreAll = true;
286
287 /*
288 * Init the recompiler.
289 */
290 if (!cpu_x86_init(&pVM->rem.s.Env))
291 {
292 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
293 return VERR_GENERAL_FAILURE;
294 }
295 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
296 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
297
298 /* allocate code buffer for single instruction emulation. */
299 pVM->rem.s.Env.cbCodeBuffer = 4096;
300 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
301 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
302
303 /* finally, set the cpu_single_env global. */
304 cpu_single_env = &pVM->rem.s.Env;
305
306 /* Nothing is pending by default */
307 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
308
309 /*
310 * Register ram types.
311 */
312 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
313 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
314 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
315 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
316 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
317
318 /* stop ignoring. */
319 pVM->rem.s.fIgnoreAll = false;
320
321 /*
322 * Register the saved state data unit.
323 */
324 int rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
325 NULL, remR3Save, NULL,
326 NULL, remR3Load, NULL);
327 if (VBOX_FAILURE(rc))
328 return rc;
329
330#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
331 /*
332 * Debugger commands.
333 */
334 static bool fRegisteredCmds = false;
335 if (!fRegisteredCmds)
336 {
337 int rc = DBGCRegisterCommands(&g_aCmds[0], ELEMENTS(g_aCmds));
338 if (VBOX_SUCCESS(rc))
339 fRegisteredCmds = true;
340 }
341#endif
342
343#ifdef VBOX_WITH_STATISTICS
344 /*
345 * Statistics.
346 */
347 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
348 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
349 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
350 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
351 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
352 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
353 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
354 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
355 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
356 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
357 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
358 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
359
360 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
361
362 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
363 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
364 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
365 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
366 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
367 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
368 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
369 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
370 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
371 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
372 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
373
374 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
375 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
376 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
377 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
378
379 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
385
386 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
387 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
388 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
389 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
390 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
391 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
392
393 STAM_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
394 STAM_REG(pVM, &tb_phys_invalidate_count,STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
395 STAM_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
396
397
398#endif
399
400#ifdef DEBUG_ALL_LOGGING
401 loglevel = ~0;
402#endif
403
404 return rc;
405}
406
407
408/**
409 * Terminates the REM.
410 *
411 * Termination means cleaning up and freeing all resources,
412 * the VM it self is at this point powered off or suspended.
413 *
414 * @returns VBox status code.
415 * @param pVM The VM to operate on.
416 */
417REMR3DECL(int) REMR3Term(PVM pVM)
418{
419 return VINF_SUCCESS;
420}
421
422
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu. Notifications are suppressed (fIgnoreAll) while
     * cpu_reset() runs so the teardown doesn't feed back into REM handlers.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;   /* drop any queued page invalidations */
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
}
444
445
/**
 * Execute state save operation.
 *
 * Saved layout (must be kept in sync with remR3Load):
 * hflags, ~0 separator, raw-ring-0 flag, pending interrupt, ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("remR3Save:\n"));

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);          /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);   /* terminator */
}
472
473
/**
 * Execute state load operation.
 *
 * Counterpart of remR3Save; additionally understands the older 1.6 layout
 * which carried a redundant CPU state dump and the invalidated-page list.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;     /* read back as a 32-bit boolean below */
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     * NOTE(review): the early error returns below leave fIgnoreAll set -
     * presumably acceptable since a failed load is fatal for the VM; verify.
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    uint32_t u32Sep;
    int rc = SSMR3GetU32(pSSM, &u32Sep);    /* separator */
    if (VBOX_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff (1.6 format only: list of pages queued for
         * invalidation; bounds-checked against the fixed-size array).
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (VBOX_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        unsigned i;
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (VBOX_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (VBOX_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the TLB so no stale translations survive into the loaded state.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
592
593
594
595#undef LOG_GROUP
596#define LOG_GROUP LOG_GROUP_REM_RUN
597
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enable single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    int interrupt_request = pVM->rem.s.Env.interrupt_request;   /* saved; restored at the end */
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that has to be disabled before we start stepping.
     */
    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    bool fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);    /* true if a bp was armed at PC */

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    int rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Step succeeded: pulse the TSC and virtual clock so time advances. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Anything but EXCP_DEBUG is unexpected while single stepping. */
        AssertMsgFailed(("Damn, this shouldn't happen! cpu_exec returned %d while singlestepping\n", rc));
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM status was deposited in rem.s.rc; consume and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
675
676
677/**
678 * Set a breakpoint using the REM facilities.
679 *
680 * @returns VBox status code.
681 * @param pVM The VM handle.
682 * @param Address The breakpoint address.
683 * @thread The emulation thread.
684 */
685REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
686{
687 VM_ASSERT_EMT(pVM);
688 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
689 {
690 LogFlow(("REMR3BreakpointSet: Address=%VGv\n", Address));
691 return VINF_SUCCESS;
692 }
693 LogFlow(("REMR3BreakpointSet: Address=%VGv - failed!\n", Address));
694 return VERR_REM_NO_MORE_BP_SLOTS;
695}
696
697
698/**
699 * Clears a breakpoint set by REMR3BreakpointSet().
700 *
701 * @returns VBox status code.
702 * @param pVM The VM handle.
703 * @param Address The breakpoint address.
704 * @thread The emulation thread.
705 */
706REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
707{
708 VM_ASSERT_EMT(pVM);
709 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
710 {
711 LogFlow(("REMR3BreakpointClear: Address=%VGv\n", Address));
712 return VINF_SUCCESS;
713 }
714 LogFlow(("REMR3BreakpointClear: Address=%VGv - not found!\n", Address));
715 return VERR_REM_BP_NOT_FOUND;
716}
717
718
719/**
720 * Emulate an instruction.
721 *
722 * This function executes one instruction without letting anyone
723 * interrupt it. This is intended for being called while being in
724 * raw mode and thus will take care of all the state syncing between
725 * REM and the rest.
726 *
727 * @returns VBox status code.
728 * @param pVM VM handle.
729 */
730REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
731{
732 bool fFlushTBs;
733
734 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
735
736 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
737 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
738 */
739 if (HWACCMIsEnabled(pVM))
740 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
741
742 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
743 fFlushTBs = pVM->rem.s.fFlushTBs;
744 pVM->rem.s.fFlushTBs = false;
745
746 /*
747 * Sync the state and enable single instruction / single stepping.
748 */
749 int rc = REMR3State(pVM);
750 pVM->rem.s.fFlushTBs = fFlushTBs;
751 if (VBOX_SUCCESS(rc))
752 {
753 int interrupt_request = pVM->rem.s.Env.interrupt_request;
754 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
755 Assert(!pVM->rem.s.Env.singlestep_enabled);
756#if 1
757
758 /*
759 * Now we set the execute single instruction flag and enter the cpu_exec loop.
760 */
761 TMNotifyStartOfExecution(pVM);
762 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
763 rc = cpu_exec(&pVM->rem.s.Env);
764 TMNotifyEndOfExecution(pVM);
765 switch (rc)
766 {
767 /*
768 * Executed without anything out of the way happening.
769 */
770 case EXCP_SINGLE_INSTR:
771 rc = VINF_EM_RESCHEDULE;
772 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
773 break;
774
775 /*
776 * If we take a trap or start servicing a pending interrupt, we might end up here.
777 * (Timer thread or some other thread wishing EMT's attention.)
778 */
779 case EXCP_INTERRUPT:
780 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
781 rc = VINF_EM_RESCHEDULE;
782 break;
783
784 /*
785 * Single step, we assume!
786 * If there was a breakpoint there we're fucked now.
787 */
788 case EXCP_DEBUG:
789 {
790 /* breakpoint or single step? */
791 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
792 int iBP;
793 rc = VINF_EM_DBG_STEPPED;
794 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
795 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
796 {
797 rc = VINF_EM_DBG_BREAKPOINT;
798 break;
799 }
800 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Vrc iBP=%d GCPtrPC=%VGv\n", rc, iBP, GCPtrPC));
801 break;
802 }
803
804 /*
805 * hlt instruction.
806 */
807 case EXCP_HLT:
808 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
809 rc = VINF_EM_HALT;
810 break;
811
812 /*
813 * The VM has halted.
814 */
815 case EXCP_HALTED:
816 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
817 rc = VINF_EM_HALT;
818 break;
819
820 /*
821 * Switch to RAW-mode.
822 */
823 case EXCP_EXECUTE_RAW:
824 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
825 rc = VINF_EM_RESCHEDULE_RAW;
826 break;
827
828 /*
829 * Switch to hardware accelerated RAW-mode.
830 */
831 case EXCP_EXECUTE_HWACC:
832 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
833 rc = VINF_EM_RESCHEDULE_HWACC;
834 break;
835
836 /*
837 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
838 */
839 case EXCP_RC:
840 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
841 rc = pVM->rem.s.rc;
842 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
843 break;
844
845 /*
846 * Figure out the rest when they arrive....
847 */
848 default:
849 AssertMsgFailed(("rc=%d\n", rc));
850 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
851 rc = VINF_EM_RESCHEDULE;
852 break;
853 }
854
855 /*
856 * Switch back the state.
857 */
858#else
859 pVM->rem.s.Env.interrupt_request = 0;
860 cpu_single_step(&pVM->rem.s.Env, 1);
861
862 /*
863 * Execute and handle the return code.
864 * We execute without enabling the cpu tick, so on success we'll
865 * just flip it on and off to make sure it moves.
866 *
867 * (We do not use emulate_single_instr() because that doesn't enter the
868 * right way in will cause serious trouble if a longjmp was attempted.)
869 */
870# ifdef DEBUG_bird
871 remR3DisasInstr(&pVM->rem.s.Env, 1, "REMR3EmulateInstruction");
872# endif
873 TMNotifyStartOfExecution(pVM);
874 int cTimesMax = 16384;
875 uint32_t eip = pVM->rem.s.Env.eip;
876 do
877 {
878 rc = cpu_exec(&pVM->rem.s.Env);
879
880 } while ( eip == pVM->rem.s.Env.eip
881 && (rc == EXCP_DEBUG || rc == EXCP_EXECUTE_RAW)
882 && --cTimesMax > 0);
883 TMNotifyEndOfExecution(pVM);
884 switch (rc)
885 {
886 /*
887 * Single step, we assume!
888 * If there was a breakpoint there we're fucked now.
889 */
890 case EXCP_DEBUG:
891 {
892 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG\n"));
893 rc = VINF_EM_RESCHEDULE;
894 break;
895 }
896
897 /*
898 * We cannot be interrupted!
899 */
900 case EXCP_INTERRUPT:
901 AssertMsgFailed(("Shouldn't happen! Everything was locked!\n"));
902 rc = VERR_INTERNAL_ERROR;
903 break;
904
905 /*
906 * hlt instruction.
907 */
908 case EXCP_HLT:
909 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
910 rc = VINF_EM_HALT;
911 break;
912
913 /*
914 * The VM has halted.
915 */
916 case EXCP_HALTED:
917 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
918 rc = VINF_EM_HALT;
919 break;
920
921 /*
922 * Switch to RAW-mode.
923 */
924 case EXCP_EXECUTE_RAW:
925 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
926 rc = VINF_EM_RESCHEDULE_RAW;
927 break;
928
929 /*
930 * Switch to hardware accelerated RAW-mode.
931 */
932 case EXCP_EXECUTE_HWACC:
933 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
934 rc = VINF_EM_RESCHEDULE_HWACC;
935 break;
936
937 /*
938 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
939 */
940 case EXCP_RC:
941 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC rc=%Vrc\n", pVM->rem.s.rc));
942 rc = pVM->rem.s.rc;
943 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
944 break;
945
946 /*
947 * Figure out the rest when they arrive....
948 */
949 default:
950 AssertMsgFailed(("rc=%d\n", rc));
951 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
952 rc = VINF_SUCCESS;
953 break;
954 }
955
956 /*
957 * Switch back the state.
958 */
959 cpu_single_step(&pVM->rem.s.Env, 0);
960#endif
961 pVM->rem.s.Env.interrupt_request = interrupt_request;
962 int rc2 = REMR3StateBack(pVM);
963 AssertRC(rc2);
964 }
965
966 Log2(("REMR3EmulateInstruction: returns %Vrc (cs:eip=%04x:%VGv)\n",
967 rc, pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
968 return rc;
969}
970
971
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code: VINF_SUCCESS, VINF_EM_HALT, a
 *          VINF_EM_RESCHEDULE_* code, a debug event status, or the rc
 *          previously raised into pVM->rem.s.rc (EXCP_RC case).
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    Log2(("REMR3Run: (cs:eip=%04x:%VGv)\n", pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);  /* REMR3State() must have been called first. */

    /* Bracket the execution so TM knows guest code is running. */
    TMNotifyStartOfExecution(pVM);
    int rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);

    /*
     * Translate the EXCP_* code returned by cpu_exec() into a VBox status code.
     */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            /* Developer-only breakpoint fiddling; compiled out. */
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* Breakpoint or single step? If the linear PC matches one of the
               registered breakpoints we report a breakpoint hit, otherwise
               this was a single step. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Vrc iBP=%d GCPtrPC=%VGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

#ifdef VBOX_WITH_VMI
        /*
         * The guest made a paravirtualized (VMI) call; have EM reschedule
         * to the paravirtualization handler.
         */
        case EXCP_PARAV_CALL:
            Log2(("REMR3Run: cpu_exec -> EXCP_PARAV_CALL\n"));
            rc = VINF_EM_RESCHEDULE_PARAV;
            break;
#endif

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         * Return the stashed rc and reset it so stale values are caught.
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Vrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Vrc (cs:eip=%04x:%VGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
    return rc;
}
1107
1108
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean; true if raw (or hardware accelerated raw) execution is
 *          possible, in which case *piException is set as well.
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */

    /* Update counter; also consulted below for the first-call special case. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the registers that function inspects are filled in;
         * everything else in Ctx is left uninitialized on purpose.
         */
        CPUMCTX Ctx;
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr              = env->tr.selector;
        Ctx.trHid.u64Base   = env->tr.base;
        Ctx.trHid.u32Limit  = env->tr.limit;
        Ctx.trHid.Attr.u    = (env->tr.flags >> 8) & 0xF0FF;   /* QEMU keeps attrs in bits 8+ of 'flags'. */

        Ctx.idtr.cbIdt      = env->idt.limit;
        Ctx.idtr.pIdt       = env->idt.base;

        Ctx.eflags.u32      = env->eflags;

        Ctx.cs              = env->segs[R_CS].selector;
        Ctx.csHid.u64Base   = env->segs[R_CS].base;
        Ctx.csHid.u32Limit  = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u    = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds              = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base   = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit  = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u    = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es              = env->segs[R_ES].selector;
        Ctx.esHid.u64Base   = env->segs[R_ES].base;
        Ctx.esHid.u32Limit  = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u    = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs              = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base   = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit  = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u    = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs              = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base   = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit  = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u    = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss              = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base   = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit  = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u    = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER         = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    /* Single-instruction emulation requests must stay in REM. */
    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    /* Debugging: single stepping and breakpoints are handled by REM only. */
    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode needs protected mode with paging enabled. */
    uint32_t u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable if the (virtual) CPU reports the feature. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring 3 code. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Supervisor (ring 0) code. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always allowed to run raw, regardless of IF. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1335
1336
1337/**
1338 * Fetches a code byte.
1339 *
1340 * @returns Success indicator (bool) for ease of use.
1341 * @param env The CPU environment structure.
1342 * @param GCPtrInstr Where to fetch code.
1343 * @param pu8Byte Where to store the byte on success
1344 */
1345bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1346{
1347 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1348 if (VBOX_SUCCESS(rc))
1349 return true;
1350 return false;
1351}
1352
1353
1354/**
1355 * Flush (or invalidate if you like) page table/dir entry.
1356 *
1357 * (invlpg instruction; tlb_flush_page)
1358 *
1359 * @param env Pointer to cpu environment.
1360 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1361 */
1362void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1363{
1364 PVM pVM = env->pVM;
1365
1366 /*
1367 * When we're replaying invlpg instructions or restoring a saved
1368 * state we disable this path.
1369 */
1370 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1371 return;
1372 Log(("remR3FlushPage: GCPtr=%VGv\n", GCPtr));
1373 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1374
1375 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1376
1377 /*
1378 * Update the control registers before calling PGMFlushPage.
1379 */
1380 PCPUMCTX pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1381 pCtx->cr0 = env->cr[0];
1382 pCtx->cr3 = env->cr[3];
1383 pCtx->cr4 = env->cr[4];
1384
1385 /*
1386 * Let PGM do the rest.
1387 */
1388 int rc = PGMInvalidatePage(pVM, GCPtr);
1389 if (VBOX_FAILURE(rc))
1390 {
1391 AssertMsgFailed(("remR3FlushPage %VGv failed with %d!!\n", GCPtr, rc));
1392 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1393 }
1394 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1395}
1396
1397
1398/**
1399 * Called from tlb_protect_code in order to write monitor a code page.
1400 *
1401 * @param env Pointer to the CPU environment.
1402 * @param GCPtr Code page to monitor
1403 */
1404void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1405{
1406#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1407 Assert(env->pVM->rem.s.fInREM);
1408 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1409 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1410 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1411 && !(env->eflags & VM_MASK) /* no V86 mode */
1412 && !HWACCMIsEnabled(env->pVM))
1413 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1414#endif
1415}
1416
1417/**
1418 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1419 *
1420 * @param env Pointer to the CPU environment.
1421 * @param GCPtr Code page to monitor
1422 */
1423void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1424{
1425 Assert(env->pVM->rem.s.fInREM);
1426#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1427 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1428 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1429 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1430 && !(env->eflags & VM_MASK) /* no V86 mode */
1431 && !HWACCMIsEnabled(env->pVM))
1432 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1433#endif
1434}
1435
1436
1437/**
1438 * Called when the CPU is initialized, any of the CRx registers are changed or
1439 * when the A20 line is modified.
1440 *
1441 * @param env Pointer to the CPU environment.
1442 * @param fGlobal Set if the flush is global.
1443 */
1444void remR3FlushTLB(CPUState *env, bool fGlobal)
1445{
1446 PVM pVM = env->pVM;
1447
1448 /*
1449 * When we're replaying invlpg instructions or restoring a saved
1450 * state we disable this path.
1451 */
1452 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1453 return;
1454 Assert(pVM->rem.s.fInREM);
1455
1456 /*
1457 * The caller doesn't check cr4, so we have to do that for ourselves.
1458 */
1459 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1460 fGlobal = true;
1461 Log(("remR3FlushTLB: CR0=%RGr CR3=%RGr CR4=%RGr %s\n", env->cr[0], env->cr[3], env->cr[4], fGlobal ? " global" : ""));
1462
1463 /*
1464 * Update the control registers before calling PGMR3FlushTLB.
1465 */
1466 PCPUMCTX pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1467 pCtx->cr0 = env->cr[0];
1468 pCtx->cr3 = env->cr[3];
1469 pCtx->cr4 = env->cr[4];
1470
1471 /*
1472 * Let PGM do the rest.
1473 */
1474 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1475}
1476
1477
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * Syncs the control registers into the CPUM context and notifies PGM
 * so it can switch its (shadow) paging mode; a failure here is fatal
 * and aborts the recompiler.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    int rc;
    PVM pVM = env->pVM;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    PCPUMCTX pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    /* 64-bit capable build: EFER matters for long mode. */
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Vrc\n", env->cr[0], env->cr[4], env->efer, rc);
#else
    /* 32-bit only build: no EFER, pass 0. */
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Vrc\n", env->cr[0], env->cr[4], 0LL, rc);
#endif
}
1515
1516
1517/**
1518 * Called from compiled code to run dma.
1519 *
1520 * @param env Pointer to the CPU environment.
1521 */
1522void remR3DmaRun(CPUState *env)
1523{
1524 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1525 PDMR3DmaRun(env->pVM);
1526 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1527}
1528
1529
1530/**
1531 * Called from compiled code to schedule pending timers in VMM
1532 *
1533 * @param env Pointer to the CPU environment.
1534 */
1535void remR3TimersRun(CPUState *env)
1536{
1537 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1538 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1539 TMR3TimerQueuesDo(env->pVM);
1540 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1541 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1542}
1543
1544
/**
 * Record trap occurance
 *
 * Maintains per-trap statistics and a "pending exception" record used to
 * detect trap loops: more than 512 identical consecutive traps at the
 * same EIP/CR2 raises VERR_REM_TOO_MANY_TRAPS.
 *
 * @returns VBox status code; VINF_SUCCESS or VERR_REM_TOO_MANY_TRAPS.
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, uint32_t pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    /* Register the counter for this trap number on first sight, then bump it. */
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            s_aRegisters[uTrap] = true;
            char szStatName[64];
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%VGv eip=%VGv cr2=%VGv\n", uTrap, uErrorCode, pvNextEIP, env->eip, env->cr[2]));
    /* Only CPU exceptions (0..0x1f) in protected, non-V86 mode take part in
       the trap-loop detection. */
    if(    uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap repeating too often? Bail out to protect against loops. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%VGv eip=%VGv cr2=%VGv\n", uTrap, uErrorCode, pvNextEIP, env->eip, env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Different trap or different location: restart the repeat count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not a candidate for loop detection; just record it. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException  = uTrap;
        pVM->rem.s.uPendingExcptEIP   = env->eip;
        pVM->rem.s.uPendingExcptCR2   = env->cr[2];
    }
    return VINF_SUCCESS;
}
1604
1605
1606/*
1607 * Clear current active trap
1608 *
1609 * @param pVM VM Handle.
1610 */
1611void remR3TrapClear(PVM pVM)
1612{
1613 pVM->rem.s.cPendingExceptions = 0;
1614 pVM->rem.s.uPendingException = 0;
1615 pVM->rem.s.uPendingExcptEIP = 0;
1616 pVM->rem.s.uPendingExcptCR2 = 0;
1617}
1618
1619
/**
 * Record previous call instruction addresses
 *
 * Hands the current EIP to CSAM so it can record the call address.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1629
1630
1631/**
1632 * Syncs the internal REM state with the VM.
1633 *
1634 * This must be called before REMR3Run() is invoked whenever when the REM
1635 * state is not up to date. Calling it several times in a row is not
1636 * permitted.
1637 *
1638 * @returns VBox status code.
1639 *
1640 * @param pVM VM Handle.
1641 *
1642 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1643 * no do this since the majority of the callers don't want any unnecessary of events
1644 * pending that would immediatly interrupt execution.
1645 */
1646REMR3DECL(int) REMR3State(PVM pVM)
1647{
1648 Log2(("REMR3State:\n"));
1649 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1650 register const CPUMCTX *pCtx = pVM->rem.s.pCtx;
1651 register unsigned fFlags;
1652 bool fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1653 unsigned i;
1654
1655 Assert(!pVM->rem.s.fInREM);
1656 pVM->rem.s.fInStateSync = true;
1657
1658 /*
1659 * If we have to flush TBs, do that immediately.
1660 */
1661 if (pVM->rem.s.fFlushTBs)
1662 {
1663 STAM_COUNTER_INC(&gStatFlushTBs);
1664 tb_flush(&pVM->rem.s.Env);
1665 pVM->rem.s.fFlushTBs = false;
1666 }
1667
1668 /*
1669 * Copy the registers which require no special handling.
1670 */
1671#ifdef TARGET_X86_64
1672 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1673 Assert(R_EAX == 0);
1674 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1675 Assert(R_ECX == 1);
1676 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1677 Assert(R_EDX == 2);
1678 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1679 Assert(R_EBX == 3);
1680 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1681 Assert(R_ESP == 4);
1682 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1683 Assert(R_EBP == 5);
1684 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1685 Assert(R_ESI == 6);
1686 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1687 Assert(R_EDI == 7);
1688 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1689 pVM->rem.s.Env.regs[8] = pCtx->r8;
1690 pVM->rem.s.Env.regs[9] = pCtx->r9;
1691 pVM->rem.s.Env.regs[10] = pCtx->r10;
1692 pVM->rem.s.Env.regs[11] = pCtx->r11;
1693 pVM->rem.s.Env.regs[12] = pCtx->r12;
1694 pVM->rem.s.Env.regs[13] = pCtx->r13;
1695 pVM->rem.s.Env.regs[14] = pCtx->r14;
1696 pVM->rem.s.Env.regs[15] = pCtx->r15;
1697
1698 pVM->rem.s.Env.eip = pCtx->rip;
1699
1700 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1701#else
1702 Assert(R_EAX == 0);
1703 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1704 Assert(R_ECX == 1);
1705 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1706 Assert(R_EDX == 2);
1707 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1708 Assert(R_EBX == 3);
1709 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1710 Assert(R_ESP == 4);
1711 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1712 Assert(R_EBP == 5);
1713 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1714 Assert(R_ESI == 6);
1715 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1716 Assert(R_EDI == 7);
1717 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1718 pVM->rem.s.Env.eip = pCtx->eip;
1719
1720 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1721#endif
1722
1723 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1724
1725 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1726 for (i=0;i<8;i++)
1727 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1728
1729 /*
1730 * Clear the halted hidden flag (the interrupt waking up the CPU can
1731 * have been dispatched in raw mode).
1732 */
1733 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1734
1735 /*
1736 * Replay invlpg?
1737 */
1738 if (pVM->rem.s.cInvalidatedPages)
1739 {
1740 pVM->rem.s.fIgnoreInvlPg = true;
1741 RTUINT i;
1742 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1743 {
1744 Log2(("REMR3State: invlpg %VGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1745 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1746 }
1747 pVM->rem.s.fIgnoreInvlPg = false;
1748 pVM->rem.s.cInvalidatedPages = 0;
1749 }
1750
1751 /* Replay notification changes? */
1752 if (pVM->rem.s.cHandlerNotifications)
1753 REMR3ReplayHandlerNotifications(pVM);
1754
1755 /* Update MSRs; before CRx registers! */
1756 pVM->rem.s.Env.efer = pCtx->msrEFER;
1757 pVM->rem.s.Env.star = pCtx->msrSTAR;
1758 pVM->rem.s.Env.pat = pCtx->msrPAT;
1759#ifdef TARGET_X86_64
1760 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1761 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1762 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1763 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1764
1765 /* Update the internal long mode activate flag according to the new EFER value. */
1766 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1767 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1768 else
1769 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1770#endif
1771
1772
1773 /*
1774 * Registers which are rarely changed and require special handling / order when changed.
1775 */
1776 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1777 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1778 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1779 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR | CPUM_CHANGED_TR
1780 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1781 {
1782 if (fFlags & CPUM_CHANGED_FPU_REM)
1783 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1784
1785 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1786 {
1787 pVM->rem.s.fIgnoreCR3Load = true;
1788 tlb_flush(&pVM->rem.s.Env, true);
1789 pVM->rem.s.fIgnoreCR3Load = false;
1790 }
1791
1792 /* CR4 before CR0! */
1793 if (fFlags & CPUM_CHANGED_CR4)
1794 {
1795 pVM->rem.s.fIgnoreCR3Load = true;
1796 pVM->rem.s.fIgnoreCpuMode = true;
1797 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1798 pVM->rem.s.fIgnoreCpuMode = false;
1799 pVM->rem.s.fIgnoreCR3Load = false;
1800 }
1801
1802 if (fFlags & CPUM_CHANGED_CR0)
1803 {
1804 pVM->rem.s.fIgnoreCR3Load = true;
1805 pVM->rem.s.fIgnoreCpuMode = true;
1806 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1807 pVM->rem.s.fIgnoreCpuMode = false;
1808 pVM->rem.s.fIgnoreCR3Load = false;
1809 }
1810
1811 if (fFlags & CPUM_CHANGED_CR3)
1812 {
1813 pVM->rem.s.fIgnoreCR3Load = true;
1814 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1815 pVM->rem.s.fIgnoreCR3Load = false;
1816 }
1817
1818 if (fFlags & CPUM_CHANGED_GDTR)
1819 {
1820 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1821 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1822 }
1823
1824 if (fFlags & CPUM_CHANGED_IDTR)
1825 {
1826 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1827 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1828 }
1829
1830 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1831 {
1832 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1833 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1834 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1835 }
1836
1837 if (fFlags & CPUM_CHANGED_LDTR)
1838 {
1839 if (fHiddenSelRegsValid)
1840 {
1841 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1842 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1843 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1844 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;;
1845 }
1846 else
1847 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1848 }
1849
1850 if (fFlags & CPUM_CHANGED_TR)
1851 {
1852 if (fHiddenSelRegsValid)
1853 {
1854 pVM->rem.s.Env.tr.selector = pCtx->tr;
1855 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1856 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1857 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;;
1858 }
1859 else
1860 sync_tr(&pVM->rem.s.Env, pCtx->tr);
1861
1862 /** @note do_interrupt will fault if the busy flag is still set.... */
1863 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1864 }
1865
1866 if (fFlags & CPUM_CHANGED_CPUID)
1867 {
1868 uint32_t u32Dummy;
1869
1870 /*
1871 * Get the CPUID features.
1872 */
1873 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1874 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1875 }
1876 }
1877
1878 /*
1879 * Update selector registers.
1880 * This must be done *after* we've synced gdt, ldt and crX registers
1881 * since we're reading the GDT/LDT om sync_seg. This will happen with
1882 * saved state which takes a quick dip into rawmode for instance.
1883 */
1884 /*
1885 * Stack; Note first check this one as the CPL might have changed. The
1886 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1887 */
1888
1889 if (fHiddenSelRegsValid)
1890 {
1891 /* The hidden selector registers are valid in the CPU context. */
1892 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1893
1894 /* Set current CPL */
1895 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1896
1897 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1898 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1899 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1900 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1901 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1902 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1903 }
1904 else
1905 {
1906 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1907 if (pVM->rem.s.Env.segs[R_SS].selector != (uint16_t)pCtx->ss)
1908 {
1909 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1910
1911 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1912 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1913#ifdef VBOX_WITH_STATISTICS
1914 if (pVM->rem.s.Env.segs[R_SS].newselector)
1915 {
1916 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1917 }
1918#endif
1919 }
1920 else
1921 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1922
1923 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1924 {
1925 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1926 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1927#ifdef VBOX_WITH_STATISTICS
1928 if (pVM->rem.s.Env.segs[R_ES].newselector)
1929 {
1930 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1931 }
1932#endif
1933 }
1934 else
1935 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1936
1937 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1938 {
1939 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1940 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1941#ifdef VBOX_WITH_STATISTICS
1942 if (pVM->rem.s.Env.segs[R_CS].newselector)
1943 {
1944 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1945 }
1946#endif
1947 }
1948 else
1949 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1950
1951 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1952 {
1953 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1954 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1955#ifdef VBOX_WITH_STATISTICS
1956 if (pVM->rem.s.Env.segs[R_DS].newselector)
1957 {
1958 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1959 }
1960#endif
1961 }
1962 else
1963 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1964
1965 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1966 * be the same but not the base/limit. */
1967 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1968 {
1969 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1970 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1971#ifdef VBOX_WITH_STATISTICS
1972 if (pVM->rem.s.Env.segs[R_FS].newselector)
1973 {
1974 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1975 }
1976#endif
1977 }
1978 else
1979 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1980
1981 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1982 {
1983 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1984 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1985#ifdef VBOX_WITH_STATISTICS
1986 if (pVM->rem.s.Env.segs[R_GS].newselector)
1987 {
1988 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1989 }
1990#endif
1991 }
1992 else
1993 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1994 }
1995
1996 /*
1997 * Check for traps.
1998 */
1999 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2000 TRPMEVENT enmType;
2001 uint8_t u8TrapNo;
2002 int rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2003 if (VBOX_SUCCESS(rc))
2004 {
2005#ifdef DEBUG
2006 if (u8TrapNo == 0x80)
2007 {
2008 remR3DumpLnxSyscall(pVM);
2009 remR3DumpOBsdSyscall(pVM);
2010 }
2011#endif
2012
2013 pVM->rem.s.Env.exception_index = u8TrapNo;
2014 if (enmType != TRPM_SOFTWARE_INT)
2015 {
2016 pVM->rem.s.Env.exception_is_int = 0;
2017 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2018 }
2019 else
2020 {
2021 /*
2022 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2023 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2024 * for int03 and into.
2025 */
2026 pVM->rem.s.Env.exception_is_int = 1;
2027 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2028 /* int 3 may be generated by one-byte 0xcc */
2029 if (u8TrapNo == 3)
2030 {
2031 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2032 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2033 }
2034 /* int 4 may be generated by one-byte 0xce */
2035 else if (u8TrapNo == 4)
2036 {
2037 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2038 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2039 }
2040 }
2041
2042 /* get error code and cr2 if needed. */
2043 switch (u8TrapNo)
2044 {
2045 case 0x0e:
2046 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2047 /* fallthru */
2048 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2049 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2050 break;
2051
2052 case 0x11: case 0x08:
2053 default:
2054 pVM->rem.s.Env.error_code = 0;
2055 break;
2056 }
2057
2058 /*
2059 * We can now reset the active trap since the recompiler is gonna have a go at it.
2060 */
2061 rc = TRPMResetTrap(pVM);
2062 AssertRC(rc);
2063 Log2(("REMR3State: trap=%02x errcd=%VGv cr2=%VGv nexteip=%VGv%s\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.error_code,
2064 pVM->rem.s.Env.cr[2], pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2065 }
2066
2067 /*
2068 * Clear old interrupt request flags; Check for pending hardware interrupts.
2069 * (See @remark for why we don't check for other FFs.)
2070 */
2071 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2072 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2073 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2074 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2075
2076 /*
2077 * We're now in REM mode.
2078 */
2079 pVM->rem.s.fInREM = true;
2080 pVM->rem.s.fInStateSync = false;
2081 pVM->rem.s.cCanExecuteRaw = 0;
2082 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2083 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2084 return VINF_SUCCESS;
2085}
2086
2087
2088/**
 * Syncs back changes in the REM state to the VM state.
2090 *
2091 * This must be called after invoking REMR3Run().
2092 * Calling it several times in a row is not permitted.
2093 *
2094 * @returns VBox status code.
2095 *
2096 * @param pVM VM Handle.
2097 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);
    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR = 0;
    pCtx->fpu.MXCSR_MASK = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8 = pVM->rem.s.Env.regs[8];
    pCtx->r9 = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    /* Segment selectors; count selectors QEmu left half-loaded (newselector != 0). */
    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    /* Control and debug registers. */
    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /*
     * Descriptor tables and system selectors. When a base/selector changed
     * while in REM, schedule the corresponding SELM/TRPM resync via a
     * force-action flag so raw mode picks up the change.
     */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    /* NOTE(review): this path masks the attribute word with 0xF0FF while
       remR3StateUpdate uses 0xFFFF for the same fields - verify which
       reserved attribute bits are meant to be stripped. */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;

    pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * If the recompiler left a pending exception (0..255), hand it back to
     * TRPM together with the error code / fault address where applicable.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        int rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2317
2318
2319/**
2320 * This is called by the disassembler when it wants to update the cpu state
2321 * before for instance doing a register dump.
2322 */
static void remR3StateUpdate(PVM pVM)
{
    Assert(pVM->rem.s.fInREM);
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     *
     * Unlike REMR3StateBack this does NOT clear fInREM and does not transfer
     * pending traps - it only refreshes the VMM copy of the state for
     * inspection (register dumps etc.).
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR = 0;
    pCtx->fpu.MXCSR_MASK = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8 = pVM->rem.s.Env.regs[8];
    pCtx->r9 = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    /* Segment selectors. */
    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    /* Control and debug registers. */
    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /*
     * Descriptor tables and system selectors; schedule SELM/TRPM resyncs
     * when something changed while in REM.
     */
    /* NOTE(review): the (uint32_t) casts truncate 64-bit GDT/IDT bases;
       REMR3StateBack performs the same comparison without casting - confirm
       the truncation is intentional for this path. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != (uint32_t)pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = (uint32_t)pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != (uint32_t)pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = (uint32_t)pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    /* NOTE(review): this path masks the attribute word with 0xFFFF while
       REMR3StateBack uses 0xF0FF for the same fields - verify which reserved
       attribute bits are meant to be stripped. */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;

    pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;

    pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

}
2481
2482
2483/**
2484 * Update the VMM state information if we're currently in REM.
2485 *
2486 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2487 * we're currently executing in REM and the VMM state is invalid. This method will of
2488 * course check that we're executing in REM before syncing any data over to the VMM.
2489 *
2490 * @param pVM The VM handle.
2491 */
2492REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2493{
2494 if (pVM->rem.s.fInREM)
2495 remR3StateUpdate(pVM);
2496}
2497
2498
2499#undef LOG_GROUP
2500#define LOG_GROUP LOG_GROUP_REM
2501
2502
2503/**
2504 * Notify the recompiler about Address Gate 20 state change.
2505 *
2506 * This notification is required since A20 gate changes are
2507 * initialized from a device driver and the VM might just as
2508 * well be in REM mode as in RAW mode.
2509 *
2510 * @param pVM VM handle.
2511 * @param fEnable True if the gate should be enabled.
2512 * False if the gate should be disabled.
2513 */
2514REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2515{
2516 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2517 VM_ASSERT_EMT(pVM);
2518
2519 bool fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2520 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2521
2522 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2523
2524 pVM->rem.s.fIgnoreAll = fSaved;
2525}
2526
2527
2528/**
2529 * Replays the invalidated recorded pages.
2530 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2531 *
2532 * @param pVM VM handle.
2533 */
2534REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2535{
2536 VM_ASSERT_EMT(pVM);
2537
2538 /*
2539 * Sync the required registers.
2540 */
2541 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2542 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2543 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2544 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2545
2546 /*
2547 * Replay the flushes.
2548 */
2549 pVM->rem.s.fIgnoreInvlPg = true;
2550 RTUINT i;
2551 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2552 {
2553 Log2(("REMR3ReplayInvalidatedPages: invlpg %VGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2554 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2555 }
2556 pVM->rem.s.fIgnoreInvlPg = false;
2557 pVM->rem.s.cInvalidatedPages = 0;
2558}
2559
2560
2561/**
2562 * Replays the handler notification changes
2563 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2564 *
2565 * @param pVM VM handle.
2566 */
2567REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2568{
2569 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2570 VM_ASSERT_EMT(pVM);
2571
2572 /*
2573 * Replay the flushes.
2574 */
2575 RTUINT i;
2576 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2577 pVM->rem.s.cHandlerNotifications = 0;
2578 for (i = 0; i < c; i++)
2579 {
2580 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2581 switch (pRec->enmKind)
2582 {
2583 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2584 REMR3NotifyHandlerPhysicalRegister(pVM,
2585 pRec->u.PhysicalRegister.enmType,
2586 pRec->u.PhysicalRegister.GCPhys,
2587 pRec->u.PhysicalRegister.cb,
2588 pRec->u.PhysicalRegister.fHasHCHandler);
2589 break;
2590
2591 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2592 REMR3NotifyHandlerPhysicalDeregister(pVM,
2593 pRec->u.PhysicalDeregister.enmType,
2594 pRec->u.PhysicalDeregister.GCPhys,
2595 pRec->u.PhysicalDeregister.cb,
2596 pRec->u.PhysicalDeregister.fHasHCHandler,
2597 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2598 break;
2599
2600 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2601 REMR3NotifyHandlerPhysicalModify(pVM,
2602 pRec->u.PhysicalModify.enmType,
2603 pRec->u.PhysicalModify.GCPhysOld,
2604 pRec->u.PhysicalModify.GCPhysNew,
2605 pRec->u.PhysicalModify.cb,
2606 pRec->u.PhysicalModify.fHasHCHandler,
2607 pRec->u.PhysicalModify.fRestoreAsRAM);
2608 break;
2609
2610 default:
2611 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2612 break;
2613 }
2614 }
2615 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2616}
2617
2618
2619/**
2620 * Notify REM about changed code page.
2621 *
2622 * @returns VBox status code.
2623 * @param pVM VM handle.
2624 * @param pvCodePage Code page address
2625 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    /* Translation failures (and builds without SMC page protection) are not
       errors from the caller's point of view; always report success. */
    return VINF_SUCCESS;
}
2656
2657
2658/**
2659 * Notification about a successful MMR3PhysRegister() call.
2660 *
2661 * @param pVM VM handle.
2662 * @param GCPhys The physical address the RAM.
2663 * @param cb Size of the memory.
2664 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2665 */
REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
{
    Log(("REMR3NotifyPhysRamRegister: GCPhys=%VGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Base ram?
     * A range starting at physical address 0 is taken to be the main RAM;
     * set up QEmu's dirty page tracking (one byte per page) for it.
     */
    if (!GCPhys)
    {
        phys_ram_size = cb;
        phys_ram_dirty_size = cb >> PAGE_SHIFT;
#ifndef VBOX_STRICT
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
        /* Allocate a map covering the full 4GB so out-of-range accesses hit
           PROT_NONE guard pages and fault immediately in strict builds. */
        phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
        uint32_t cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        int rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
        AssertRC(rc);
        /* Point at the end of the accessible area so that index phys_ram_dirty_size-1 is the last valid byte. */
        phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
#endif
        /* Mark everything dirty initially. */
        memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    }

    /*
     * Register the ram.
     * fIgnoreAll mutes our own access-handler callbacks during registration.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

#ifdef VBOX_WITH_NEW_PHYS_CODE
    if (fFlags & MM_RAM_FLAGS_RESERVED)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else
        cpu_register_physical_memory(GCPhys, cb, GCPhys);
#else
    if (!GCPhys)
        cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
    else
    {
        if (fFlags & MM_RAM_FLAGS_RESERVED)
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        else
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
    }
#endif
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2724
2725#ifndef VBOX_WITH_NEW_PHYS_CODE
2726
2727/**
2728 * Notification about a successful PGMR3PhysRegisterChunk() call.
2729 *
2730 * @param pVM VM handle.
2731 * @param GCPhys The physical address the RAM.
2732 * @param cb Size of the memory.
2733 * @param pvRam The HC address of the RAM.
2734 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2735 */
2736REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2737{
2738 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%VGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2739 VM_ASSERT_EMT(pVM);
2740
2741 /*
2742 * Validate input - we trust the caller.
2743 */
2744 Assert(pvRam);
2745 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2746 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2747 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2748 Assert(fFlags == 0 /* normal RAM */);
2749 Assert(!pVM->rem.s.fIgnoreAll);
2750 pVM->rem.s.fIgnoreAll = true;
2751
2752 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2753
2754 Assert(pVM->rem.s.fIgnoreAll);
2755 pVM->rem.s.fIgnoreAll = false;
2756}
2757
2758
2759/**
2760 * Grows dynamically allocated guest RAM.
2761 * Will raise a fatal error if the operation fails.
2762 *
2763 * @param physaddr The physical address.
2764 */
2765void remR3GrowDynRange(unsigned long physaddr)
2766{
2767 int rc;
2768 PVM pVM = cpu_single_env->pVM;
2769
2770 LogFlow(("remR3GrowDynRange %VGp\n", physaddr));
2771 const RTGCPHYS GCPhys = physaddr;
2772 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2773 if (VBOX_SUCCESS(rc))
2774 return;
2775
2776 LogRel(("\nUnable to allocate guest RAM chunk at %VGp\n", physaddr));
2777 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %VGp\n", physaddr);
2778 AssertFatalFailed();
2779}
2780
2781#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2782
2783/**
2784 * Notification about a successful MMR3PhysRomRegister() call.
2785 *
2786 * @param pVM VM handle.
2787 * @param GCPhys The physical address of the ROM.
2788 * @param cb The size of the ROM.
2789 * @param pvCopy Pointer to the ROM copy.
2790 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2791 * This function will be called when ever the protection of the
2792 * shadow ROM changes (at reset and end of POST).
2793 */
2794REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2795{
2796 Log(("REMR3NotifyPhysRomRegister: GCPhys=%VGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
2797 VM_ASSERT_EMT(pVM);
2798
2799 /*
2800 * Validate input - we trust the caller.
2801 */
2802 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2803 Assert(cb);
2804 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2805 Assert(pvCopy);
2806 Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);
2807
2808 /*
2809 * Register the rom.
2810 */
2811 Assert(!pVM->rem.s.fIgnoreAll);
2812 pVM->rem.s.fIgnoreAll = true;
2813
2814 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2815
2816 Log2(("%.64Vhxd\n", (char *)pvCopy + cb - 64));
2817
2818 Assert(pVM->rem.s.fIgnoreAll);
2819 pVM->rem.s.fIgnoreAll = false;
2820}
2821
2822
2823/**
2824 * Notification about a successful memory deregistration or reservation.
2825 *
2826 * @param pVM VM Handle.
2827 * @param GCPhys Start physical address.
2828 * @param cb The size of the range.
2829 * @todo Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't
2830 * reserve any memory soon.
2831 */
2832REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2833{
2834 Log(("REMR3NotifyPhysReserve: GCPhys=%VGp cb=%d\n", GCPhys, cb));
2835 VM_ASSERT_EMT(pVM);
2836
2837 /*
2838 * Validate input - we trust the caller.
2839 */
2840 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2841 Assert(cb);
2842 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2843
2844 /*
2845 * Unassigning the memory.
2846 */
2847 Assert(!pVM->rem.s.fIgnoreAll);
2848 pVM->rem.s.fIgnoreAll = true;
2849
2850 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2851
2852 Assert(pVM->rem.s.fIgnoreAll);
2853 pVM->rem.s.fIgnoreAll = false;
2854}
2855
2856
2857/**
2858 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2859 *
2860 * @param pVM VM Handle.
2861 * @param enmType Handler type.
2862 * @param GCPhys Handler range address.
2863 * @param cb Size of the handler range.
2864 * @param fHasHCHandler Set if the handler has a HC callback function.
2865 *
2866 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2867 * Handler memory type to memory which has no HC handler.
2868 */
2869REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2870{
2871 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%VGp cb=%VGp fHasHCHandler=%d\n",
2872 enmType, GCPhys, cb, fHasHCHandler));
2873 VM_ASSERT_EMT(pVM);
2874 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2875 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2876
2877 if (pVM->rem.s.cHandlerNotifications)
2878 REMR3ReplayHandlerNotifications(pVM);
2879
2880 Assert(!pVM->rem.s.fIgnoreAll);
2881 pVM->rem.s.fIgnoreAll = true;
2882
2883 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2884 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2885 else if (fHasHCHandler)
2886 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2887
2888 Assert(pVM->rem.s.fIgnoreAll);
2889 pVM->rem.s.fIgnoreAll = false;
2890}
2891
2892
2893/**
2894 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2895 *
2896 * @param pVM VM Handle.
2897 * @param enmType Handler type.
2898 * @param GCPhys Handler range address.
2899 * @param cb Size of the handler range.
2900 * @param fHasHCHandler Set if the handler has a HC callback function.
2901 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2902 */
2903REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2904{
2905 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%VGp cb=%VGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2906 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2907 VM_ASSERT_EMT(pVM);
2908
2909 if (pVM->rem.s.cHandlerNotifications)
2910 REMR3ReplayHandlerNotifications(pVM);
2911
2912 Assert(!pVM->rem.s.fIgnoreAll);
2913 pVM->rem.s.fIgnoreAll = true;
2914
2915/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2916 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2917 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2918 else if (fHasHCHandler)
2919 {
2920 if (!fRestoreAsRAM)
2921 {
2922 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2923 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2924 }
2925 else
2926 {
2927 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2928 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2929 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2930 }
2931 }
2932
2933 Assert(pVM->rem.s.fIgnoreAll);
2934 pVM->rem.s.fIgnoreAll = false;
2935}
2936
2937
2938/**
2939 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2940 *
2941 * @param pVM VM Handle.
2942 * @param enmType Handler type.
2943 * @param GCPhysOld Old handler range address.
2944 * @param GCPhysNew New handler range address.
2945 * @param cb Size of the handler range.
2946 * @param fHasHCHandler Set if the handler has a HC callback function.
2947 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2948 */
2949REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2950{
2951 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%VGp GCPhysNew=%VGp cb=%VGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
2952 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
2953 VM_ASSERT_EMT(pVM);
2954 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
2955
2956 if (pVM->rem.s.cHandlerNotifications)
2957 REMR3ReplayHandlerNotifications(pVM);
2958
2959 if (fHasHCHandler)
2960 {
2961 Assert(!pVM->rem.s.fIgnoreAll);
2962 pVM->rem.s.fIgnoreAll = true;
2963
2964 /*
2965 * Reset the old page.
2966 */
2967 if (!fRestoreAsRAM)
2968 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
2969 else
2970 {
2971 /* This is not perfect, but it'll do for PD monitoring... */
2972 Assert(cb == PAGE_SIZE);
2973 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
2974 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
2975 }
2976
2977 /*
2978 * Update the new page.
2979 */
2980 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
2981 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2982 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
2983
2984 Assert(pVM->rem.s.fIgnoreAll);
2985 pVM->rem.s.fIgnoreAll = false;
2986 }
2987}
2988
2989
2990/**
2991 * Checks if we're handling access to this page or not.
2992 *
2993 * @returns true if we're trapping access.
2994 * @returns false if we aren't.
2995 * @param pVM The VM handle.
2996 * @param GCPhys The physical address.
2997 *
2998 * @remark This function will only work correctly in VBOX_STRICT builds!
2999 */
3000REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3001{
3002#ifdef VBOX_STRICT
3003 if (pVM->rem.s.cHandlerNotifications)
3004 REMR3ReplayHandlerNotifications(pVM);
3005
3006 unsigned long off = get_phys_page_offset(GCPhys);
3007 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3008 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3009 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3010#else
3011 return false;
3012#endif
3013}
3014
3015
3016/**
3017 * Deals with a rare case in get_phys_addr_code where the code
3018 * is being monitored.
3019 *
3020 * It could also be an MMIO page, in which case we will raise a fatal error.
3021 *
3022 * @returns The physical address corresponding to addr.
3023 * @param env The cpu environment.
3024 * @param addr The virtual address.
3025 * @param pTLBEntry The TLB entry.
3026 */
3027target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry)
3028{
3029 PVM pVM = env->pVM;
3030 if ((pTLBEntry->addr_code & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3031 {
3032 target_ulong ret = pTLBEntry->addend + addr;
3033 AssertMsg2("remR3PhysGetPhysicalAddressCode: addr=%VGv addr_code=%VGv addend=%VGp ret=%VGp\n",
3034 (RTGCPTR)addr, (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, ret);
3035 return ret;
3036 }
3037 LogRel(("\nTrying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv! (iHandlerMemType=%#x iMMIOMemType=%#x)\n"
3038 "*** handlers\n",
3039 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType));
3040 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3041 LogRel(("*** mmio\n"));
3042 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3043 LogRel(("*** phys\n"));
3044 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3045 cpu_abort(env, "Trying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3046 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3047 AssertFatalFailed();
3048}
3049
3050
/** Validate the physical address passed to the read functions.
 * Useful for finding non-guest-ram reads/writes.
 * Disabled by default (the cheap no-op variant below is used); flip the #if
 * to 1 to assert that every address hits valid guest physical memory. */
#if 0 //1 /* disable if it becomes bothersome... */
# define VBOX_CHECK_ADDR(GCPhys) AssertMsg(PGMPhysIsGCPhysValid(cpu_single_env->pVM, (GCPhys)), ("%VGp\n", (GCPhys)))
#else
# define VBOX_CHECK_ADDR(GCPhys) do { } while (0)
#endif
3058
3059/**
3060 * Read guest RAM and ROM.
3061 *
3062 * @param SrcGCPhys The source address (guest physical).
3063 * @param pvDst The destination address.
3064 * @param cb Number of bytes
3065 */
3066void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3067{
3068 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3069 VBOX_CHECK_ADDR(SrcGCPhys);
3070 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3071 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3072}
3073
3074
3075/**
3076 * Read guest RAM and ROM, unsigned 8-bit.
3077 *
3078 * @param SrcGCPhys The source address (guest physical).
3079 */
3080uint8_t remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3081{
3082 uint8_t val;
3083 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3084 VBOX_CHECK_ADDR(SrcGCPhys);
3085 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3086 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3087 return val;
3088}
3089
3090
3091/**
3092 * Read guest RAM and ROM, signed 8-bit.
3093 *
3094 * @param SrcGCPhys The source address (guest physical).
3095 */
3096int8_t remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3097{
3098 int8_t val;
3099 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3100 VBOX_CHECK_ADDR(SrcGCPhys);
3101 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3102 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3103 return val;
3104}
3105
3106
3107/**
3108 * Read guest RAM and ROM, unsigned 16-bit.
3109 *
3110 * @param SrcGCPhys The source address (guest physical).
3111 */
3112uint16_t remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3113{
3114 uint16_t val;
3115 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3116 VBOX_CHECK_ADDR(SrcGCPhys);
3117 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3118 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3119 return val;
3120}
3121
3122
3123/**
3124 * Read guest RAM and ROM, signed 16-bit.
3125 *
3126 * @param SrcGCPhys The source address (guest physical).
3127 */
3128int16_t remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3129{
3130 uint16_t val;
3131 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3132 VBOX_CHECK_ADDR(SrcGCPhys);
3133 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3134 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3135 return val;
3136}
3137
3138
3139/**
3140 * Read guest RAM and ROM, unsigned 32-bit.
3141 *
3142 * @param SrcGCPhys The source address (guest physical).
3143 */
3144uint32_t remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3145{
3146 uint32_t val;
3147 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3148 VBOX_CHECK_ADDR(SrcGCPhys);
3149 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3150 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3151 return val;
3152}
3153
3154
3155/**
3156 * Read guest RAM and ROM, signed 32-bit.
3157 *
3158 * @param SrcGCPhys The source address (guest physical).
3159 */
3160int32_t remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3161{
3162 int32_t val;
3163 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3164 VBOX_CHECK_ADDR(SrcGCPhys);
3165 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3166 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3167 return val;
3168}
3169
3170
3171/**
3172 * Read guest RAM and ROM, unsigned 64-bit.
3173 *
3174 * @param SrcGCPhys The source address (guest physical).
3175 */
3176uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3177{
3178 uint64_t val;
3179 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3180 VBOX_CHECK_ADDR(SrcGCPhys);
3181 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3182 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3183 return val;
3184}
3185
3186
3187/**
3188 * Write guest RAM.
3189 *
3190 * @param DstGCPhys The destination address (guest physical).
3191 * @param pvSrc The source address.
3192 * @param cb Number of bytes to write
3193 */
3194void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3195{
3196 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3197 VBOX_CHECK_ADDR(DstGCPhys);
3198 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3199 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3200}
3201
3202
3203/**
3204 * Write guest RAM, unsigned 8-bit.
3205 *
3206 * @param DstGCPhys The destination address (guest physical).
3207 * @param val Value
3208 */
3209void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3210{
3211 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3212 VBOX_CHECK_ADDR(DstGCPhys);
3213 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3214 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3215}
3216
3217
3218/**
3219 * Write guest RAM, unsigned 8-bit.
3220 *
3221 * @param DstGCPhys The destination address (guest physical).
3222 * @param val Value
3223 */
3224void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3225{
3226 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3227 VBOX_CHECK_ADDR(DstGCPhys);
3228 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3229 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3230}
3231
3232
3233/**
3234 * Write guest RAM, unsigned 32-bit.
3235 *
3236 * @param DstGCPhys The destination address (guest physical).
3237 * @param val Value
3238 */
3239void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3240{
3241 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3242 VBOX_CHECK_ADDR(DstGCPhys);
3243 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3244 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3245}
3246
3247
3248/**
3249 * Write guest RAM, unsigned 64-bit.
3250 *
3251 * @param DstGCPhys The destination address (guest physical).
3252 * @param val Value
3253 */
3254void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3255{
3256 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3257 VBOX_CHECK_ADDR(DstGCPhys);
3258 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3259 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3260}
3261
3262#undef LOG_GROUP
3263#define LOG_GROUP LOG_GROUP_REM_MMIO
3264
/** Read MMIO memory, 8-bit; dispatches to IOM.  Returns 0 on failure (asserted). */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%VGp -> %02x\n", GCPhys, u32));
    return u32;
}
3274
/** Read MMIO memory, 16-bit; dispatches to IOM.  Returns 0 on failure (asserted). */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%VGp -> %04x\n", GCPhys, u32));
    return u32;
}
3284
/** Read MMIO memory, 32-bit; dispatches to IOM.  Returns 0 on failure (asserted). */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%VGp -> %08x\n", GCPhys, u32));
    return u32;
}
3294
/** Write to MMIO memory, 8-bit; dispatches to IOM (failure asserted). */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3MMIOWriteU8: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    int rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3302
/** Write to MMIO memory, 16-bit; dispatches to IOM (failure asserted). */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3MMIOWriteU16: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    int rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3310
/** Write to MMIO memory, 32-bit; dispatches to IOM (failure asserted). */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3MMIOWriteU32: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    int rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3318
3319
3320#undef LOG_GROUP
3321#define LOG_GROUP LOG_GROUP_REM_HANDLER
3322
3323/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3324
/** Handler-memory 8-bit read; goes through PGM so access handlers fire. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    Log2(("remR3HandlerReadU8: GCPhys=%VGp\n", GCPhys));
    uint8_t u8;
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3332
/** Handler-memory 16-bit read; goes through PGM so access handlers fire. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    Log2(("remR3HandlerReadU16: GCPhys=%VGp\n", GCPhys));
    uint16_t u16;
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3340
/** Handler-memory 32-bit read; goes through PGM so access handlers fire. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    Log2(("remR3HandlerReadU32: GCPhys=%VGp\n", GCPhys));
    uint32_t u32;
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3348
/** Handler-memory 8-bit write; goes through PGM so access handlers fire. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    /* NOTE(review): writing the low byte via &u32 assumes a little-endian
       host - confirm big-endian hosts are out of scope. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3354
/** Handler-memory 16-bit write; goes through PGM so access handlers fire. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    /* NOTE(review): writing the low word via &u32 assumes a little-endian
       host - confirm big-endian hosts are out of scope. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3360
/** Handler-memory 32-bit write; goes through PGM so access handlers fire. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3366
3367/* -+- disassembly -+- */
3368
3369#undef LOG_GROUP
3370#define LOG_GROUP LOG_GROUP_REM_DISAS
3371
3372
3373/**
3374 * Enables or disables singled stepped disassembly.
3375 *
3376 * @returns VBox status code.
3377 * @param pVM VM handle.
3378 * @param fEnable To enable set this flag, to disable clear it.
3379 */
3380static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3381{
3382 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3383 VM_ASSERT_EMT(pVM);
3384
3385 if (fEnable)
3386 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3387 else
3388 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3389 return VINF_SUCCESS;
3390}
3391
3392
3393/**
3394 * Enables or disables singled stepped disassembly.
3395 *
3396 * @returns VBox status code.
3397 * @param pVM VM handle.
3398 * @param fEnable To enable set this flag, to disable clear it.
3399 */
3400REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3401{
3402 PVMREQ pReq;
3403 int rc;
3404
3405 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3406 if (VM_IS_EMT(pVM))
3407 return remR3DisasEnableStepping(pVM, fEnable);
3408
3409 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3410 AssertRC(rc);
3411 if (VBOX_SUCCESS(rc))
3412 rc = pReq->iStatus;
3413 VMR3ReqFree(pReq);
3414 return rc;
3415}
3416
3417
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument, prints the current single-step-disassembly state;
 * otherwise converts the argument to a boolean and applies it.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (VBOX_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (VBOX_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3442
3443
3444/**
3445 * Disassembles n instructions and prints them to the log.
3446 *
3447 * @returns Success indicator.
3448 * @param env Pointer to the recompiler CPU structure.
3449 * @param f32BitCode Indicates that whether or not the code should
3450 * be disassembled as 16 or 32 bit. If -1 the CS
3451 * selector will be inspected.
3452 * @param nrInstructions Nr of instructions to disassemble
3453 * @param pszPrefix
3454 * @remark not currently used for anything but ad-hoc debugging.
3455 */
3456bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
3457{
3458 int i;
3459
3460 /*
3461 * Determin 16/32 bit mode.
3462 */
3463 if (f32BitCode == -1)
3464 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3465
3466 /*
3467 * Convert cs:eip to host context address.
3468 * We don't care to much about cross page correctness presently.
3469 */
3470 RTGCPTR GCPtrPC = env->segs[R_CS].base + env->eip;
3471 void *pvPC;
3472 if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3473 {
3474 Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);
3475
3476 /* convert eip to physical address. */
3477 int rc = PGMPhysGCPtr2HCPtrByGstCR3(env->pVM,
3478 GCPtrPC,
3479 env->cr[3],
3480 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
3481 &pvPC);
3482 if (VBOX_FAILURE(rc))
3483 {
3484 if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
3485 return false;
3486 pvPC = (char *)PATMR3QueryPatchMemHC(env->pVM, NULL)
3487 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
3488 }
3489 }
3490 else
3491 {
3492 /* physical address */
3493 int rc = PGMPhysGCPhys2HCPtr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16, &pvPC);
3494 if (VBOX_FAILURE(rc))
3495 return false;
3496 }
3497
3498 /*
3499 * Disassemble.
3500 */
3501 RTINTPTR off = env->eip - (RTGCUINTPTR)pvPC;
3502 DISCPUSTATE Cpu;
3503 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3504 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3505 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3506 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3507 //Cpu.dwUserData[2] = GCPtrPC;
3508
3509 for (i=0;i<nrInstructions;i++)
3510 {
3511 char szOutput[256];
3512 uint32_t cbOp;
3513 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3514 return false;
3515 if (pszPrefix)
3516 Log(("%s: %s", pszPrefix, szOutput));
3517 else
3518 Log(("%s", szOutput));
3519
3520 pvPC += cbOp;
3521 }
3522 return true;
3523}
3524
3525
/** @todo need to test the new code, using the old code in the mean while. */
#define USE_OLD_DUMP_AND_DISASSEMBLY

/**
 * Disassembles one instruction and prints it to the log.
 *
 * @returns Success indicator.
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   f32BitCode  Indicates that whether or not the code should
 *                      be disassembled as 16 or 32 bit. If -1 the CS
 *                      selector will be inspected.
 * @param   pszPrefix   Optional string prepended to the logged output.
 */
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
{
#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
    PVM pVM = env->pVM;

    /* Doesn't work in long mode. */
    if (env->hflags & HF_LMA_MASK)
        return false;

    /*
     * Determin 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Log registers
     */
    if (LogIs2Enabled())
    {
        remR3StateUpdate(pVM);
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
    }

    /*
     * Convert cs:eip to host context address.
     * We don't care to much about cross page correctness presently.
     */
    RTGCPTR GCPtrPC = env->segs[R_CS].base + env->eip;
    void *pvPC;
    if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        /* convert eip to physical address. */
        int rc = PGMPhysGCPtr2HCPtrByGstCR3(pVM,
                                            GCPtrPC,
                                            env->cr[3],
                                            env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
                                            &pvPC);
        if (VBOX_FAILURE(rc))
        {
            /* Fall back to patch memory if the address lives there. */
            if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
                return false;
            pvPC = (char *)PATMR3QueryPatchMemHC(pVM, NULL)
                + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
        }
    }
    else
    {

        /* physical address */
        int rc = PGMPhysGCPhys2HCPtr(pVM, (RTGCPHYS)GCPtrPC, 16, &pvPC);
        if (VBOX_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    /* off = delta between guest eip and the host mapping. */
    RTINTPTR off = env->eip - (RTGCUINTPTR)pvPC;
    DISCPUSTATE Cpu;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL;            /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;
    char szOutput[256];
    uint32_t cbOp;
    if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
        return false;

    /* Prepend the CS selector in 16-bit mode for readability. */
    if (!f32BitCode)
    {
        if (pszPrefix)
            Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
        else
            Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
    }
    else
    {
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));
    }
    return true;

#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
    PVM pVM = env->pVM;
    const bool fLog = LogIsEnabled();
    const bool fLog2 = LogIs2Enabled();
    int rc = VINF_SUCCESS;

    /*
     * Don't bother if there ain't any log output to do.
     */
    if (!fLog && !fLog2)
        return true;

    /*
     * Update the state so DBGF reads the correct register values.
     */
    remR3StateUpdate(pVM);

    /*
     * Log registers if requested.
     */
    if (!fLog2)
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);

    /*
     * Disassemble to log.
     */
    if (fLog)
        rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);

    return VBOX_SUCCESS(rc);
#endif
}
3657
3658
3659/**
3660 * Disassemble recompiled code.
3661 *
3662 * @param phFileIgnored Ignored, logfile usually.
3663 * @param pvCode Pointer to the code block.
3664 * @param cb Size of the code block.
3665 */
3666void disas(FILE *phFileIgnored, void *pvCode, unsigned long cb)
3667{
3668 if (LogIs2Enabled())
3669 {
3670 unsigned off = 0;
3671 char szOutput[256];
3672 DISCPUSTATE Cpu;
3673
3674 memset(&Cpu, 0, sizeof(Cpu));
3675#ifdef RT_ARCH_X86
3676 Cpu.mode = CPUMODE_32BIT;
3677#else
3678 Cpu.mode = CPUMODE_64BIT;
3679#endif
3680
3681 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3682 while (off < cb)
3683 {
3684 uint32_t cbInstr;
3685 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3686 RTLogPrintf("%s", szOutput);
3687 else
3688 {
3689 RTLogPrintf("disas error\n");
3690 cbInstr = 1;
3691#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3692 break;
3693#endif
3694 }
3695 off += cbInstr;
3696 }
3697 }
3698 NOREF(phFileIgnored);
3699}
3700
3701
3702/**
3703 * Disassemble guest code.
3704 *
3705 * @param phFileIgnored Ignored, logfile usually.
3706 * @param uCode The guest address of the code to disassemble. (flat?)
3707 * @param cb Number of bytes to disassemble.
3708 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3709 */
3710void target_disas(FILE *phFileIgnored, target_ulong uCode, target_ulong cb, int fFlags)
3711{
3712 if (LogIs2Enabled())
3713 {
3714 PVM pVM = cpu_single_env->pVM;
3715
3716 /*
3717 * Update the state so DBGF reads the correct register values (flags).
3718 */
3719 remR3StateUpdate(pVM);
3720
3721 /*
3722 * Do the disassembling.
3723 */
3724 RTLogPrintf("Guest Code: PC=%VGp #VGp (%VGp) bytes fFlags=%d\n", uCode, cb, cb, fFlags);
3725 RTSEL cs = cpu_single_env->segs[R_CS].selector;
3726 RTGCUINTPTR eip = uCode - cpu_single_env->segs[R_CS].base;
3727 for (;;)
3728 {
3729 char szBuf[256];
3730 uint32_t cbInstr;
3731 int rc = DBGFR3DisasInstrEx(pVM,
3732 cs,
3733 eip,
3734 0,
3735 szBuf, sizeof(szBuf),
3736 &cbInstr);
3737 if (VBOX_SUCCESS(rc))
3738 RTLogPrintf("%VGp %s\n", uCode, szBuf);
3739 else
3740 {
3741 RTLogPrintf("%VGp %04x:%VGp: %s\n", uCode, cs, eip, szBuf);
3742 cbInstr = 1;
3743 }
3744
3745 /* next */
3746 if (cb <= cbInstr)
3747 break;
3748 cb -= cbInstr;
3749 uCode += cbInstr;
3750 eip += cbInstr;
3751 }
3752 }
3753 NOREF(phFileIgnored);
3754}
3755
3756
3757/**
3758 * Looks up a guest symbol.
3759 *
3760 * @returns Pointer to symbol name. This is a static buffer.
3761 * @param orig_addr The address in question.
3762 */
3763const char *lookup_symbol(target_ulong orig_addr)
3764{
3765 RTGCINTPTR off = 0;
3766 DBGFSYMBOL Sym;
3767 PVM pVM = cpu_single_env->pVM;
3768 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3769 if (VBOX_SUCCESS(rc))
3770 {
3771 static char szSym[sizeof(Sym.szName) + 48];
3772 if (!off)
3773 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3774 else if (off > 0)
3775 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3776 else
3777 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3778 return szSym;
3779 }
3780 return "<N/A>";
3781}
3782
3783
3784#undef LOG_GROUP
3785#define LOG_GROUP LOG_GROUP_REM
3786
3787
3788/* -+- FF notifications -+- */
3789
3790
3791/**
3792 * Notification about a pending interrupt.
3793 *
3794 * @param pVM VM Handle.
3795 * @param u8Interrupt Interrupt
3796 * @thread The emulation thread.
3797 */
3798REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3799{
3800 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3801 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3802}
3803
3804/**
3805 * Notification about a pending interrupt.
3806 *
3807 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3808 * @param pVM VM Handle.
3809 * @thread The emulation thread.
3810 */
3811REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3812{
3813 return pVM->rem.s.u32PendingInterrupt;
3814}
3815
3816/**
3817 * Notification about the interrupt FF being set.
3818 *
3819 * @param pVM VM Handle.
3820 * @thread The emulation thread.
3821 */
3822REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3823{
3824 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3825 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3826 if (pVM->rem.s.fInREM)
3827 {
3828 if (VM_IS_EMT(pVM))
3829 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3830 else
3831 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_HARD);
3832 }
3833}
3834
3835
3836/**
3837 * Notification about the interrupt FF being set.
3838 *
3839 * @param pVM VM Handle.
3840 * @thread Any.
3841 */
3842REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3843{
3844 LogFlow(("REMR3NotifyInterruptClear:\n"));
3845 if (pVM->rem.s.fInREM)
3846 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3847}
3848
3849
3850/**
3851 * Notification about pending timer(s).
3852 *
3853 * @param pVM VM Handle.
3854 * @thread Any.
3855 */
3856REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3857{
3858#ifndef DEBUG_bird
3859 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3860#endif
3861 if (pVM->rem.s.fInREM)
3862 {
3863 if (VM_IS_EMT(pVM))
3864 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3865 else
3866 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_TIMER);
3867 }
3868}
3869
3870
3871/**
3872 * Notification about pending DMA transfers.
3873 *
3874 * @param pVM VM Handle.
3875 * @thread Any.
3876 */
3877REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3878{
3879 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3880 if (pVM->rem.s.fInREM)
3881 {
3882 if (VM_IS_EMT(pVM))
3883 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3884 else
3885 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_DMA);
3886 }
3887}
3888
3889
3890/**
3891 * Notification about pending timer(s).
3892 *
3893 * @param pVM VM Handle.
3894 * @thread Any.
3895 */
3896REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3897{
3898 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3899 if (pVM->rem.s.fInREM)
3900 {
3901 if (VM_IS_EMT(pVM))
3902 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3903 else
3904 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
3905 }
3906}
3907
3908
3909/**
3910 * Notification about pending FF set by an external thread.
3911 *
3912 * @param pVM VM handle.
3913 * @thread Any.
3914 */
3915REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3916{
3917 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3918 if (pVM->rem.s.fInREM)
3919 {
3920 if (VM_IS_EMT(pVM))
3921 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3922 else
3923 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
3924 }
3925}
3926
3927
#ifdef VBOX_WITH_STATISTICS
/**
 * Maps a STATS_* profiling code onto its STAM counter.
 *
 * Shared by remR3ProfileStart and remR3ProfileStop, which previously each
 * carried a verbatim copy of this switch.
 *
 * @returns The counter, or NULL (asserted) for an unknown code.
 * @param   statcode    STATS_* profiling code.
 */
static STAMPROFILEADV *remR3ProfileStatByCode(int statcode)
{
    switch (statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:    return &gStatExecuteSingleInstr;
        case STATS_QEMU_COMPILATION:        return &gStatCompilationQEmu;
        case STATS_QEMU_RUN_EMULATED_CODE:  return &gStatRunCodeQEmu;
        case STATS_QEMU_TOTAL:              return &gStatTotalTimeQEmu;
        case STATS_QEMU_RUN_TIMERS:         return &gStatTimers;
        case STATS_TLB_LOOKUP:              return &gStatTBLookup;
        case STATS_IRQ_HANDLING:            return &gStatIRQ;
        case STATS_RAW_CHECK:               return &gStatRawCheck;
        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return NULL;
    }
}


/**
 * Starts the advanced profile counter for the given profiling code.
 *
 * @param   statcode    STATS_* profiling code; unknown codes are asserted
 *                      and ignored.
 */
void remR3ProfileStart(int statcode)
{
    STAMPROFILEADV *pStat = remR3ProfileStatByCode(statcode);
    if (pStat)
        STAM_PROFILE_ADV_START(pStat, a);
}


/**
 * Stops the advanced profile counter for the given profiling code.
 *
 * @param   statcode    STATS_* profiling code; unknown codes are asserted
 *                      and ignored.
 */
void remR3ProfileStop(int statcode)
{
    STAMPROFILEADV *pStat = remR3ProfileStatByCode(statcode);
    if (pStat)
        STAM_PROFILE_ADV_STOP(pStat, a);
}
#endif
4003
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code in the recompiler state and interrupts the
 * recompiler so it stops executing and the caller can pick the code up.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Vrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;                               /* stash the code for the caller */
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC); /* kick the recompiler out of its loop */
}
4018
4019
4020/* -+- timers -+- */
4021
4022uint64_t cpu_get_tsc(CPUX86State *env)
4023{
4024 STAM_COUNTER_INC(&gStatCpuGetTSC);
4025 return TMCpuTickGet(env->pVM);
4026}
4027
4028
4029/* -+- interrupts -+- */
4030
4031void cpu_set_ferr(CPUX86State *env)
4032{
4033 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4034 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4035}
4036
4037int cpu_get_pic_interrupt(CPUState *env)
4038{
4039 uint8_t u8Interrupt;
4040 int rc;
4041
4042 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4043 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4044 * with the (a)pic.
4045 */
4046 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
4047 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4048 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4049 * remove this kludge. */
4050 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4051 {
4052 rc = VINF_SUCCESS;
4053 Assert(env->pVM->rem.s.u32PendingInterrupt >= 0 && env->pVM->rem.s.u32PendingInterrupt <= 255);
4054 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4055 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4056 }
4057 else
4058 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
4059
4060 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Vrc\n", u8Interrupt, rc));
4061 if (VBOX_SUCCESS(rc))
4062 {
4063 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
4064 env->interrupt_request |= CPU_INTERRUPT_HARD;
4065 return u8Interrupt;
4066 }
4067 return -1;
4068}
4069
4070
4071/* -+- local apic -+- */
4072
4073void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4074{
4075 int rc = PDMApicSetBase(env->pVM, val);
4076 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Vrc\n", val, rc)); NOREF(rc);
4077}
4078
4079uint64_t cpu_get_apic_base(CPUX86State *env)
4080{
4081 uint64_t u64;
4082 int rc = PDMApicGetBase(env->pVM, &u64);
4083 if (VBOX_SUCCESS(rc))
4084 {
4085 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4086 return u64;
4087 }
4088 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Vrc)\n", rc));
4089 return 0;
4090}
4091
4092void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4093{
4094 int rc = PDMApicSetTPR(env->pVM, val);
4095 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Vrc\n", val, rc)); NOREF(rc);
4096}
4097
4098uint8_t cpu_get_apic_tpr(CPUX86State *env)
4099{
4100 uint8_t u8;
4101 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4102 if (VBOX_SUCCESS(rc))
4103 {
4104 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4105 return u8;
4106 }
4107 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Vrc)\n", rc));
4108 return 0;
4109}
4110
4111
4112uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4113{
4114 uint64_t value;
4115 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4116 if (VBOX_SUCCESS(rc))
4117 {
4118 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4119 return value;
4120 }
4121 /** @todo: exception ? */
4122 LogFlow(("cpu_apic_rdms returns 0 (rc=%Vrc)\n", rc));
4123 return value;
4124}
4125
4126void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4127{
4128 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4129 /** @todo: exception if error ? */
4130 LogFlow(("cpu_apic_wrmsr: rc=%Vrc\n", rc)); NOREF(rc);
4131}
4132/* -+- I/O Ports -+- */
4133
4134#undef LOG_GROUP
4135#define LOG_GROUP LOG_GROUP_REM_IOPORT
4136
4137void cpu_outb(CPUState *env, int addr, int val)
4138{
4139 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4140 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4141
4142 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4143 if (RT_LIKELY(rc == VINF_SUCCESS))
4144 return;
4145 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4146 {
4147 Log(("cpu_outb: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
4148 remR3RaiseRC(env->pVM, rc);
4149 return;
4150 }
4151 remAbort(rc, __FUNCTION__);
4152}
4153
4154void cpu_outw(CPUState *env, int addr, int val)
4155{
4156 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4157 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4158 if (RT_LIKELY(rc == VINF_SUCCESS))
4159 return;
4160 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4161 {
4162 Log(("cpu_outw: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
4163 remR3RaiseRC(env->pVM, rc);
4164 return;
4165 }
4166 remAbort(rc, __FUNCTION__);
4167}
4168
4169void cpu_outl(CPUState *env, int addr, int val)
4170{
4171 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4172 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4173 if (RT_LIKELY(rc == VINF_SUCCESS))
4174 return;
4175 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4176 {
4177 Log(("cpu_outl: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
4178 remR3RaiseRC(env->pVM, rc);
4179 return;
4180 }
4181 remAbort(rc, __FUNCTION__);
4182}
4183
4184int cpu_inb(CPUState *env, int addr)
4185{
4186 uint32_t u32 = 0;
4187 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4188 if (RT_LIKELY(rc == VINF_SUCCESS))
4189 {
4190 if (/*addr != 0x61 && */addr != 0x71)
4191 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4192 return (int)u32;
4193 }
4194 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4195 {
4196 Log(("cpu_inb: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
4197 remR3RaiseRC(env->pVM, rc);
4198 return (int)u32;
4199 }
4200 remAbort(rc, __FUNCTION__);
4201 return 0xff;
4202}
4203
4204int cpu_inw(CPUState *env, int addr)
4205{
4206 uint32_t u32 = 0;
4207 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4208 if (RT_LIKELY(rc == VINF_SUCCESS))
4209 {
4210 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4211 return (int)u32;
4212 }
4213 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4214 {
4215 Log(("cpu_inw: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
4216 remR3RaiseRC(env->pVM, rc);
4217 return (int)u32;
4218 }
4219 remAbort(rc, __FUNCTION__);
4220 return 0xffff;
4221}
4222
4223int cpu_inl(CPUState *env, int addr)
4224{
4225 uint32_t u32 = 0;
4226 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4227 if (RT_LIKELY(rc == VINF_SUCCESS))
4228 {
4229//if (addr==0x01f0 && u32 == 0x6b6d)
4230// loglevel = ~0;
4231 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4232 return (int)u32;
4233 }
4234 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4235 {
4236 Log(("cpu_inl: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
4237 remR3RaiseRC(env->pVM, rc);
4238 return (int)u32;
4239 }
4240 remAbort(rc, __FUNCTION__);
4241 return 0xffffffff;
4242}
4243
4244#undef LOG_GROUP
4245#define LOG_GROUP LOG_GROUP_REM
4246
4247
4248/* -+- helpers and misc other interfaces -+- */
4249
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * Simply forwards to CPUM so the guest-visible CPUID configuration is used.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4267
4268
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Logs the error, then syncs the recompiler state back (if inside REM)
 * and hands control to EM's fatal error handling, which does not return.
 * (Currently compiled out — kept for reference.)
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4298
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Logs the error (forcing the log on), syncs the recompiler state back if
 * we are inside REM, and hands control to EM's fatal error handling, which
 * is not expected to return.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");  /* make sure the message actually reaches the log */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    /* A va_list may not be reused after consumption; restart it for the assertion. */
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4327
4328
4329/**
4330 * Aborts the VM.
4331 *
4332 * @param rc VBox error code.
4333 * @param pszTip Hint about why/when this happend.
4334 */
4335static void remAbort(int rc, const char *pszTip)
4336{
4337 /*
4338 * Bitch about it.
4339 */
4340 RTLogPrintf("internal REM fatal error: rc=%Vrc %s\n", rc, pszTip);
4341 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Vrc %s\n", rc, pszTip));
4342
4343 /*
4344 * Jump back to where we entered the recompiler.
4345 */
4346 PVM pVM = cpu_single_env->pVM;
4347 if (pVM->rem.s.fInREM)
4348 REMR3StateBack(pVM);
4349 EMR3FatalError(pVM, rc);
4350 AssertMsgFailed(("EMR3FatalError returned!\n"));
4351}
4352
4353
/**
 * Dumps a linux system call.
 *
 * Looks up the guest EAX in a static i386 Linux syscall-name table and logs
 * the syscall together with the common argument registers. Unknown numbers
 * (beyond the table) are logged by number only.
 *
 * @param pVM VM handle.
 */
void remR3DumpLnxSyscall(PVM pVM)
{
    /* Syscall number -> name, indexed by EAX (i386 Linux table). */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",             /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",           /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",            /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",           /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",            /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",            /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",            /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",              /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",           /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",            /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",            /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",          /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",         /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",             /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",         /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",          /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",             /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",          /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",            /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",         /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",            /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",           /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",            /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",            /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",          /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",           /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",            /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",           /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",           /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",         /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",            /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",         /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",           /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVM);
    /* Only the default label is populated; the switch is kept so specific
       syscalls can be special-cased later. */
    switch (uEAX)
    {
        default:
            if (uEAX < ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
                     CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
            break;

    }
}
4652
4653
/**
 * Dumps an OpenBSD system call.
 *
 * Looks up the guest EAX in a static OpenBSD syscall-name table, reads up to
 * eight argument dwords off the guest stack, and logs them. Does nothing if
 * logging is disabled (avoids the guest-memory read).
 *
 * @param pVM VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* Syscall number -> name, indexed by EAX (OpenBSD/i386 table). */
    static const char *apsz[] =
    {
        "SYS_syscall",      //0
        "SYS_exit",         //1
        "SYS_fork",         //2
        "SYS_read",         //3
        "SYS_write",        //4
        "SYS_open",         //5
        "SYS_close",        //6
        "SYS_wait4",        //7
        "SYS_8",
        "SYS_link",         //9
        "SYS_unlink",       //10
        "SYS_11",
        "SYS_chdir",        //12
        "SYS_fchdir",       //13
        "SYS_mknod",        //14
        "SYS_chmod",        //15
        "SYS_chown",        //16
        "SYS_break",        //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid",       //20
        "SYS_mount",        //21
        "SYS_unmount",      //22
        "SYS_setuid",       //23
        "SYS_getuid",       //24
        "SYS_geteuid",      //25
        "SYS_ptrace",       //26
        "SYS_recvmsg",      //27
        "SYS_sendmsg",      //28
        "SYS_recvfrom",     //29
        "SYS_accept",       //30
        "SYS_getpeername",  //31
        "SYS_getsockname",  //32
        "SYS_access",       //33
        "SYS_chflags",      //34
        "SYS_fchflags",     //35
        "SYS_sync",         //36
        "SYS_kill",         //37
        "SYS_38",
        "SYS_getppid",      //39
        "SYS_40",
        "SYS_dup",          //41
        "SYS_opipe",        //42
        "SYS_getegid",      //43
        "SYS_profil",       //44
        "SYS_ktrace",       //45
        "SYS_sigaction",    //46
        "SYS_getgid",       //47
        "SYS_sigprocmask",  //48
        "SYS_getlogin",     //49
        "SYS_setlogin",     //50
        "SYS_acct",         //51
        "SYS_sigpending",   //52
        "SYS_osigaltstack", //53
        "SYS_ioctl",        //54
        "SYS_reboot",       //55
        "SYS_revoke",       //56
        "SYS_symlink",      //57
        "SYS_readlink",     //58
        "SYS_execve",       //59
        "SYS_umask",        //60
        "SYS_chroot",       //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork",        //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk",         //69
        "SYS_sstk",         //70
        "SYS_61",
        "SYS_vadvise",      //72
        "SYS_munmap",       //73
        "SYS_mprotect",     //74
        "SYS_madvise",      //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore",      //78
        "SYS_getgroups",    //79
        "SYS_setgroups",    //80
        "SYS_getpgrp",      //81
        "SYS_setpgid",      //82
        "SYS_setitimer",    //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer",    //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2",         //90
        "SYS_91",
        "SYS_fcntl",        //92
        "SYS_select",       //93
        "SYS_94",
        "SYS_fsync",        //95
        "SYS_setpriority",  //96
        "SYS_socket",       //97
        "SYS_connect",      //98
        "SYS_99",
        "SYS_getpriority",  //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn",    //103
        "SYS_bind",         //104
        "SYS_setsockopt",   //105
        "SYS_listen",       //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend",   //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday", //116
        "SYS_getrusage",    //117
        "SYS_getsockopt",   //118
        "SYS_119",
        "SYS_readv",        //120
        "SYS_writev",       //121
        "SYS_settimeofday", //122
        "SYS_fchown",       //123
        "SYS_fchmod",       //124
        "SYS_125",
        "SYS_setreuid",     //126
        "SYS_setregid",     //127
        "SYS_rename",       //128
        "SYS_129",
        "SYS_130",
        "SYS_flock",        //131
        "SYS_mkfifo",       //132
        "SYS_sendto",       //133
        "SYS_shutdown",     //134
        "SYS_socketpair",   //135
        "SYS_mkdir",        //136
        "SYS_rmdir",        //137
        "SYS_utimes",       //138
        "SYS_139",
        "SYS_adjtime",      //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid",       //147
        "SYS_quotactl",     //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc",       //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh",        //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch",      //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread",        //173
        "SYS_pwrite",       //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid",       //181
        "SYS_setegid",      //182
        "SYS_seteuid",      //183
        "SYS_lfs_bmapv",    //184
        "SYS_lfs_markv",    //185
        "SYS_lfs_segclean", //186
        "SYS_lfs_segwait",  //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf",     //191
        "SYS_fpathconf",    //192
        "SYS_swapctl",      //193
        "SYS_getrlimit",    //194
        "SYS_setrlimit",    //195
        "SYS_getdirentries",//196
        "SYS_mmap",         //197
        "SYS___syscall",    //198
        "SYS_lseek",        //199
        "SYS_truncate",     //200
        "SYS_ftruncate",    //201
        "SYS___sysctl",     //202
        "SYS_mlock",        //203
        "SYS_munlock",      //204
        "SYS_205",
        "SYS_futimes",      //206
        "SYS_getpgid",      //207
        "SYS_xfspioctl",    //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget",       //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget",       //225
        "SYS_msgsnd",       //226
        "SYS_msgrcv",       //227
        "SYS_shmat",        //228
        "SYS_229",
        "SYS_shmdt",        //230
        "SYS_231",
        "SYS_clock_gettime",//232
        "SYS_clock_settime",//233
        "SYS_clock_getres", //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep",    //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit",     //250
        "SYS_rfork",        //251
        "SYS_poll",         //252
        "SYS_issetugid",    //253
        "SYS_lchown",       //254
        "SYS_getsid",       //255
        "SYS_msync",        //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat",    //260
        "SYS_statfs",       //261
        "SYS_fstatfs",      //262
        "SYS_pipe",         //263
        "SYS_fhopen",       //264
        "SYS_265",
        "SYS_fhstatfs",     //266
        "SYS_preadv",       //267
        "SYS_pwritev",      //268
        "SYS_kqueue",       //269
        "SYS_kevent",       //270
        "SYS_mlockall",     //271
        "SYS_munlockall",   //272
        "SYS_getpeereid",   //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid",    //281
        "SYS_setresuid",    //282
        "SYS_getresgid",    //283
        "SYS_setresgid",    //284
        "SYS_285",
        "SYS_mquery",       //286
        "SYS_closefrom",    //287
        "SYS_sigaltstack",  //288
        "SYS_shmget",       //289
        "SYS_semop",        //290
        "SYS_stat",         //291
        "SYS_fstat",        //292
        "SYS_lstat",        //293
        "SYS_fhstat",       //294
        "SYS___semctl",     //295
        "SYS_shmctl",       //296
        "SYS_msgctl",       //297
        "SYS_MAXSYSCALL",   //298
        //299
        //300
    };
    uint32_t uEAX;
    /* RTLogPrintf below doesn't check the log state itself, and we also want
       to skip the guest stack read when nobody is listening. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    /* Only the default label is populated; the switch is kept so specific
       syscalls can be special-cased later. */
    switch (uEAX)
    {
        default:
            if (uEAX < ELEMENTS(apsz))
            {
                uint32_t au32Args[8] = {0};
                /* Best-effort read of the first 8 argument dwords off the guest stack. */
                PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
4984
4985
#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}

/**
 * Minimal byte-wise memcpy replacement for the no-CRT build.
 *
 * @returns dst.
 * @param   dst     Destination buffer (must not overlap src).
 * @param   src     Source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* fix: the old code dropped the const qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}

#endif
5004
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette