VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 16102

Last change on this file since 16102 was 16056, checked in by vboxsync, 16 years ago

REM: Wrap all printf and fprintf calls like we used to do (and does for VBoxREM2), except when DEBUG_TMP_LOGGING is set. DEBUG_TMP_LOGGING can be used with DEBUG_ALL_LOGGING only to split the log output where some of the qemu logging goes to /tmp/vbox-qemu.log.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 157.0 KB
Line 
1/* $Id: VBoxRecompiler.c 16056 2009-01-19 19:19:02Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33void cpu_exec_init_all(unsigned long tb_size);
34
35#include <VBox/rem.h>
36#include <VBox/vmapi.h>
37#include <VBox/tm.h>
38#include <VBox/ssm.h>
39#include <VBox/em.h>
40#include <VBox/trpm.h>
41#include <VBox/iom.h>
42#include <VBox/mm.h>
43#include <VBox/pgm.h>
44#include <VBox/pdm.h>
45#include <VBox/dbgf.h>
46#include <VBox/dbg.h>
47#include <VBox/hwaccm.h>
48#include <VBox/patm.h>
49#include <VBox/csam.h>
50#include "REMInternal.h"
51#include <VBox/vm.h>
52#include <VBox/param.h>
53#include <VBox/err.h>
54
55#include <VBox/log.h>
56#include <iprt/semaphore.h>
57#include <iprt/asm.h>
58#include <iprt/assert.h>
59#include <iprt/thread.h>
60#include <iprt/string.h>
61
62/* Don't wanna include everything. */
63extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
64extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
65extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
66extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
67extern void tlb_flush(CPUState *env, int flush_global);
68extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
69extern void sync_ldtr(CPUX86State *env1, int selector);
70extern int sync_tr(CPUX86State *env1, int selector);
71
72#ifdef VBOX_STRICT
73unsigned long get_phys_page_offset(target_ulong addr);
74#endif
75
76/*******************************************************************************
77* Defined Constants And Macros *
78*******************************************************************************/
79
80/** Copy 80-bit fpu register at pSrc to pDst.
 * Implemented as a single X86FPUMMX structure assignment instead of a
 * library call; this is probably faster than *calling* memcpy.
81 * This is probably faster than *calling* memcpy.
 * The do/while(0) wrapper makes the macro expand to exactly one statement.
82 */
83#define REM_COPY_FPU_REG(pDst, pSrc) \
84 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
85
86
87/*******************************************************************************
88* Internal Functions *
89*******************************************************************************/
90static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
91static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
92static void remR3StateUpdate(PVM pVM);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
/* Profiling samples (STAMPROFILEADV/STAMPROFILE) and event counters
 * (STAMCOUNTER) for the recompiler.  All of them are registered with the
 * statistics manager via STAM_REG in REMR3Init(); see that function for the
 * "/PROF/REM/..." and "/REM/..." names and per-stat descriptions.
 * The two [6] arrays are indexed by segment register (ES,CS,SS,DS,FS,GS --
 * matching the REMR3Init registration order). */
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145#endif
146
147/*
148 * Global stuff.
149 */
150
151/** MMIO read callbacks.
 * Indexed by access size: [0] = 8-bit, [1] = 16-bit, [2] = 32-bit.
 * Registered together with g_apfnMMIOWrite via cpu_register_io_memory()
 * in REMR3Init() (yielding pVM->rem.s.iMMIOMemType). */
152CPUReadMemoryFunc *g_apfnMMIORead[3] =
153{
154 remR3MMIOReadU8,
155 remR3MMIOReadU16,
156 remR3MMIOReadU32
157};
158
159/** MMIO write callbacks.
 * Same size-indexing as g_apfnMMIORead. */
160CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
161{
162 remR3MMIOWriteU8,
163 remR3MMIOWriteU16,
164 remR3MMIOWriteU32
165};
166
167/** Handler read callbacks.
 * Indexed by access size: [0] = 8-bit, [1] = 16-bit, [2] = 32-bit.
 * Registered via cpu_register_io_memory() in REMR3Init()
 * (yielding pVM->rem.s.iHandlerMemType). */
168CPUReadMemoryFunc *g_apfnHandlerRead[3] =
169{
170 remR3HandlerReadU8,
171 remR3HandlerReadU16,
172 remR3HandlerReadU32
173};
174
175/** Handler write callbacks.
 * Same size-indexing as g_apfnHandlerRead. */
176CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
177{
178 remR3HandlerWriteU8,
179 remR3HandlerWriteU16,
180 remR3HandlerWriteU32
181};
182
183
184#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
185/*
186 * Debugger commands.
187 */
188static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
189
190/** '.remstep' arguments.
 * A single optional numeric/boolean argument (cTimesMin=0 allows omitting it,
 * in which case the handler reports the current state). */
191static const DBGCVARDESC g_aArgRemStep[] =
192{
193 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
194 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
195};
196
197/** Command descriptors.
 * Currently just the '.remstep' command, dispatched to
 * remR3CmdDisasEnableStepping(); registered once in REMR3Init(). */
198static const DBGCCMD g_aCmds[] =
199{
200 {
201 .pszCmd ="remstep",
202 .cArgsMin = 0,
203 .cArgsMax = 1,
204 .paArgDescs = &g_aArgRemStep[0],
205 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
206 .pResultDesc = NULL,
207 .fFlags = 0,
208 .pfnHandler = remR3CmdDisasEnableStepping,
209 .pszSyntax = "[on/off]",
210 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
211 "If no arguments show the current state."
212 }
213};
214#endif
215
216
217/*******************************************************************************
218* Internal Functions *
219*******************************************************************************/
220void remAbort(int rc, const char *pszTip);
221extern int testmath(void);
222
223/* Put them here to avoid unused variable warning. */
224AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
225#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
226//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
227/* Why did this have to be identical?? */
228AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
229#else
230AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
231#endif
232
233
234/* Prologue code, must be in lower 4G to simplify jumps to/from generated code */
/* Allocated as 1K of executable memory (RTMemExecAlloc) in REMR3Init().
 * NOTE(review): presumably consumed by the qemu code generator as the entry
 * trampoline into translated blocks -- confirm against the qemu side. */
235uint8_t* code_gen_prologue;
236
237/**
238 * Initializes the REM.
239 *
240 * @returns VBox status code.
241 * @param pVM The VM to operate on.
242 */
243REMR3DECL(int) REMR3Init(PVM pVM)
244{
245 uint32_t u32Dummy;
246 int rc;
247
248#ifdef VBOX_ENABLE_VBOXREM64
249 LogRel(("Using 64-bit aware REM\n"));
250#endif
251
252 /*
253 * Assert sanity.
254 */
255 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
256 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
257 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
258#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
259 Assert(!testmath());
260#endif
261 /*
262 * Init some internal data members.
263 */
264 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
265 pVM->rem.s.Env.pVM = pVM;
266#ifdef CPU_RAW_MODE_INIT
267 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
268#endif
269
270 /* ctx. */
271 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
272 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
273
274 /* ignore all notifications */
275 pVM->rem.s.fIgnoreAll = true;
276
277 code_gen_prologue = RTMemExecAlloc(_1K);
278
279 cpu_exec_init_all(0);
280
281 /*
282 * Init the recompiler.
283 */
284 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
285 {
286 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
287 return VERR_GENERAL_FAILURE;
288 }
289 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
290 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
291
292 /* allocate code buffer for single instruction emulation. */
293 pVM->rem.s.Env.cbCodeBuffer = 4096;
294 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
295 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
296
297 /* finally, set the cpu_single_env global. */
298 cpu_single_env = &pVM->rem.s.Env;
299
300 /* Nothing is pending by default */
301 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
302
303 /*
304 * Register ram types.
305 */
306 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
307 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
308 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
309 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
310 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
311
312 /* stop ignoring. */
313 pVM->rem.s.fIgnoreAll = false;
314
315 /*
316 * Register the saved state data unit.
317 */
318 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
319 NULL, remR3Save, NULL,
320 NULL, remR3Load, NULL);
321 if (RT_FAILURE(rc))
322 return rc;
323
324#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
325 /*
326 * Debugger commands.
327 */
328 static bool fRegisteredCmds = false;
329 if (!fRegisteredCmds)
330 {
331 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
332 if (RT_SUCCESS(rc))
333 fRegisteredCmds = true;
334 }
335#endif
336
337#ifdef VBOX_WITH_STATISTICS
338 /*
339 * Statistics.
340 */
341 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
342 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
343 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
344 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
345 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
348 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
349 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
350 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
351 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
352 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
353
354 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
355
356 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
357 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
358 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
359 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
360 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
361 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
362 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
363 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
364 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
365 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
366 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
367
368 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
369 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
370 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
371 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
372
373 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
378 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
379
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
385 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
386
387
388#endif
389
390#ifdef DEBUG_ALL_LOGGING
391 loglevel = ~0;
392# ifdef DEBUG_TMP_LOGGING
393 logfile = fopen("/tmp/vbox-qemu.log", "w");
394# endif
395#endif
396
397 return rc;
398}
399
400
401/**
402 * Terminates the REM.
403 *
404 * Termination means cleaning up and freeing all resources,
405 * the VM it self is at this point powered off or suspended.
406 *
407 * @returns VBox status code.
408 * @param pVM The VM to operate on.
409 */
410REMR3DECL(int) REMR3Term(PVM pVM)
411{
412 return VINF_SUCCESS;
413}
414
415
416/**
417 * The VM is being reset.
418 *
419 * For the REM component this means to call the cpu_reset() and
420 * reinitialize some state variables.
421 *
422 * @param pVM VM handle.
423 */
424REMR3DECL(void) REMR3Reset(PVM pVM)
425{
426 /*
427 * Reset the REM cpu.
428 */
429 pVM->rem.s.fIgnoreAll = true;
430 cpu_reset(&pVM->rem.s.Env);
431 pVM->rem.s.cInvalidatedPages = 0;
432 pVM->rem.s.fIgnoreAll = false;
433
434 /* Clear raw ring 0 init state */
435 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
436
437 /* Flush the TBs the next time we execute code here. */
438 pVM->rem.s.fFlushTBs = true;
439}
440
441
442/**
443 * Execute state save operation.
444 *
445 * @returns VBox status code.
446 * @param pVM VM Handle.
447 * @param pSSM SSM operation handle.
448 */
449static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
450{
451 /*
452 * Save the required CPU Env bits.
453 * (Not much because we're never in REM when doing the save.)
454 */
455 PREM pRem = &pVM->rem.s;
456 LogFlow(("remR3Save:\n"));
457 Assert(!pRem->fInREM);
458 SSMR3PutU32(pSSM, pRem->Env.hflags);
459 SSMR3PutU32(pSSM, ~0); /* separator */
460
461 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
462 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
463 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
464
465 return SSMR3PutU32(pSSM, ~0); /* terminator */
466}
467
468
469/**
470 * Execute state load operation.
471 *
472 * @returns VBox status code.
473 * @param pVM VM Handle.
474 * @param pSSM SSM operation handle.
475 * @param u32Version Data layout version.
476 */
477static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
478{
479 uint32_t u32Dummy;
480 uint32_t fRawRing0 = false;
481 uint32_t u32Sep;
482 int rc;
483 PREM pRem;
484 LogFlow(("remR3Load:\n"));
485
486 /*
487 * Validate version.
488 */
489 if ( u32Version != REM_SAVED_STATE_VERSION
490 && u32Version != REM_SAVED_STATE_VERSION_VER1_6)
491 {
492 AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
493 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
494 }
495
496 /*
497 * Do a reset to be on the safe side...
498 */
499 REMR3Reset(pVM);
500
501 /*
502 * Ignore all ignorable notifications.
503 * (Not doing this will cause serious trouble.)
504 */
505 pVM->rem.s.fIgnoreAll = true;
506
507 /*
508 * Load the required CPU Env bits.
509 * (Not much because we're never in REM when doing the save.)
510 */
511 pRem = &pVM->rem.s;
512 Assert(!pRem->fInREM);
513 SSMR3GetU32(pSSM, &pRem->Env.hflags);
514 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
515 {
516 /* Redundant REM CPU state has to be loaded, but can be ignored. */
517 CPUX86State_Ver16 temp;
518 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
519 }
520
521 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
522 if (RT_FAILURE(rc))
523 return rc;
524 if (u32Sep != ~0U)
525 {
526 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
527 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
528 }
529
530 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
531 SSMR3GetUInt(pSSM, &fRawRing0);
532 if (fRawRing0)
533 pRem->Env.state |= CPU_RAW_RING0;
534
535 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
536 {
537 unsigned i;
538
539 /*
540 * Load the REM stuff.
541 */
542 rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
543 if (RT_FAILURE(rc))
544 return rc;
545 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
546 {
547 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
548 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
549 }
550 for (i = 0; i < pRem->cInvalidatedPages; i++)
551 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
552 }
553
554 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
555 if (RT_FAILURE(rc))
556 return rc;
557
558 /* check the terminator. */
559 rc = SSMR3GetU32(pSSM, &u32Sep);
560 if (RT_FAILURE(rc))
561 return rc;
562 if (u32Sep != ~0U)
563 {
564 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
565 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
566 }
567
568 /*
569 * Get the CPUID features.
570 */
571 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
572 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
573
574 /*
575 * Sync the Load Flush the TLB
576 */
577 tlb_flush(&pRem->Env, 1);
578
579 /*
580 * Stop ignoring ignornable notifications.
581 */
582 pVM->rem.s.fIgnoreAll = false;
583
584 /*
585 * Sync the whole CPU state when executing code in the recompiler.
586 */
587 CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
588 return VINF_SUCCESS;
589}
590
591
592
593#undef LOG_GROUP
594#define LOG_GROUP LOG_GROUP_REM_RUN
595
596/**
597 * Single steps an instruction in recompiled mode.
598 *
599 * Before calling this function the REM state needs to be in sync with
600 * the VM. Call REMR3State() to perform the sync. It's only necessary
601 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
602 * and after calling REMR3StateBack().
603 *
604 * @returns VBox status code.
605 *
606 * @param pVM VM Handle.
607 */
608REMR3DECL(int) REMR3Step(PVM pVM)
609{
610 int rc, interrupt_request;
611 RTGCPTR GCPtrPC;
612 bool fBp;
613
614 /*
615 * Lock the REM - we don't wanna have anyone interrupting us
616 * while stepping - and enabled single stepping. We also ignore
617 * pending interrupts and suchlike.
618 */
619 interrupt_request = pVM->rem.s.Env.interrupt_request;
620 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
621 pVM->rem.s.Env.interrupt_request = 0;
622 cpu_single_step(&pVM->rem.s.Env, 1);
623
624 /*
625 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
626 */
627 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
628 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
629
630 /*
631 * Execute and handle the return code.
632 * We execute without enabling the cpu tick, so on success we'll
633 * just flip it on and off to make sure it moves
634 */
635 rc = cpu_exec(&pVM->rem.s.Env);
636 if (rc == EXCP_DEBUG)
637 {
638 TMCpuTickResume(pVM);
639 TMCpuTickPause(pVM);
640 TMVirtualResume(pVM);
641 TMVirtualPause(pVM);
642 rc = VINF_EM_DBG_STEPPED;
643 }
644 else
645 {
646 switch (rc)
647 {
648 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
649 case EXCP_HLT:
650 case EXCP_HALTED: rc = VINF_EM_HALT; break;
651 case EXCP_RC:
652 rc = pVM->rem.s.rc;
653 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
654 break;
655 default:
656 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
657 rc = VERR_INTERNAL_ERROR;
658 break;
659 }
660 }
661
662 /*
663 * Restore the stuff we changed to prevent interruption.
664 * Unlock the REM.
665 */
666 if (fBp)
667 {
668 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
669 Assert(rc2 == 0); NOREF(rc2);
670 }
671 cpu_single_step(&pVM->rem.s.Env, 0);
672 pVM->rem.s.Env.interrupt_request = interrupt_request;
673
674 return rc;
675}
676
677
678/**
679 * Set a breakpoint using the REM facilities.
680 *
681 * @returns VBox status code.
682 * @param pVM The VM handle.
683 * @param Address The breakpoint address.
684 * @thread The emulation thread.
685 */
686REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
687{
688 VM_ASSERT_EMT(pVM);
689 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
690 {
691 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
692 return VINF_SUCCESS;
693 }
694 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
695 return VERR_REM_NO_MORE_BP_SLOTS;
696}
697
698
699/**
700 * Clears a breakpoint set by REMR3BreakpointSet().
701 *
702 * @returns VBox status code.
703 * @param pVM The VM handle.
704 * @param Address The breakpoint address.
705 * @thread The emulation thread.
706 */
707REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
708{
709 VM_ASSERT_EMT(pVM);
710 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
711 {
712 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
713 return VINF_SUCCESS;
714 }
715 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
716 return VERR_REM_BP_NOT_FOUND;
717}
718
719
720/**
721 * Emulate an instruction.
722 *
723 * This function executes one instruction without letting anyone
724 * interrupt it. This is intended for being called while being in
725 * raw mode and thus will take care of all the state syncing between
726 * REM and the rest.
727 *
728 * @returns VBox status code.
729 * @param pVM VM handle.
730 */
731REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
732{
733 bool fFlushTBs;
734
735 int rc, rc2;
736 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
737
738 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
739 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
740 */
741 if (HWACCMIsEnabled(pVM))
742 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
743
744 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
745 fFlushTBs = pVM->rem.s.fFlushTBs;
746 pVM->rem.s.fFlushTBs = false;
747
748 /*
749 * Sync the state and enable single instruction / single stepping.
750 */
751 rc = REMR3State(pVM);
 /* Restore the saved TB-flush request whether or not the sync succeeded. */
752 pVM->rem.s.fFlushTBs = fFlushTBs;
753 if (RT_SUCCESS(rc))
754 {
 /* Save interrupt_request; it is restored before REMR3StateBack below. */
755 int interrupt_request = pVM->rem.s.Env.interrupt_request;
756 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
757 Assert(!pVM->rem.s.Env.singlestep_enabled);
758 /*
759 * Now we set the execute single instruction flag and enter the cpu_exec loop.
760 */
761 TMNotifyStartOfExecution(pVM);
762 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
763 rc = cpu_exec(&pVM->rem.s.Env);
764 TMNotifyEndOfExecution(pVM);
 /* Map the qemu exit code (EXCP_*) onto a VBox status code. */
765 switch (rc)
766 {
767 /*
768 * Executed without anything out of the way happening.
769 */
770 case EXCP_SINGLE_INSTR:
771 rc = VINF_EM_RESCHEDULE;
772 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
773 break;
774
775 /*
776 * If we take a trap or start servicing a pending interrupt, we might end up here.
777 * (Timer thread or some other thread wishing EMT's attention.)
778 */
779 case EXCP_INTERRUPT:
780 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
781 rc = VINF_EM_RESCHEDULE;
782 break;
783
784 /*
785 * Single step, we assume!
786 * If there was a breakpoint there we're fucked now.
787 */
788 case EXCP_DEBUG:
789 {
790 /* breakpoint or single step? */
791 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
792 int iBP;
793 rc = VINF_EM_DBG_STEPPED;
 /* If the current PC matches a registered breakpoint, report that instead. */
794 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
795 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
796 {
797 rc = VINF_EM_DBG_BREAKPOINT;
798 break;
799 }
800 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
801 break;
802 }
803
804 /*
805 * hlt instruction.
806 */
807 case EXCP_HLT:
808 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
809 rc = VINF_EM_HALT;
810 break;
811
812 /*
813 * The VM has halted.
814 */
815 case EXCP_HALTED:
816 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
817 rc = VINF_EM_HALT;
818 break;
819
820 /*
821 * Switch to RAW-mode.
822 */
823 case EXCP_EXECUTE_RAW:
824 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
825 rc = VINF_EM_RESCHEDULE_RAW;
826 break;
827
828 /*
829 * Switch to hardware accelerated RAW-mode.
830 */
831 case EXCP_EXECUTE_HWACC:
832 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
833 rc = VINF_EM_RESCHEDULE_HWACC;
834 break;
835
836 /*
837 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
838 */
839 case EXCP_RC:
840 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
841 rc = pVM->rem.s.rc;
 /* Consume the stored status so a stale value isn't returned twice. */
842 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
843 break;
844
845 /*
846 * Figure out the rest when they arrive....
847 */
848 default:
849 AssertMsgFailed(("rc=%d\n", rc));
850 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
851 rc = VINF_EM_RESCHEDULE;
852 break;
853 }
854
855 /*
856 * Switch back the state.
857 */
858 pVM->rem.s.Env.interrupt_request = interrupt_request;
859 rc2 = REMR3StateBack(pVM);
860 AssertRC(rc2);
861 }
862
863 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
864 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
865 return rc;
866}
867
868
869/**
870 * Runs code in recompiled mode.
871 *
872 * Before calling this function the REM state needs to be in sync with
873 * the VM. Call REMR3State() to perform the sync. It's only necessary
874 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
875 * and after calling REMR3StateBack().
876 *
877 * @returns VBox status code.
878 *
879 * @param pVM VM Handle.
880 */
881REMR3DECL(int) REMR3Run(PVM pVM)
882{
883 int rc;
884 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
885 Assert(pVM->rem.s.fInREM);
886
887 TMNotifyStartOfExecution(pVM);
888 rc = cpu_exec(&pVM->rem.s.Env);
889 TMNotifyEndOfExecution(pVM);
890 switch (rc)
891 {
892 /*
893 * This happens when the execution was interrupted
894 * by an external event, like pending timers.
895 */
896 case EXCP_INTERRUPT:
897 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
898 rc = VINF_SUCCESS;
899 break;
900
901 /*
902 * hlt instruction.
903 */
904 case EXCP_HLT:
905 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
906 rc = VINF_EM_HALT;
907 break;
908
909 /*
910 * The VM has halted.
911 */
912 case EXCP_HALTED:
913 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
914 rc = VINF_EM_HALT;
915 break;
916
917 /*
918 * Breakpoint/single step.
919 */
920 case EXCP_DEBUG:
921 {
922#if 0//def DEBUG_bird
923 static int iBP = 0;
924 printf("howdy, breakpoint! iBP=%d\n", iBP);
925 switch (iBP)
926 {
927 case 0:
928 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
929 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
930 //pVM->rem.s.Env.interrupt_request = 0;
931 //pVM->rem.s.Env.exception_index = -1;
932 //g_fInterruptDisabled = 1;
933 rc = VINF_SUCCESS;
934 asm("int3");
935 break;
936 default:
937 asm("int3");
938 break;
939 }
940 iBP++;
941#else
942 /* breakpoint or single step? */
943 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
944 int iBP;
945 rc = VINF_EM_DBG_STEPPED;
946 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
947 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
948 {
949 rc = VINF_EM_DBG_BREAKPOINT;
950 break;
951 }
952 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
953#endif
954 break;
955 }
956
957 /*
958 * Switch to RAW-mode.
959 */
960 case EXCP_EXECUTE_RAW:
961 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
962 rc = VINF_EM_RESCHEDULE_RAW;
963 break;
964
965 /*
966 * Switch to hardware accelerated RAW-mode.
967 */
968 case EXCP_EXECUTE_HWACC:
969 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
970 rc = VINF_EM_RESCHEDULE_HWACC;
971 break;
972
973 /*
974 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
975 */
976 case EXCP_RC:
977 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
978 rc = pVM->rem.s.rc;
979 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
980 break;
981
982 /*
983 * Figure out the rest when they arrive....
984 */
985 default:
986 AssertMsgFailed(("rc=%d\n", rc));
987 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
988 rc = VINF_SUCCESS;
989 break;
990 }
991
992 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
993 return rc;
994}
995
996
997/**
998 * Check if the cpu state is suitable for Raw execution.
999 *
1000 * @returns boolean
1001 * @param env The CPU env struct.
1002 * @param eip The EIP to check this for (might differ from env->eip).
1003 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1004 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1005 *
1006 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1007 */
1008bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1009{
1010 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1011 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1012 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1013 uint32_t u32CR0;
1014
1015 /* Update counter. */
1016 env->pVM->rem.s.cCanExecuteRaw++;
1017
1018 if (HWACCMIsEnabled(env->pVM))
1019 {
1020 CPUMCTX Ctx;
1021
1022 env->state |= CPU_RAW_HWACC;
1023
1024 /*
1025 * Create partial context for HWACCMR3CanExecuteGuest
1026 */
1027 Ctx.cr0 = env->cr[0];
1028 Ctx.cr3 = env->cr[3];
1029 Ctx.cr4 = env->cr[4];
1030
1031 Ctx.tr = env->tr.selector;
1032 Ctx.trHid.u64Base = env->tr.base;
1033 Ctx.trHid.u32Limit = env->tr.limit;
1034 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1035
1036 Ctx.idtr.cbIdt = env->idt.limit;
1037 Ctx.idtr.pIdt = env->idt.base;
1038
1039 Ctx.gdtr.cbGdt = env->gdt.limit;
1040 Ctx.gdtr.pGdt = env->gdt.base;
1041
1042 Ctx.rsp = env->regs[R_ESP];
1043#ifdef LOG_ENABLED
1044 Ctx.rip = env->eip;
1045#endif
1046
1047 Ctx.eflags.u32 = env->eflags;
1048
1049 Ctx.cs = env->segs[R_CS].selector;
1050 Ctx.csHid.u64Base = env->segs[R_CS].base;
1051 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1052 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1053
1054 Ctx.ds = env->segs[R_DS].selector;
1055 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1056 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1057 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1058
1059 Ctx.es = env->segs[R_ES].selector;
1060 Ctx.esHid.u64Base = env->segs[R_ES].base;
1061 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1062 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1063
1064 Ctx.fs = env->segs[R_FS].selector;
1065 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1066 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1067 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1068
1069 Ctx.gs = env->segs[R_GS].selector;
1070 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1071 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1072 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1073
1074 Ctx.ss = env->segs[R_SS].selector;
1075 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1076 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1077 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1078
1079 Ctx.msrEFER = env->efer;
1080
1081 /* Hardware accelerated raw-mode:
1082 *
1083 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1084 */
1085 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1086 {
1087 *piException = EXCP_EXECUTE_HWACC;
1088 return true;
1089 }
1090 return false;
1091 }
1092
1093 /*
1094 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1095 * or 32 bits protected mode ring 0 code
1096 *
1097 * The tests are ordered by the likelyhood of being true during normal execution.
1098 */
1099 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1100 {
1101 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1102 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1103 return false;
1104 }
1105
1106#ifndef VBOX_RAW_V86
1107 if (fFlags & VM_MASK) {
1108 STAM_COUNTER_INC(&gStatRefuseVM86);
1109 Log2(("raw mode refused: VM_MASK\n"));
1110 return false;
1111 }
1112#endif
1113
1114 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1115 {
1116#ifndef DEBUG_bird
1117 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1118#endif
1119 return false;
1120 }
1121
1122 if (env->singlestep_enabled)
1123 {
1124 //Log2(("raw mode refused: Single step\n"));
1125 return false;
1126 }
1127
1128 if (env->nb_breakpoints > 0)
1129 {
1130 //Log2(("raw mode refused: Breakpoints\n"));
1131 return false;
1132 }
1133
1134 u32CR0 = env->cr[0];
1135 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1136 {
1137 STAM_COUNTER_INC(&gStatRefusePaging);
1138 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1139 return false;
1140 }
1141
1142 if (env->cr[4] & CR4_PAE_MASK)
1143 {
1144 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1145 {
1146 STAM_COUNTER_INC(&gStatRefusePAE);
1147 return false;
1148 }
1149 }
1150
1151 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1152 {
1153 if (!EMIsRawRing3Enabled(env->pVM))
1154 return false;
1155
1156 if (!(env->eflags & IF_MASK))
1157 {
1158 STAM_COUNTER_INC(&gStatRefuseIF0);
1159 Log2(("raw mode refused: IF (RawR3)\n"));
1160 return false;
1161 }
1162
1163 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1164 {
1165 STAM_COUNTER_INC(&gStatRefuseWP0);
1166 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1167 return false;
1168 }
1169 }
1170 else
1171 {
1172 if (!EMIsRawRing0Enabled(env->pVM))
1173 return false;
1174
1175 // Let's start with pure 32 bits ring 0 code first
1176 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1177 {
1178 STAM_COUNTER_INC(&gStatRefuseCode16);
1179 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1180 return false;
1181 }
1182
1183 // Only R0
1184 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1185 {
1186 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1187 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1188 return false;
1189 }
1190
1191 if (!(u32CR0 & CR0_WP_MASK))
1192 {
1193 STAM_COUNTER_INC(&gStatRefuseWP0);
1194 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1195 return false;
1196 }
1197
1198 if (PATMIsPatchGCAddr(env->pVM, eip))
1199 {
1200 Log2(("raw r0 mode forced: patch code\n"));
1201 *piException = EXCP_EXECUTE_RAW;
1202 return true;
1203 }
1204
1205#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1206 if (!(env->eflags & IF_MASK))
1207 {
1208 STAM_COUNTER_INC(&gStatRefuseIF0);
1209 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1210 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1211 return false;
1212 }
1213#endif
1214
1215 env->state |= CPU_RAW_RING0;
1216 }
1217
1218 /*
1219 * Don't reschedule the first time we're called, because there might be
1220 * special reasons why we're here that is not covered by the above checks.
1221 */
1222 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1223 {
1224 Log2(("raw mode refused: first scheduling\n"));
1225 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1226 return false;
1227 }
1228
1229 Assert(PGMPhysIsA20Enabled(env->pVM));
1230 *piException = EXCP_EXECUTE_RAW;
1231 return true;
1232}
1233
1234
1235/**
1236 * Fetches a code byte.
1237 *
1238 * @returns Success indicator (bool) for ease of use.
1239 * @param env The CPU environment structure.
1240 * @param GCPtrInstr Where to fetch code.
1241 * @param pu8Byte Where to store the byte on success
1242 */
1243bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1244{
1245 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1246 if (RT_SUCCESS(rc))
1247 return true;
1248 return false;
1249}
1250
1251
1252/**
1253 * Flush (or invalidate if you like) page table/dir entry.
1254 *
1255 * (invlpg instruction; tlb_flush_page)
1256 *
1257 * @param env Pointer to cpu environment.
1258 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1259 */
1260void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1261{
1262 PVM pVM = env->pVM;
1263 PCPUMCTX pCtx;
1264 int rc;
1265
1266 /*
1267 * When we're replaying invlpg instructions or restoring a saved
1268 * state we disable this path.
1269 */
1270 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1271 return;
1272 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1273 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1274
1275 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1276
1277 /*
1278 * Update the control registers before calling PGMFlushPage.
1279 */
1280 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1281 pCtx->cr0 = env->cr[0];
1282 pCtx->cr3 = env->cr[3];
1283 pCtx->cr4 = env->cr[4];
1284
1285 /*
1286 * Let PGM do the rest.
1287 */
1288 rc = PGMInvalidatePage(pVM, GCPtr);
1289 if (RT_FAILURE(rc))
1290 {
1291 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1292 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1293 }
1294 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1295}
1296
1297
1298#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Translates a guest physical address to an HC ring-3 pointer for TLB use.
 *
 * The two low bits of the returned pointer are used as tags by the caller,
 * which is why the address must be at least 4 byte aligned:
 *  - bit 0 set (value 1): lookup failed (TLB catch-all or unassigned page).
 *  - bit 1 set: writes must be trapped (catch-write).
 *
 * @returns Tagged HC pointer, see above.
 * @param   env1        The CPU environment.
 * @param   physAddr    The guest physical address to translate.
 * @param   fWritable   Whether write access is requested.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored here; the lookup
       always requests a writable mapping — confirm this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(    rc == VINF_SUCCESS
           ||  rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           ||  rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           ||  rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1318
1319target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr)
1320{
1321 RTGCPHYS rv = 0;
1322 int rc;
1323
1324 rc = PGMR3DbgR3Ptr2GCPhys(env1->pVM, (RTR3PTR)addr, &rv);
1325 Assert (RT_SUCCESS(rc));
1326
1327 return (target_ulong)rv;
1328}
1329#endif
1330
1331/**
1332 * Called from tlb_protect_code in order to write monitor a code page.
1333 *
1334 * @param env Pointer to the CPU environment.
1335 * @param GCPtr Code page to monitor
1336 */
1337void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1338{
1339#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1340 Assert(env->pVM->rem.s.fInREM);
1341 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1342 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1343 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1344 && !(env->eflags & VM_MASK) /* no V86 mode */
1345 && !HWACCMIsEnabled(env->pVM))
1346 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1347#endif
1348}
1349
1350/**
1351 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1352 *
1353 * @param env Pointer to the CPU environment.
1354 * @param GCPtr Code page to monitor
1355 */
1356void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1357{
1358 Assert(env->pVM->rem.s.fInREM);
1359#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1360 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1361 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1362 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1363 && !(env->eflags & VM_MASK) /* no V86 mode */
1364 && !HWACCMIsEnabled(env->pVM))
1365 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1366#endif
1367}
1368
1369/**
1370 * Called when the CPU is initialized, any of the CRx registers are changed or
1371 * when the A20 line is modified.
1372 *
1373 * @param env Pointer to the CPU environment.
1374 * @param fGlobal Set if the flush is global.
1375 */
1376void remR3FlushTLB(CPUState *env, bool fGlobal)
1377{
1378 PVM pVM = env->pVM;
1379 PCPUMCTX pCtx;
1380
1381 /*
1382 * When we're replaying invlpg instructions or restoring a saved
1383 * state we disable this path.
1384 */
1385 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1386 return;
1387 Assert(pVM->rem.s.fInREM);
1388
1389 /*
1390 * The caller doesn't check cr4, so we have to do that for ourselves.
1391 */
1392 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1393 fGlobal = true;
1394 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1395
1396 /*
1397 * Update the control registers before calling PGMR3FlushTLB.
1398 */
1399 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1400 pCtx->cr0 = env->cr[0];
1401 pCtx->cr3 = env->cr[3];
1402 pCtx->cr4 = env->cr[4];
1403
1404 /*
1405 * Let PGM do the rest.
1406 */
1407 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1408}
1409
1410
1411/**
1412 * Called when any of the cr0, cr4 or efer registers is updated.
1413 *
1414 * @param env Pointer to the CPU environment.
1415 */
1416void remR3ChangeCpuMode(CPUState *env)
1417{
1418 int rc;
1419 PVM pVM = env->pVM;
1420 PCPUMCTX pCtx;
1421
1422 /*
1423 * When we're replaying loads or restoring a saved
1424 * state this path is disabled.
1425 */
1426 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
1427 return;
1428 Assert(pVM->rem.s.fInREM);
1429
1430 /*
1431 * Update the control registers before calling PGMChangeMode()
1432 * as it may need to map whatever cr3 is pointing to.
1433 */
1434 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1435 pCtx->cr0 = env->cr[0];
1436 pCtx->cr3 = env->cr[3];
1437 pCtx->cr4 = env->cr[4];
1438
1439#ifdef TARGET_X86_64
1440 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
1441 if (rc != VINF_SUCCESS)
1442 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
1443#else
1444 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
1445 if (rc != VINF_SUCCESS)
1446 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
1447#endif
1448}
1449
1450
1451/**
1452 * Called from compiled code to run dma.
1453 *
1454 * @param env Pointer to the CPU environment.
1455 */
1456void remR3DmaRun(CPUState *env)
1457{
1458 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1459 PDMR3DmaRun(env->pVM);
1460 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1461}
1462
1463
1464/**
1465 * Called from compiled code to schedule pending timers in VMM
1466 *
1467 * @param env Pointer to the CPU environment.
1468 */
1469void remR3TimersRun(CPUState *env)
1470{
1471 LogFlow(("remR3TimersRun:\n"));
1472 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1473 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1474 TMR3TimerQueuesDo(env->pVM);
1475 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1476 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1477}
1478
1479
1480/**
1481 * Record trap occurance
1482 *
1483 * @returns VBox status code
1484 * @param env Pointer to the CPU environment.
1485 * @param uTrap Trap nr
1486 * @param uErrorCode Error code
1487 * @param pvNextEIP Next EIP
1488 */
1489int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1490{
1491 PVM pVM = env->pVM;
1492#ifdef VBOX_WITH_STATISTICS
1493 static STAMCOUNTER s_aStatTrap[255];
1494 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1495#endif
1496
1497#ifdef VBOX_WITH_STATISTICS
1498 if (uTrap < 255)
1499 {
1500 if (!s_aRegisters[uTrap])
1501 {
1502 char szStatName[64];
1503 s_aRegisters[uTrap] = true;
1504 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1505 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1506 }
1507 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1508 }
1509#endif
1510 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1511 if( uTrap < 0x20
1512 && (env->cr[0] & X86_CR0_PE)
1513 && !(env->eflags & X86_EFL_VM))
1514 {
1515#ifdef DEBUG
1516 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1517#endif
1518 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1519 {
1520 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1521 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1522 return VERR_REM_TOO_MANY_TRAPS;
1523 }
1524 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1525 pVM->rem.s.cPendingExceptions = 1;
1526 pVM->rem.s.uPendingException = uTrap;
1527 pVM->rem.s.uPendingExcptEIP = env->eip;
1528 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1529 }
1530 else
1531 {
1532 pVM->rem.s.cPendingExceptions = 0;
1533 pVM->rem.s.uPendingException = uTrap;
1534 pVM->rem.s.uPendingExcptEIP = env->eip;
1535 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1536 }
1537 return VINF_SUCCESS;
1538}
1539
1540
1541/*
1542 * Clear current active trap
1543 *
1544 * @param pVM VM Handle.
1545 */
1546void remR3TrapClear(PVM pVM)
1547{
1548 pVM->rem.s.cPendingExceptions = 0;
1549 pVM->rem.s.uPendingException = 0;
1550 pVM->rem.s.uPendingExcptEIP = 0;
1551 pVM->rem.s.uPendingExcptCR2 = 0;
1552}
1553
1554
1555/*
1556 * Record previous call instruction addresses
1557 *
1558 * @param env Pointer to the CPU environment.
1559 */
1560void remR3RecordCall(CPUState *env)
1561{
1562 CSAMR3RecordCallAddress(env->pVM, env->eip);
1563}
1564
1565
1566/**
1567 * Syncs the internal REM state with the VM.
1568 *
1569 * This must be called before REMR3Run() is invoked whenever when the REM
1570 * state is not up to date. Calling it several times in a row is not
1571 * permitted.
1572 *
1573 * @returns VBox status code.
1574 *
1575 * @param pVM VM Handle.
1576 * @param fFlushTBs Flush all translation blocks before executing code
1577 *
1578 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1579 * no do this since the majority of the callers don't want any unnecessary of events
1580 * pending that would immediatly interrupt execution.
1581 */
1582REMR3DECL(int) REMR3State(PVM pVM)
1583{
1584 register const CPUMCTX *pCtx;
1585 register unsigned fFlags;
1586 bool fHiddenSelRegsValid;
1587 unsigned i;
1588 TRPMEVENT enmType;
1589 uint8_t u8TrapNo;
1590 int rc;
1591
1592 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1593 Log2(("REMR3State:\n"));
1594
1595 pCtx = pVM->rem.s.pCtx;
1596 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1597
1598 Assert(!pVM->rem.s.fInREM);
1599 pVM->rem.s.fInStateSync = true;
1600
1601 /*
1602 * If we have to flush TBs, do that immediately.
1603 */
1604 if (pVM->rem.s.fFlushTBs)
1605 {
1606 STAM_COUNTER_INC(&gStatFlushTBs);
1607 tb_flush(&pVM->rem.s.Env);
1608 pVM->rem.s.fFlushTBs = false;
1609 }
1610
1611 /*
1612 * Copy the registers which require no special handling.
1613 */
1614#ifdef TARGET_X86_64
1615 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1616 Assert(R_EAX == 0);
1617 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1618 Assert(R_ECX == 1);
1619 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1620 Assert(R_EDX == 2);
1621 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1622 Assert(R_EBX == 3);
1623 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1624 Assert(R_ESP == 4);
1625 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1626 Assert(R_EBP == 5);
1627 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1628 Assert(R_ESI == 6);
1629 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1630 Assert(R_EDI == 7);
1631 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1632 pVM->rem.s.Env.regs[8] = pCtx->r8;
1633 pVM->rem.s.Env.regs[9] = pCtx->r9;
1634 pVM->rem.s.Env.regs[10] = pCtx->r10;
1635 pVM->rem.s.Env.regs[11] = pCtx->r11;
1636 pVM->rem.s.Env.regs[12] = pCtx->r12;
1637 pVM->rem.s.Env.regs[13] = pCtx->r13;
1638 pVM->rem.s.Env.regs[14] = pCtx->r14;
1639 pVM->rem.s.Env.regs[15] = pCtx->r15;
1640
1641 pVM->rem.s.Env.eip = pCtx->rip;
1642
1643 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1644#else
1645 Assert(R_EAX == 0);
1646 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1647 Assert(R_ECX == 1);
1648 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1649 Assert(R_EDX == 2);
1650 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1651 Assert(R_EBX == 3);
1652 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1653 Assert(R_ESP == 4);
1654 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1655 Assert(R_EBP == 5);
1656 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1657 Assert(R_ESI == 6);
1658 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1659 Assert(R_EDI == 7);
1660 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1661 pVM->rem.s.Env.eip = pCtx->eip;
1662
1663 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1664#endif
1665
1666 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1667
1668 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1669 for (i=0;i<8;i++)
1670 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1671
1672 /*
1673 * Clear the halted hidden flag (the interrupt waking up the CPU can
1674 * have been dispatched in raw mode).
1675 */
1676 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1677
1678 /*
1679 * Replay invlpg?
1680 */
1681 if (pVM->rem.s.cInvalidatedPages)
1682 {
1683 RTUINT i;
1684
1685 pVM->rem.s.fIgnoreInvlPg = true;
1686 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1687 {
1688 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1689 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1690 }
1691 pVM->rem.s.fIgnoreInvlPg = false;
1692 pVM->rem.s.cInvalidatedPages = 0;
1693 }
1694
1695 /* Replay notification changes? */
1696 if (pVM->rem.s.cHandlerNotifications)
1697 REMR3ReplayHandlerNotifications(pVM);
1698
1699 /* Update MSRs; before CRx registers! */
1700 pVM->rem.s.Env.efer = pCtx->msrEFER;
1701 pVM->rem.s.Env.star = pCtx->msrSTAR;
1702 pVM->rem.s.Env.pat = pCtx->msrPAT;
1703#ifdef TARGET_X86_64
1704 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1705 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1706 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1707 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1708
1709 /* Update the internal long mode activate flag according to the new EFER value. */
1710 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1711 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1712 else
1713 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1714#endif
1715
1716
1717 /*
1718 * Registers which are rarely changed and require special handling / order when changed.
1719 */
1720 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1721 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1722 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1723 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR | CPUM_CHANGED_TR
1724 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1725 {
1726 if (fFlags & CPUM_CHANGED_FPU_REM)
1727 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1728
1729 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1730 {
1731 pVM->rem.s.fIgnoreCR3Load = true;
1732 tlb_flush(&pVM->rem.s.Env, true);
1733 pVM->rem.s.fIgnoreCR3Load = false;
1734 }
1735
1736 /* CR4 before CR0! */
1737 if (fFlags & CPUM_CHANGED_CR4)
1738 {
1739 pVM->rem.s.fIgnoreCR3Load = true;
1740 pVM->rem.s.fIgnoreCpuMode = true;
1741 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1742 pVM->rem.s.fIgnoreCpuMode = false;
1743 pVM->rem.s.fIgnoreCR3Load = false;
1744 }
1745
1746 if (fFlags & CPUM_CHANGED_CR0)
1747 {
1748 pVM->rem.s.fIgnoreCR3Load = true;
1749 pVM->rem.s.fIgnoreCpuMode = true;
1750 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1751 pVM->rem.s.fIgnoreCpuMode = false;
1752 pVM->rem.s.fIgnoreCR3Load = false;
1753 }
1754
1755 if (fFlags & CPUM_CHANGED_CR3)
1756 {
1757 pVM->rem.s.fIgnoreCR3Load = true;
1758 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1759 pVM->rem.s.fIgnoreCR3Load = false;
1760 }
1761
1762 if (fFlags & CPUM_CHANGED_GDTR)
1763 {
1764 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1765 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1766 }
1767
1768 if (fFlags & CPUM_CHANGED_IDTR)
1769 {
1770 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1771 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1772 }
1773
1774 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1775 {
1776 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1777 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1778 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1779 }
1780
1781 if (fFlags & CPUM_CHANGED_LDTR)
1782 {
1783 if (fHiddenSelRegsValid)
1784 {
1785 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1786 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1787 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1788 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;;
1789 }
1790 else
1791 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1792 }
1793
1794 if (fFlags & CPUM_CHANGED_TR)
1795 {
1796 if (fHiddenSelRegsValid)
1797 {
1798 pVM->rem.s.Env.tr.selector = pCtx->tr;
1799 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1800 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1801 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;;
1802 }
1803 else
1804 sync_tr(&pVM->rem.s.Env, pCtx->tr);
1805
1806 /** @note do_interrupt will fault if the busy flag is still set.... */
1807 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1808 }
1809
1810 if (fFlags & CPUM_CHANGED_CPUID)
1811 {
1812 uint32_t u32Dummy;
1813
1814 /*
1815 * Get the CPUID features.
1816 */
1817 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1818 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1819 }
1820 }
1821
1822 /*
1823 * Update selector registers.
1824 * This must be done *after* we've synced gdt, ldt and crX registers
1825 * since we're reading the GDT/LDT om sync_seg. This will happen with
1826 * saved state which takes a quick dip into rawmode for instance.
1827 */
1828 /*
1829 * Stack; Note first check this one as the CPL might have changed. The
1830 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1831 */
1832
1833 if (fHiddenSelRegsValid)
1834 {
1835 /* The hidden selector registers are valid in the CPU context. */
1836 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1837
1838 /* Set current CPL */
1839 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1840
1841 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1842 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1843 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1844 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1845 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1846 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1847 }
1848 else
1849 {
1850 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1851 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1852 {
1853 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1854
1855 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1856 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1857#ifdef VBOX_WITH_STATISTICS
1858 if (pVM->rem.s.Env.segs[R_SS].newselector)
1859 {
1860 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1861 }
1862#endif
1863 }
1864 else
1865 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1866
1867 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1868 {
1869 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1870 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1871#ifdef VBOX_WITH_STATISTICS
1872 if (pVM->rem.s.Env.segs[R_ES].newselector)
1873 {
1874 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1875 }
1876#endif
1877 }
1878 else
1879 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1880
1881 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1882 {
1883 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1884 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1885#ifdef VBOX_WITH_STATISTICS
1886 if (pVM->rem.s.Env.segs[R_CS].newselector)
1887 {
1888 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1889 }
1890#endif
1891 }
1892 else
1893 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1894
1895 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1896 {
1897 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1898 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1899#ifdef VBOX_WITH_STATISTICS
1900 if (pVM->rem.s.Env.segs[R_DS].newselector)
1901 {
1902 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1903 }
1904#endif
1905 }
1906 else
1907 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1908
1909 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1910 * be the same but not the base/limit. */
1911 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1912 {
1913 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1914 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1915#ifdef VBOX_WITH_STATISTICS
1916 if (pVM->rem.s.Env.segs[R_FS].newselector)
1917 {
1918 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1919 }
1920#endif
1921 }
1922 else
1923 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1924
1925 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1926 {
1927 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1928 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1929#ifdef VBOX_WITH_STATISTICS
1930 if (pVM->rem.s.Env.segs[R_GS].newselector)
1931 {
1932 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1933 }
1934#endif
1935 }
1936 else
1937 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1938 }
1939
1940 /*
1941 * Check for traps.
1942 */
1943 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
1944 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
1945 if (RT_SUCCESS(rc))
1946 {
1947#ifdef DEBUG
1948 if (u8TrapNo == 0x80)
1949 {
1950 remR3DumpLnxSyscall(pVM);
1951 remR3DumpOBsdSyscall(pVM);
1952 }
1953#endif
1954
1955 pVM->rem.s.Env.exception_index = u8TrapNo;
1956 if (enmType != TRPM_SOFTWARE_INT)
1957 {
1958 pVM->rem.s.Env.exception_is_int = 0;
1959 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
1960 }
1961 else
1962 {
1963 /*
1964 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
1965 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
1966 * for int03 and into.
1967 */
1968 pVM->rem.s.Env.exception_is_int = 1;
1969 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
1970 /* int 3 may be generated by one-byte 0xcc */
1971 if (u8TrapNo == 3)
1972 {
1973 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
1974 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1975 }
1976 /* int 4 may be generated by one-byte 0xce */
1977 else if (u8TrapNo == 4)
1978 {
1979 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
1980 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1981 }
1982 }
1983
1984 /* get error code and cr2 if needed. */
1985 switch (u8TrapNo)
1986 {
1987 case 0x0e:
1988 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
1989 /* fallthru */
1990 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
1991 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
1992 break;
1993
1994 case 0x11: case 0x08:
1995 default:
1996 pVM->rem.s.Env.error_code = 0;
1997 break;
1998 }
1999
2000 /*
2001 * We can now reset the active trap since the recompiler is gonna have a go at it.
2002 */
2003 rc = TRPMResetTrap(pVM);
2004 AssertRC(rc);
2005 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2006 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2007 }
2008
2009 /*
2010 * Clear old interrupt request flags; Check for pending hardware interrupts.
2011 * (See @remark for why we don't check for other FFs.)
2012 */
2013 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2014 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2015 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2016 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2017
2018 /*
2019 * We're now in REM mode.
2020 */
2021 pVM->rem.s.fInREM = true;
2022 pVM->rem.s.fInStateSync = false;
2023 pVM->rem.s.cCanExecuteRaw = 0;
2024 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2025 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2026 return VINF_SUCCESS;
2027}
2028
2029
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR = 0;
    pCtx->fpu.MXCSR_MASK = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////  dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8 = pVM->rem.s.Env.regs[8];
    pCtx->r9 = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* A non-zero newselector means the lazy segment sync hasn't completed for that register. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    /* Control registers. */
    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    /* Debug registers (all eight). */
    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: raise the sync force-flags only when the base actually moved. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    /* NOTE(review): remR3StateUpdate masks the same field with 0xFFFF while this function uses
       0xF0FF (also clearing the limit 19:16 nibble of the descriptor dword) -- confirm which
       mask is the intended one and make the two functions agree. */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;

    pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps the recompiler left pending and hand them back to TRPM.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e: /* #PF also carries the fault address (CR2). */
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2262
2263
/**
 * This is called by the disassembler when it wants to update the cpu state
 * before for instance doing a register dump.
 *
 * Unlike REMR3StateBack() this does NOT leave REM mode, does not gather
 * statistics, and does not hand pending traps back to TRPM - it only copies
 * the register state from the recompiler environment to the CPUM context.
 */
static void remR3StateUpdate(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR = 0;
    pCtx->fpu.MXCSR_MASK = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////  dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8 = pVM->rem.s.Env.regs[8];
    pCtx->r9 = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    /* Control registers. */
    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    /* Debug registers (all eight). */
    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: raise the sync force-flags only when the base actually moved. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    /* NOTE(review): REMR3StateBack masks the same field with 0xF0FF (clearing the limit 19:16
       nibble) while this function keeps all 16 bits -- confirm which mask is intended and make
       the two functions agree. */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;

    pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;

    pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

}
2427
2428
2429/**
2430 * Update the VMM state information if we're currently in REM.
2431 *
2432 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2433 * we're currently executing in REM and the VMM state is invalid. This method will of
2434 * course check that we're executing in REM before syncing any data over to the VMM.
2435 *
2436 * @param pVM The VM handle.
2437 */
2438REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2439{
2440 if (pVM->rem.s.fInREM)
2441 remR3StateUpdate(pVM);
2442}
2443
2444
2445#undef LOG_GROUP
2446#define LOG_GROUP LOG_GROUP_REM
2447
2448
2449/**
2450 * Notify the recompiler about Address Gate 20 state change.
2451 *
2452 * This notification is required since A20 gate changes are
2453 * initialized from a device driver and the VM might just as
2454 * well be in REM mode as in RAW mode.
2455 *
2456 * @param pVM VM handle.
2457 * @param fEnable True if the gate should be enabled.
2458 * False if the gate should be disabled.
2459 */
2460REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2461{
2462 bool fSaved;
2463
2464 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2465 VM_ASSERT_EMT(pVM);
2466
2467 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2468 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2469
2470 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2471
2472 pVM->rem.s.fIgnoreAll = fSaved;
2473}
2474
2475
2476/**
2477 * Replays the invalidated recorded pages.
2478 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2479 *
2480 * @param pVM VM handle.
2481 */
2482REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2483{
2484 RTUINT i;
2485
2486 VM_ASSERT_EMT(pVM);
2487
2488 /*
2489 * Sync the required registers.
2490 */
2491 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2492 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2493 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2494 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2495
2496 /*
2497 * Replay the flushes.
2498 */
2499 pVM->rem.s.fIgnoreInvlPg = true;
2500 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2501 {
2502 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2503 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2504 }
2505 pVM->rem.s.fIgnoreInvlPg = false;
2506 pVM->rem.s.cInvalidatedPages = 0;
2507}
2508
2509
2510/**
2511 * Replays the handler notification changes
2512 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2513 *
2514 * @param pVM VM handle.
2515 */
2516REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2517{
2518 /*
2519 * Replay the flushes.
2520 */
2521 RTUINT i;
2522 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2523
2524 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2525 VM_ASSERT_EMT(pVM);
2526
2527 pVM->rem.s.cHandlerNotifications = 0;
2528 for (i = 0; i < c; i++)
2529 {
2530 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2531 switch (pRec->enmKind)
2532 {
2533 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2534 REMR3NotifyHandlerPhysicalRegister(pVM,
2535 pRec->u.PhysicalRegister.enmType,
2536 pRec->u.PhysicalRegister.GCPhys,
2537 pRec->u.PhysicalRegister.cb,
2538 pRec->u.PhysicalRegister.fHasHCHandler);
2539 break;
2540
2541 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2542 REMR3NotifyHandlerPhysicalDeregister(pVM,
2543 pRec->u.PhysicalDeregister.enmType,
2544 pRec->u.PhysicalDeregister.GCPhys,
2545 pRec->u.PhysicalDeregister.cb,
2546 pRec->u.PhysicalDeregister.fHasHCHandler,
2547 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2548 break;
2549
2550 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2551 REMR3NotifyHandlerPhysicalModify(pVM,
2552 pRec->u.PhysicalModify.enmType,
2553 pRec->u.PhysicalModify.GCPhysOld,
2554 pRec->u.PhysicalModify.GCPhysNew,
2555 pRec->u.PhysicalModify.cb,
2556 pRec->u.PhysicalModify.fHasHCHandler,
2557 pRec->u.PhysicalModify.fRestoreAsRAM);
2558 break;
2559
2560 default:
2561 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2562 break;
2563 }
2564 }
2565 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2566}
2567
2568
2569/**
2570 * Notify REM about changed code page.
2571 *
2572 * @returns VBox status code.
2573 * @param pVM VM handle.
2574 * @param pvCodePage Code page address
2575 */
2576REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2577{
2578#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2579 int rc;
2580 RTGCPHYS PhysGC;
2581 uint64_t flags;
2582
2583 VM_ASSERT_EMT(pVM);
2584
2585 /*
2586 * Get the physical page address.
2587 */
2588 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2589 if (rc == VINF_SUCCESS)
2590 {
2591 /*
2592 * Sync the required registers and flush the whole page.
2593 * (Easier to do the whole page than notifying it about each physical
2594 * byte that was changed.
2595 */
2596 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2597 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2598 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2599 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2600
2601 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2602 }
2603#endif
2604 return VINF_SUCCESS;
2605}
2606
2607
/**
 * Notification about a successful MMR3PhysRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address the RAM.
 * @param   cb          Size of the memory.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
{
    uint32_t cbBitmap;
    int rc;
    Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Base ram? If so, allocate the dirty-page tracking bitmap (one byte per
     * page) and mark everything dirty to start with.
     */
    if (!GCPhys)
    {
        phys_ram_size = cb;
        phys_ram_dirty_size = cb >> PAGE_SHIFT;
#ifndef VBOX_STRICT
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
        /* Allocate a bitmap covering the full 4G address space, then protect
           everything past the in-use portion so stray accesses fault. The
           pointer is offset so valid indexes end exactly at the protected
           boundary. */
        phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
        cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
        AssertRC(rc);
        phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
#endif
        memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    }

    /*
     * Register the ram. (Self-inflicted notifications are suppressed while
     * doing so via fIgnoreAll.)
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

#ifdef VBOX_WITH_NEW_PHYS_CODE
    if (fFlags & MM_RAM_FLAGS_RESERVED)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else
        cpu_register_physical_memory(GCPhys, cb, GCPhys);
#else
    if (!GCPhys)
        cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
    else
    {
        if (fFlags & MM_RAM_FLAGS_RESERVED)
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        else
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
    }
#endif
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2676
2677#ifndef VBOX_WITH_NEW_PHYS_CODE
2678
/**
 * Notification about a successful PGMR3PhysRegisterChunk() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address the RAM.
 * @param   cb          Size of the memory.
 * @param   pvRam       The HC address of the RAM.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
{
    Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     * Chunks must be page aligned, exactly one dynamic chunk in size, and
     * plain normal RAM (no flags).
     */
    Assert(pvRam);
    Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
    Assert(fFlags == 0 /* normal RAM */);
    /* Register with the recompiler, suppressing self-inflicted notifications. */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;
    cpu_register_physical_memory(GCPhys, cb, GCPhys);
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2707
2708
2709/**
2710 * Grows dynamically allocated guest RAM.
2711 * Will raise a fatal error if the operation fails.
2712 *
2713 * @param physaddr The physical address.
2714 */
2715void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2716{
2717 int rc;
2718 PVM pVM = cpu_single_env->pVM;
2719 const RTGCPHYS GCPhys = physaddr;
2720
2721 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2722 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2723 if (RT_SUCCESS(rc))
2724 return;
2725
2726 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2727 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2728 AssertFatalFailed();
2729}
2730
2731#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2732
2733/**
2734 * Notification about a successful MMR3PhysRomRegister() call.
2735 *
2736 * @param pVM VM handle.
2737 * @param GCPhys The physical address of the ROM.
2738 * @param cb The size of the ROM.
2739 * @param pvCopy Pointer to the ROM copy.
2740 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2741 * This function will be called when ever the protection of the
2742 * shadow ROM changes (at reset and end of POST).
2743 */
2744REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2745{
2746 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
2747 VM_ASSERT_EMT(pVM);
2748
2749 /*
2750 * Validate input - we trust the caller.
2751 */
2752 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2753 Assert(cb);
2754 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2755 Assert(pvCopy);
2756 Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);
2757
2758 /*
2759 * Register the rom.
2760 */
2761 Assert(!pVM->rem.s.fIgnoreAll);
2762 pVM->rem.s.fIgnoreAll = true;
2763
2764 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2765
2766 Log2(("%.64Rhxd\n", (char *)pvCopy + cb - 64));
2767
2768 Assert(pVM->rem.s.fIgnoreAll);
2769 pVM->rem.s.fIgnoreAll = false;
2770}
2771
2772
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address.
 * @param   cb          The size of the range.
 * @todo    Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't
 *          reserve any memory soon.
 */
REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysReserve: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory. (Self-inflicted notifications are suppressed
     * while doing so via fIgnoreAll.)
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2805
2806
2807/**
2808 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2809 *
2810 * @param pVM VM Handle.
2811 * @param enmType Handler type.
2812 * @param GCPhys Handler range address.
2813 * @param cb Size of the handler range.
2814 * @param fHasHCHandler Set if the handler has a HC callback function.
2815 *
2816 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2817 * Handler memory type to memory which has no HC handler.
2818 */
2819REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2820{
2821 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2822 enmType, GCPhys, cb, fHasHCHandler));
2823 VM_ASSERT_EMT(pVM);
2824 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2825 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2826
2827 if (pVM->rem.s.cHandlerNotifications)
2828 REMR3ReplayHandlerNotifications(pVM);
2829
2830 Assert(!pVM->rem.s.fIgnoreAll);
2831 pVM->rem.s.fIgnoreAll = true;
2832
2833 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2834 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2835 else if (fHasHCHandler)
2836 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2837
2838 Assert(pVM->rem.s.fIgnoreAll);
2839 pVM->rem.s.fIgnoreAll = false;
2840}
2841
2842
2843/**
2844 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2845 *
2846 * @param pVM VM Handle.
2847 * @param enmType Handler type.
2848 * @param GCPhys Handler range address.
2849 * @param cb Size of the handler range.
2850 * @param fHasHCHandler Set if the handler has a HC callback function.
2851 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2852 */
2853REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2854{
2855 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2856 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2857 VM_ASSERT_EMT(pVM);
2858
2859 if (pVM->rem.s.cHandlerNotifications)
2860 REMR3ReplayHandlerNotifications(pVM);
2861
2862 Assert(!pVM->rem.s.fIgnoreAll);
2863 pVM->rem.s.fIgnoreAll = true;
2864
2865/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2866 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2867 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2868 else if (fHasHCHandler)
2869 {
2870 if (!fRestoreAsRAM)
2871 {
2872 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2873 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2874 }
2875 else
2876 {
2877 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2878 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2879 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2880 }
2881 }
2882
2883 Assert(pVM->rem.s.fIgnoreAll);
2884 pVM->rem.s.fIgnoreAll = false;
2885}
2886
2887
2888/**
2889 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2890 *
2891 * @param pVM VM Handle.
2892 * @param enmType Handler type.
2893 * @param GCPhysOld Old handler range address.
2894 * @param GCPhysNew New handler range address.
2895 * @param cb Size of the handler range.
2896 * @param fHasHCHandler Set if the handler has a HC callback function.
2897 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2898 */
2899REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2900{
2901 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
2902 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
2903 VM_ASSERT_EMT(pVM);
2904 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
2905
2906 if (pVM->rem.s.cHandlerNotifications)
2907 REMR3ReplayHandlerNotifications(pVM);
2908
2909 if (fHasHCHandler)
2910 {
2911 Assert(!pVM->rem.s.fIgnoreAll);
2912 pVM->rem.s.fIgnoreAll = true;
2913
2914 /*
2915 * Reset the old page.
2916 */
2917 if (!fRestoreAsRAM)
2918 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
2919 else
2920 {
2921 /* This is not perfect, but it'll do for PD monitoring... */
2922 Assert(cb == PAGE_SIZE);
2923 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
2924 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
2925 }
2926
2927 /*
2928 * Update the new page.
2929 */
2930 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
2931 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2932 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
2933
2934 Assert(pVM->rem.s.fIgnoreAll);
2935 pVM->rem.s.fIgnoreAll = false;
2936 }
2937}
2938
2939
2940/**
2941 * Checks if we're handling access to this page or not.
2942 *
2943 * @returns true if we're trapping access.
2944 * @returns false if we aren't.
2945 * @param pVM The VM handle.
2946 * @param GCPhys The physical address.
2947 *
2948 * @remark This function will only work correctly in VBOX_STRICT builds!
2949 */
2950REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
2951{
2952#ifdef VBOX_STRICT
2953 unsigned long off;
2954 if (pVM->rem.s.cHandlerNotifications)
2955 REMR3ReplayHandlerNotifications(pVM);
2956
2957 off = get_phys_page_offset(GCPhys);
2958 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
2959 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
2960 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
2961#else
2962 return false;
2963#endif
2964}
2965
2966
2967/**
2968 * Deals with a rare case in get_phys_addr_code where the code
2969 * is being monitored.
2970 *
2971 * It could also be an MMIO page, in which case we will raise a fatal error.
2972 *
2973 * @returns The physical address corresponding to addr.
2974 * @param env The cpu environment.
2975 * @param addr The virtual address.
2976 * @param pTLBEntry The TLB entry.
2977 */
2978target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
2979 target_ulong addr,
2980 CPUTLBEntry* pTLBEntry,
2981 target_phys_addr_t ioTLBEntry)
2982{
2983 PVM pVM = env->pVM;
2984
2985 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
2986 {
2987 /* If code memory is being monitored, appropriate IOTLB entry will have
2988 handler IO type, and addend will provide real physical address, no
2989 matter if we store VA in TLB or not, as handlers are always passed PA */
2990 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
2991 return ret;
2992 }
2993 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
2994 "*** handlers\n",
2995 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
2996 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
2997 LogRel(("*** mmio\n"));
2998 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
2999 LogRel(("*** phys\n"));
3000 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3001 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3002 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3003 AssertFatalFailed();
3004}
3005
3006/**
3007 * Read guest RAM and ROM.
3008 *
3009 * @param SrcGCPhys The source address (guest physical).
3010 * @param pvDst The destination address.
3011 * @param cb Number of bytes
3012 */
3013void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3014{
3015 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3016 VBOX_CHECK_ADDR(SrcGCPhys);
3017 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3018#ifdef VBOX_DEBUG_PHYS
3019 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3020#endif
3021 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3022}
3023
3024
3025/**
3026 * Read guest RAM and ROM, unsigned 8-bit.
3027 *
3028 * @param SrcGCPhys The source address (guest physical).
3029 */
3030RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3031{
3032 uint8_t val;
3033 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3034 VBOX_CHECK_ADDR(SrcGCPhys);
3035 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3036 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3037#ifdef VBOX_DEBUG_PHYS
3038 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3039#endif
3040 return val;
3041}
3042
3043
3044/**
3045 * Read guest RAM and ROM, signed 8-bit.
3046 *
3047 * @param SrcGCPhys The source address (guest physical).
3048 */
3049RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3050{
3051 int8_t val;
3052 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3053 VBOX_CHECK_ADDR(SrcGCPhys);
3054 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3055 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3056#ifdef VBOX_DEBUG_PHYS
3057 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3058#endif
3059 return val;
3060}
3061
3062
3063/**
3064 * Read guest RAM and ROM, unsigned 16-bit.
3065 *
3066 * @param SrcGCPhys The source address (guest physical).
3067 */
3068RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3069{
3070 uint16_t val;
3071 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3072 VBOX_CHECK_ADDR(SrcGCPhys);
3073 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3074 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3075#ifdef VBOX_DEBUG_PHYS
3076 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3077#endif
3078 return val;
3079}
3080
3081
3082/**
3083 * Read guest RAM and ROM, signed 16-bit.
3084 *
3085 * @param SrcGCPhys The source address (guest physical).
3086 */
3087RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3088{
3089 int16_t val;
3090 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3091 VBOX_CHECK_ADDR(SrcGCPhys);
3092 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3093 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3094#ifdef VBOX_DEBUG_PHYS
3095 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3096#endif
3097 return val;
3098}
3099
3100
3101/**
3102 * Read guest RAM and ROM, unsigned 32-bit.
3103 *
3104 * @param SrcGCPhys The source address (guest physical).
3105 */
3106RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3107{
3108 uint32_t val;
3109 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3110 VBOX_CHECK_ADDR(SrcGCPhys);
3111 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3112 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3113#ifdef VBOX_DEBUG_PHYS
3114 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3115#endif
3116 return val;
3117}
3118
3119
3120/**
3121 * Read guest RAM and ROM, signed 32-bit.
3122 *
3123 * @param SrcGCPhys The source address (guest physical).
3124 */
3125RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3126{
3127 int32_t val;
3128 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3129 VBOX_CHECK_ADDR(SrcGCPhys);
3130 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3131 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3132#ifdef VBOX_DEBUG_PHYS
3133 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3134#endif
3135 return val;
3136}
3137
3138
3139/**
3140 * Read guest RAM and ROM, unsigned 64-bit.
3141 *
3142 * @param SrcGCPhys The source address (guest physical).
3143 */
3144uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3145{
3146 uint64_t val;
3147 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3148 VBOX_CHECK_ADDR(SrcGCPhys);
3149 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3150 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3151#ifdef VBOX_DEBUG_PHYS
3152 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3153#endif
3154 return val;
3155}
3156
3157/**
3158 * Read guest RAM and ROM, signed 64-bit.
3159 *
3160 * @param SrcGCPhys The source address (guest physical).
3161 */
3162int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3163{
3164 int64_t val;
3165 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3166 VBOX_CHECK_ADDR(SrcGCPhys);
3167 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3168 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3169#ifdef VBOX_DEBUG_PHYS
3170 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3171#endif
3172 return val;
3173}
3174
3175
3176/**
3177 * Write guest RAM.
3178 *
3179 * @param DstGCPhys The destination address (guest physical).
3180 * @param pvSrc The source address.
3181 * @param cb Number of bytes to write
3182 */
3183void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3184{
3185 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3186 VBOX_CHECK_ADDR(DstGCPhys);
3187 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3188 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3189#ifdef VBOX_DEBUG_PHYS
3190 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3191#endif
3192}
3193
3194
3195/**
3196 * Write guest RAM, unsigned 8-bit.
3197 *
3198 * @param DstGCPhys The destination address (guest physical).
3199 * @param val Value
3200 */
3201void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3202{
3203 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3204 VBOX_CHECK_ADDR(DstGCPhys);
3205 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3206 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3207#ifdef VBOX_DEBUG_PHYS
3208 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3209#endif
3210}
3211
3212
3213/**
3214 * Write guest RAM, unsigned 8-bit.
3215 *
3216 * @param DstGCPhys The destination address (guest physical).
3217 * @param val Value
3218 */
3219void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3220{
3221 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3222 VBOX_CHECK_ADDR(DstGCPhys);
3223 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3224 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3225#ifdef VBOX_DEBUG_PHYS
3226 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3227#endif
3228}
3229
3230
3231/**
3232 * Write guest RAM, unsigned 32-bit.
3233 *
3234 * @param DstGCPhys The destination address (guest physical).
3235 * @param val Value
3236 */
3237void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3238{
3239 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3240 VBOX_CHECK_ADDR(DstGCPhys);
3241 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3242 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3243#ifdef VBOX_DEBUG_PHYS
3244 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3245#endif
3246}
3247
3248
3249/**
3250 * Write guest RAM, unsigned 64-bit.
3251 *
3252 * @param DstGCPhys The destination address (guest physical).
3253 * @param val Value
3254 */
3255void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3256{
3257 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3258 VBOX_CHECK_ADDR(DstGCPhys);
3259 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3260 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3261#ifdef VBOX_DEBUG_PHYS
3262 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3263#endif
3264}
3265
3266#undef LOG_GROUP
3267#define LOG_GROUP LOG_GROUP_REM_MMIO
3268
3269/** Read MMIO memory. */
3270static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3271{
3272 uint32_t u32 = 0;
3273 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3274 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3275 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3276 return u32;
3277}
3278
3279/** Read MMIO memory. */
3280static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3281{
3282 uint32_t u32 = 0;
3283 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3284 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3285 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3286 return u32;
3287}
3288
3289/** Read MMIO memory. */
3290static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3291{
3292 uint32_t u32 = 0;
3293 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3294 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3295 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3296 return u32;
3297}
3298
3299/** Write to MMIO memory. */
3300static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3301{
3302 int rc;
3303 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3304 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3305 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3306}
3307
3308/** Write to MMIO memory. */
3309static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3310{
3311 int rc;
3312 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3313 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3314 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3315}
3316
3317/** Write to MMIO memory. */
3318static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3319{
3320 int rc;
3321 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3322 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3323 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3324}
3325
3326
3327#undef LOG_GROUP
3328#define LOG_GROUP LOG_GROUP_REM_HANDLER
3329
3330/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3331
3332static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3333{
3334 uint8_t u8;
3335 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3336 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3337 return u8;
3338}
3339
3340static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3341{
3342 uint16_t u16;
3343 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3344 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3345 return u16;
3346}
3347
3348static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3349{
3350 uint32_t u32;
3351 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3352 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3353 return u32;
3354}
3355
3356static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3357{
3358 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3359 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3360}
3361
3362static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3363{
3364 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3365 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3366}
3367
3368static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3369{
3370 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3371 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3372}
3373
3374/* -+- disassembly -+- */
3375
3376#undef LOG_GROUP
3377#define LOG_GROUP LOG_GROUP_REM_DISAS
3378
3379
3380/**
3381 * Enables or disables singled stepped disassembly.
3382 *
3383 * @returns VBox status code.
3384 * @param pVM VM handle.
3385 * @param fEnable To enable set this flag, to disable clear it.
3386 */
3387static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3388{
3389 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3390 VM_ASSERT_EMT(pVM);
3391
3392 if (fEnable)
3393 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3394 else
3395 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3396 return VINF_SUCCESS;
3397}
3398
3399
3400/**
3401 * Enables or disables singled stepped disassembly.
3402 *
3403 * @returns VBox status code.
3404 * @param pVM VM handle.
3405 * @param fEnable To enable set this flag, to disable clear it.
3406 */
3407REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3408{
3409 PVMREQ pReq;
3410 int rc;
3411
3412 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3413 if (VM_IS_EMT(pVM))
3414 return remR3DisasEnableStepping(pVM, fEnable);
3415
3416 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3417 AssertRC(rc);
3418 if (RT_SUCCESS(rc))
3419 rc = pReq->iStatus;
3420 VMR3ReqFree(pReq);
3421 return rc;
3422}
3423
3424
3425#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3426/**
3427 * External Debugger Command: .remstep [on|off|1|0]
3428 */
3429static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3430{
3431 bool fEnable;
3432 int rc;
3433
3434 /* print status */
3435 if (cArgs == 0)
3436 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3437 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3438
3439 /* convert the argument and change the mode. */
3440 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3441 if (RT_FAILURE(rc))
3442 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3443 rc = REMR3DisasEnableStepping(pVM, fEnable);
3444 if (RT_FAILURE(rc))
3445 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3446 return rc;
3447}
3448#endif
3449
3450
3451/**
3452 * Disassembles n instructions and prints them to the log.
3453 *
3454 * @returns Success indicator.
3455 * @param env Pointer to the recompiler CPU structure.
3456 * @param f32BitCode Indicates that whether or not the code should
3457 * be disassembled as 16 or 32 bit. If -1 the CS
3458 * selector will be inspected.
3459 * @param nrInstructions Nr of instructions to disassemble
3460 * @param pszPrefix
3461 * @remark not currently used for anything but ad-hoc debugging.
3462 */
3463bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
3464{
3465 int i, rc;
3466 RTGCPTR GCPtrPC;
3467 uint8_t *pvPC;
3468 RTINTPTR off;
3469 DISCPUSTATE Cpu;
3470
3471 /*
3472 * Determin 16/32 bit mode.
3473 */
3474 if (f32BitCode == -1)
3475 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3476
3477 /*
3478 * Convert cs:eip to host context address.
3479 * We don't care to much about cross page correctness presently.
3480 */
3481 GCPtrPC = env->segs[R_CS].base + env->eip;
3482 if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3483 {
3484 Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);
3485
3486 /* convert eip to physical address. */
3487 rc = PGMPhysGCPtr2R3PtrByGstCR3(env->pVM,
3488 GCPtrPC,
3489 env->cr[3],
3490 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
3491 (void**)&pvPC);
3492 if (RT_FAILURE(rc))
3493 {
3494 if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
3495 return false;
3496 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
3497 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
3498 }
3499 }
3500 else
3501 {
3502 /* physical address */
3503 rc = PGMPhysGCPhys2R3Ptr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
3504 (void**)&pvPC);
3505 if (RT_FAILURE(rc))
3506 return false;
3507 }
3508
3509 /*
3510 * Disassemble.
3511 */
3512 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3513 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3514 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3515 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3516 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3517 //Cpu.dwUserData[2] = GCPtrPC;
3518
3519 for (i=0;i<nrInstructions;i++)
3520 {
3521 char szOutput[256];
3522 uint32_t cbOp;
3523 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3524 return false;
3525 if (pszPrefix)
3526 Log(("%s: %s", pszPrefix, szOutput));
3527 else
3528 Log(("%s", szOutput));
3529
3530 pvPC += cbOp;
3531 }
3532 return true;
3533}
3534
3535
3536/** @todo need to test the new code, using the old code in the mean while. */
3537#define USE_OLD_DUMP_AND_DISASSEMBLY
3538
3539/**
3540 * Disassembles one instruction and prints it to the log.
3541 *
3542 * @returns Success indicator.
3543 * @param env Pointer to the recompiler CPU structure.
3544 * @param f32BitCode Indicates that whether or not the code should
3545 * be disassembled as 16 or 32 bit. If -1 the CS
3546 * selector will be inspected.
3547 * @param pszPrefix
3548 */
3549bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3550{
3551#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
3552 PVM pVM = env->pVM;
3553 RTGCPTR GCPtrPC;
3554 uint8_t *pvPC;
3555 char szOutput[256];
3556 uint32_t cbOp;
3557 RTINTPTR off;
3558 DISCPUSTATE Cpu;
3559
3560
3561 /* Doesn't work in long mode. */
3562 if (env->hflags & HF_LMA_MASK)
3563 return false;
3564
3565 /*
3566 * Determin 16/32 bit mode.
3567 */
3568 if (f32BitCode == -1)
3569 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3570
3571 /*
3572 * Log registers
3573 */
3574 if (LogIs2Enabled())
3575 {
3576 remR3StateUpdate(pVM);
3577 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3578 }
3579
3580 /*
3581 * Convert cs:eip to host context address.
3582 * We don't care to much about cross page correctness presently.
3583 */
3584 GCPtrPC = env->segs[R_CS].base + env->eip;
3585 if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3586 {
3587 /* convert eip to physical address. */
3588 int rc = PGMPhysGCPtr2R3PtrByGstCR3(pVM,
3589 GCPtrPC,
3590 env->cr[3],
3591 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
3592 (void**)&pvPC);
3593 if (RT_FAILURE(rc))
3594 {
3595 if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
3596 return false;
3597 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
3598 + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
3599 }
3600 }
3601 else
3602 {
3603
3604 /* physical address */
3605 int rc = PGMPhysGCPhys2R3Ptr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
3606 if (RT_FAILURE(rc))
3607 return false;
3608 }
3609
3610 /*
3611 * Disassemble.
3612 */
3613 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3614 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3615 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3616 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3617 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3618 //Cpu.dwUserData[2] = GCPtrPC;
3619 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3620 return false;
3621
3622 if (!f32BitCode)
3623 {
3624 if (pszPrefix)
3625 Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
3626 else
3627 Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
3628 }
3629 else
3630 {
3631 if (pszPrefix)
3632 Log(("%s: %s", pszPrefix, szOutput));
3633 else
3634 Log(("%s", szOutput));
3635 }
3636 return true;
3637
3638#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
3639 PVM pVM = env->pVM;
3640 const bool fLog = LogIsEnabled();
3641 const bool fLog2 = LogIs2Enabled();
3642 int rc = VINF_SUCCESS;
3643
3644 /*
3645 * Don't bother if there ain't any log output to do.
3646 */
3647 if (!fLog && !fLog2)
3648 return true;
3649
3650 /*
3651 * Update the state so DBGF reads the correct register values.
3652 */
3653 remR3StateUpdate(pVM);
3654
3655 /*
3656 * Log registers if requested.
3657 */
3658 if (!fLog2)
3659 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3660
3661 /*
3662 * Disassemble to log.
3663 */
3664 if (fLog)
3665 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3666
3667 return RT_SUCCESS(rc);
3668#endif
3669}
3670
3671
3672/**
3673 * Disassemble recompiled code.
3674 *
3675 * @param phFileIgnored Ignored, logfile usually.
3676 * @param pvCode Pointer to the code block.
3677 * @param cb Size of the code block.
3678 */
3679void disas(FILE *phFile, void *pvCode, unsigned long cb)
3680{
3681#ifdef DEBUG_TMP_LOGGING
3682# define DISAS_PRINTF(x...) fprintf(phFile, x)
3683#else
3684# define DISAS_PRINTF(x...) RTLogPrintf(x)
3685 if (LogIs2Enabled())
3686#endif
3687 {
3688 unsigned off = 0;
3689 char szOutput[256];
3690 DISCPUSTATE Cpu;
3691
3692 memset(&Cpu, 0, sizeof(Cpu));
3693#ifdef RT_ARCH_X86
3694 Cpu.mode = CPUMODE_32BIT;
3695#else
3696 Cpu.mode = CPUMODE_64BIT;
3697#endif
3698
3699 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3700 while (off < cb)
3701 {
3702 uint32_t cbInstr;
3703 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3704 DISAS_PRINTF("%s", szOutput);
3705 else
3706 {
3707 DISAS_PRINTF("disas error\n");
3708 cbInstr = 1;
3709#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3710 break;
3711#endif
3712 }
3713 off += cbInstr;
3714 }
3715 }
3716
3717#undef DISAS_PRINTF
3718}
3719
3720
3721/**
3722 * Disassemble guest code.
3723 *
3724 * @param phFileIgnored Ignored, logfile usually.
3725 * @param uCode The guest address of the code to disassemble. (flat?)
3726 * @param cb Number of bytes to disassemble.
3727 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3728 */
3729void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3730{
3731#ifdef DEBUG_TMP_LOGGING
3732# define DISAS_PRINTF(x...) fprintf(phFile, x)
3733#else
3734# define DISAS_PRINTF(x...) RTLogPrintf(x)
3735 if (LogIs2Enabled())
3736#endif
3737 {
3738 PVM pVM = cpu_single_env->pVM;
3739 RTSEL cs;
3740 RTGCUINTPTR eip;
3741
3742 /*
3743 * Update the state so DBGF reads the correct register values (flags).
3744 */
3745 remR3StateUpdate(pVM);
3746
3747 /*
3748 * Do the disassembling.
3749 */
3750 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3751 cs = cpu_single_env->segs[R_CS].selector;
3752 eip = uCode - cpu_single_env->segs[R_CS].base;
3753 for (;;)
3754 {
3755 char szBuf[256];
3756 uint32_t cbInstr;
3757 int rc = DBGFR3DisasInstrEx(pVM,
3758 cs,
3759 eip,
3760 0,
3761 szBuf, sizeof(szBuf),
3762 &cbInstr);
3763 if (RT_SUCCESS(rc))
3764 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3765 else
3766 {
3767 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3768 cbInstr = 1;
3769 }
3770
3771 /* next */
3772 if (cb <= cbInstr)
3773 break;
3774 cb -= cbInstr;
3775 uCode += cbInstr;
3776 eip += cbInstr;
3777 }
3778 }
3779#undef DISAS_PRINTF
3780}
3781
3782
3783/**
3784 * Looks up a guest symbol.
3785 *
3786 * @returns Pointer to symbol name. This is a static buffer.
3787 * @param orig_addr The address in question.
3788 */
3789const char *lookup_symbol(target_ulong orig_addr)
3790{
3791 RTGCINTPTR off = 0;
3792 DBGFSYMBOL Sym;
3793 PVM pVM = cpu_single_env->pVM;
3794 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3795 if (RT_SUCCESS(rc))
3796 {
3797 static char szSym[sizeof(Sym.szName) + 48];
3798 if (!off)
3799 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3800 else if (off > 0)
3801 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3802 else
3803 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3804 return szSym;
3805 }
3806 return "<N/A>";
3807}
3808
3809
3810#undef LOG_GROUP
3811#define LOG_GROUP LOG_GROUP_REM
3812
3813
3814/* -+- FF notifications -+- */
3815
3816
3817/**
3818 * Notification about a pending interrupt.
3819 *
3820 * @param pVM VM Handle.
3821 * @param u8Interrupt Interrupt
3822 * @thread The emulation thread.
3823 */
3824REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3825{
3826 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3827 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3828}
3829
3830/**
3831 * Notification about a pending interrupt.
3832 *
3833 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3834 * @param pVM VM Handle.
3835 * @thread The emulation thread.
3836 */
3837REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3838{
3839 return pVM->rem.s.u32PendingInterrupt;
3840}
3841
3842/**
3843 * Notification about the interrupt FF being set.
3844 *
3845 * @param pVM VM Handle.
3846 * @thread The emulation thread.
3847 */
3848REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3849{
3850 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3851 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3852 if (pVM->rem.s.fInREM)
3853 {
3854 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3855 CPU_INTERRUPT_EXTERNAL_HARD);
3856 }
3857}
3858
3859
3860/**
3861 * Notification about the interrupt FF being set.
3862 *
3863 * @param pVM VM Handle.
3864 * @thread Any.
3865 */
3866REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3867{
3868 LogFlow(("REMR3NotifyInterruptClear:\n"));
3869 if (pVM->rem.s.fInREM)
3870 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3871}
3872
3873
3874/**
3875 * Notification about pending timer(s).
3876 *
3877 * @param pVM VM Handle.
3878 * @thread Any.
3879 */
3880REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3881{
3882#ifndef DEBUG_bird
3883 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3884#endif
3885 if (pVM->rem.s.fInREM)
3886 {
3887 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3888 CPU_INTERRUPT_EXTERNAL_TIMER);
3889 }
3890}
3891
3892
3893/**
3894 * Notification about pending DMA transfers.
3895 *
3896 * @param pVM VM Handle.
3897 * @thread Any.
3898 */
3899REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3900{
3901 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3902 if (pVM->rem.s.fInREM)
3903 {
3904 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3905 CPU_INTERRUPT_EXTERNAL_DMA);
3906 }
3907}
3908
3909
3910/**
3911 * Notification about pending timer(s).
3912 *
3913 * @param pVM VM Handle.
3914 * @thread Any.
3915 */
3916REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3917{
3918 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3919 if (pVM->rem.s.fInREM)
3920 {
3921 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3922 CPU_INTERRUPT_EXTERNAL_EXIT);
3923 }
3924}
3925
3926
3927/**
3928 * Notification about pending FF set by an external thread.
3929 *
3930 * @param pVM VM handle.
3931 * @thread Any.
3932 */
3933REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3934{
3935 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3936 if (pVM->rem.s.fInREM)
3937 {
3938 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3939 CPU_INTERRUPT_EXTERNAL_EXIT);
3940 }
3941}
3942
3943
#ifdef VBOX_WITH_STATISTICS
/**
 * Resolves a STATS_* code to its advanced profile sample.
 *
 * Shared by remR3ProfileStart/remR3ProfileStop so the stat-code mapping
 * exists in exactly one place.
 *
 * @returns Pointer to the profile sample, NULL if the code is unknown
 *          (asserts in strict builds).
 * @param   statcode    The STATS_* code.
 */
static STAMPROFILEADV *remR3ProfileGetStat(int statcode)
{
    switch (statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:    return &gStatExecuteSingleInstr;
        case STATS_QEMU_COMPILATION:        return &gStatCompilationQEmu;
        case STATS_QEMU_RUN_EMULATED_CODE:  return &gStatRunCodeQEmu;
        case STATS_QEMU_TOTAL:              return &gStatTotalTimeQEmu;
        case STATS_QEMU_RUN_TIMERS:         return &gStatTimers;
        case STATS_TLB_LOOKUP:              return &gStatTBLookup;
        case STATS_IRQ_HANDLING:            return &gStatIRQ;
        case STATS_RAW_CHECK:               return &gStatRawCheck;
        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return NULL;
    }
}

/**
 * Starts the profile sample identified by a STATS_* code.
 *
 * @param   statcode    The STATS_* code; unknown codes are ignored (asserted).
 */
void remR3ProfileStart(int statcode)
{
    STAMPROFILEADV *pStat = remR3ProfileGetStat(statcode);
    if (pStat)
        STAM_PROFILE_ADV_START(pStat, a);
}


/**
 * Stops the profile sample identified by a STATS_* code.
 *
 * @param   statcode    The STATS_* code; unknown codes are ignored (asserted).
 */
void remR3ProfileStop(int statcode)
{
    STAMPROFILEADV *pStat = remR3ProfileGetStat(statcode);
    if (pStat)
        STAM_PROFILE_ADV_STOP(pStat, a);
}
#endif
4019
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code in the REM state and requests a recompiler exit
 * via the CPU_INTERRUPT_RC interrupt flag.
 *
 * @param pVM VM handle.
 * @param rc The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4034
4035
4036/* -+- timers -+- */
4037
/**
 * Reads the guest TSC via the VBox time manager (qemu callback).
 *
 * @returns The current guest TSC value.
 * @param   env     Pointer to the recompiler CPU structure.
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
4043
4044
4045/* -+- interrupts -+- */
4046
/**
 * Asserts ISA IRQ 13 to signal an FPU error (legacy FERR# line) - qemu callback.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4052
/**
 * Fetches the next pending interrupt vector for the recompiler (qemu callback).
 *
 * Prefers a vector previously recorded in the REM state (the raw-mode
 * fallback case described below) over querying PDM.
 *
 * @returns The interrupt vector (0..255), -1 if nothing could be retrieved.
 * @param   env     Pointer to the recompiler CPU structure.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD set while further APIC/PIC interrupts are pending. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4085
4086
4087/* -+- local apic -+- */
4088
/**
 * Sets the APIC base MSR via PDM (qemu callback).
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4094
4095uint64_t cpu_get_apic_base(CPUX86State *env)
4096{
4097 uint64_t u64;
4098 int rc = PDMApicGetBase(env->pVM, &u64);
4099 if (RT_SUCCESS(rc))
4100 {
4101 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4102 return u64;
4103 }
4104 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4105 return 0;
4106}
4107
/**
 * Sets the APIC task priority register via PDM (qemu callback).
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The new TPR value.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4113
4114uint8_t cpu_get_apic_tpr(CPUX86State *env)
4115{
4116 uint8_t u8;
4117 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4118 if (RT_SUCCESS(rc))
4119 {
4120 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4121 return u8;
4122 }
4123 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4124 return 0;
4125}
4126
4127
4128uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4129{
4130 uint64_t value;
4131 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4132 if (RT_SUCCESS(rc))
4133 {
4134 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4135 return value;
4136 }
4137 /** @todo: exception ? */
4138 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4139 return value;
4140}
4141
/**
 * Writes an APIC MSR via PDM (qemu callback).
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4148
/**
 * Reads a guest MSR via CPUM (qemu callback).
 *
 * @returns The MSR value.
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4153
/**
 * Writes a guest MSR via CPUM (qemu callback).
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4158/* -+- I/O Ports -+- */
4159
4160#undef LOG_GROUP
4161#define LOG_GROUP LOG_GROUP_REM_IOPORT
4162
4163void cpu_outb(CPUState *env, int addr, int val)
4164{
4165 int rc;
4166
4167 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4168 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4169
4170 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4171 if (RT_LIKELY(rc == VINF_SUCCESS))
4172 return;
4173 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4174 {
4175 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4176 remR3RaiseRC(env->pVM, rc);
4177 return;
4178 }
4179 remAbort(rc, __FUNCTION__);
4180}
4181
4182void cpu_outw(CPUState *env, int addr, int val)
4183{
4184 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4185 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4186 if (RT_LIKELY(rc == VINF_SUCCESS))
4187 return;
4188 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4189 {
4190 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4191 remR3RaiseRC(env->pVM, rc);
4192 return;
4193 }
4194 remAbort(rc, __FUNCTION__);
4195}
4196
4197void cpu_outl(CPUState *env, int addr, int val)
4198{
4199 int rc;
4200 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4201 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4202 if (RT_LIKELY(rc == VINF_SUCCESS))
4203 return;
4204 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4205 {
4206 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4207 remR3RaiseRC(env->pVM, rc);
4208 return;
4209 }
4210 remAbort(rc, __FUNCTION__);
4211}
4212
4213int cpu_inb(CPUState *env, int addr)
4214{
4215 uint32_t u32 = 0;
4216 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4217 if (RT_LIKELY(rc == VINF_SUCCESS))
4218 {
4219 if (/*addr != 0x61 && */addr != 0x71)
4220 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4221 return (int)u32;
4222 }
4223 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4224 {
4225 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4226 remR3RaiseRC(env->pVM, rc);
4227 return (int)u32;
4228 }
4229 remAbort(rc, __FUNCTION__);
4230 return 0xff;
4231}
4232
4233int cpu_inw(CPUState *env, int addr)
4234{
4235 uint32_t u32 = 0;
4236 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4237 if (RT_LIKELY(rc == VINF_SUCCESS))
4238 {
4239 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4240 return (int)u32;
4241 }
4242 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4243 {
4244 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4245 remR3RaiseRC(env->pVM, rc);
4246 return (int)u32;
4247 }
4248 remAbort(rc, __FUNCTION__);
4249 return 0xffff;
4250}
4251
4252int cpu_inl(CPUState *env, int addr)
4253{
4254 uint32_t u32 = 0;
4255 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4256 if (RT_LIKELY(rc == VINF_SUCCESS))
4257 {
4258//if (addr==0x01f0 && u32 == 0x6b6d)
4259// loglevel = ~0;
4260 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4261 return (int)u32;
4262 }
4263 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4264 {
4265 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4266 remR3RaiseRC(env->pVM, rc);
4267 return (int)u32;
4268 }
4269 remAbort(rc, __FUNCTION__);
4270 return 0xffffffff;
4271}
4272
4273#undef LOG_GROUP
4274#define LOG_GROUP LOG_GROUP_REM
4275
4276
4277/* -+- helpers and misc other interfaces -+- */
4278
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param env Pointer to the recompiler CPU structure.
 * @param uOperator CPUID operation (eax).
 * @param pvEAX Where to store eax.
 * @param pvEBX Where to store ebx.
 * @param pvECX Where to store ecx.
 * @param pvEDX Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Simply forwards to CPUM, which returns the *guest* CPUID leaves. */
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4296
4297
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Logs the error, syncs the REM state back if inside the recompiler, and
 * hands over to EM's fatal-error handling (which never returns).
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4327
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Logs the formatted message, syncs the REM state back if inside the
 * recompiler, and hands over to EM's fatal-error handling (which never
 * returns).
 *
 * @param   env         Pointer to the recompiler CPU structure (unused here).
 * @param   pszFormat   Error message format string.
 * @param   ...         Format arguments.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    /* The va_list is consumed by %N, so it is restarted for the second use. */
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4361
4362
/**
 * Aborts the VM.
 *
 * Logs the failure, syncs the REM state back if inside the recompiler, and
 * hands over to EM's fatal-error handling (which never returns).
 *
 * @param rc VBox error code.
 * @param pszTip Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4388
4389
/**
 * Dumps a linux system call.
 *
 * Looks up the syscall number (guest eax) in a static name table and logs
 * it together with eip and the register-passed arguments; unknown numbers
 * are logged as such.  Log() is conditional, so this is cheap when the REM
 * log group is disabled.
 *
 * @param pVM VM handle.
 */
void remR3DumpLnxSyscall(PVM pVM)
{
    /* i386 Linux syscall names indexed by syscall number. */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",             /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",           /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",            /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",           /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",            /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",            /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",            /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",              /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",           /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",            /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",            /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",          /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",         /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",             /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",         /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",          /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",             /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",          /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",            /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",         /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",            /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",           /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",            /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",            /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",          /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",           /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",            /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",           /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",           /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",         /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",            /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",         /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",           /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVM);
    /* NOTE(review): the switch only has a default case - presumably kept so
     * special-cased syscalls can be added easily. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
                     CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
            break;

    }
}
4688
4689
/**
 * Dumps an OpenBSD system call.
 *
 * Looks up the syscall number (guest eax) in a static name table and logs
 * it with eip and up to eight stack-passed argument dwords read from the
 * guest via PGM.  Bails out early when logging is disabled, because
 * RTLogPrintf is unconditional.
 *
 * @param pVM VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* OpenBSD/i386 syscall names indexed by syscall number. */
    static const char *apsz[] =
    {
        "SYS_syscall",          //0
        "SYS_exit",             //1
        "SYS_fork",             //2
        "SYS_read",             //3
        "SYS_write",            //4
        "SYS_open",             //5
        "SYS_close",            //6
        "SYS_wait4",            //7
        "SYS_8",
        "SYS_link",             //9
        "SYS_unlink",           //10
        "SYS_11",
        "SYS_chdir",            //12
        "SYS_fchdir",           //13
        "SYS_mknod",            //14
        "SYS_chmod",            //15
        "SYS_chown",            //16
        "SYS_break",            //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid",           //20
        "SYS_mount",            //21
        "SYS_unmount",          //22
        "SYS_setuid",           //23
        "SYS_getuid",           //24
        "SYS_geteuid",          //25
        "SYS_ptrace",           //26
        "SYS_recvmsg",          //27
        "SYS_sendmsg",          //28
        "SYS_recvfrom",         //29
        "SYS_accept",           //30
        "SYS_getpeername",      //31
        "SYS_getsockname",      //32
        "SYS_access",           //33
        "SYS_chflags",          //34
        "SYS_fchflags",         //35
        "SYS_sync",             //36
        "SYS_kill",             //37
        "SYS_38",
        "SYS_getppid",          //39
        "SYS_40",
        "SYS_dup",              //41
        "SYS_opipe",            //42
        "SYS_getegid",          //43
        "SYS_profil",           //44
        "SYS_ktrace",           //45
        "SYS_sigaction",        //46
        "SYS_getgid",           //47
        "SYS_sigprocmask",      //48
        "SYS_getlogin",         //49
        "SYS_setlogin",         //50
        "SYS_acct",             //51
        "SYS_sigpending",       //52
        "SYS_osigaltstack",     //53
        "SYS_ioctl",            //54
        "SYS_reboot",           //55
        "SYS_revoke",           //56
        "SYS_symlink",          //57
        "SYS_readlink",         //58
        "SYS_execve",           //59
        "SYS_umask",            //60
        "SYS_chroot",           //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork",            //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk",             //69
        "SYS_sstk",             //70
        "SYS_61",
        "SYS_vadvise",          //72
        "SYS_munmap",           //73
        "SYS_mprotect",         //74
        "SYS_madvise",          //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore",          //78
        "SYS_getgroups",        //79
        "SYS_setgroups",        //80
        "SYS_getpgrp",          //81
        "SYS_setpgid",          //82
        "SYS_setitimer",        //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer",        //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2",             //90
        "SYS_91",
        "SYS_fcntl",            //92
        "SYS_select",           //93
        "SYS_94",
        "SYS_fsync",            //95
        "SYS_setpriority",      //96
        "SYS_socket",           //97
        "SYS_connect",          //98
        "SYS_99",
        "SYS_getpriority",      //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn",        //103
        "SYS_bind",             //104
        "SYS_setsockopt",       //105
        "SYS_listen",           //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend",       //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday",     //116
        "SYS_getrusage",        //117
        "SYS_getsockopt",       //118
        "SYS_119",
        "SYS_readv",            //120
        "SYS_writev",           //121
        "SYS_settimeofday",     //122
        "SYS_fchown",           //123
        "SYS_fchmod",           //124
        "SYS_125",
        "SYS_setreuid",         //126
        "SYS_setregid",         //127
        "SYS_rename",           //128
        "SYS_129",
        "SYS_130",
        "SYS_flock",            //131
        "SYS_mkfifo",           //132
        "SYS_sendto",           //133
        "SYS_shutdown",         //134
        "SYS_socketpair",       //135
        "SYS_mkdir",            //136
        "SYS_rmdir",            //137
        "SYS_utimes",           //138
        "SYS_139",
        "SYS_adjtime",          //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid",           //147
        "SYS_quotactl",         //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc",           //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh",            //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch",          //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread",            //173
        "SYS_pwrite",           //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid",           //181
        "SYS_setegid",          //182
        "SYS_seteuid",          //183
        "SYS_lfs_bmapv",        //184
        "SYS_lfs_markv",        //185
        "SYS_lfs_segclean",     //186
        "SYS_lfs_segwait",      //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf",         //191
        "SYS_fpathconf",        //192
        "SYS_swapctl",          //193
        "SYS_getrlimit",        //194
        "SYS_setrlimit",        //195
        "SYS_getdirentries",    //196
        "SYS_mmap",             //197
        "SYS___syscall",        //198
        "SYS_lseek",            //199
        "SYS_truncate",         //200
        "SYS_ftruncate",        //201
        "SYS___sysctl",         //202
        "SYS_mlock",            //203
        "SYS_munlock",          //204
        "SYS_205",
        "SYS_futimes",          //206
        "SYS_getpgid",          //207
        "SYS_xfspioctl",        //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget",           //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget",           //225
        "SYS_msgsnd",           //226
        "SYS_msgrcv",           //227
        "SYS_shmat",            //228
        "SYS_229",
        "SYS_shmdt",            //230
        "SYS_231",
        "SYS_clock_gettime",    //232
        "SYS_clock_settime",    //233
        "SYS_clock_getres",     //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep",        //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit",         //250
        "SYS_rfork",            //251
        "SYS_poll",             //252
        "SYS_issetugid",        //253
        "SYS_lchown",           //254
        "SYS_getsid",           //255
        "SYS_msync",            //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat",        //260
        "SYS_statfs",           //261
        "SYS_fstatfs",          //262
        "SYS_pipe",             //263
        "SYS_fhopen",           //264
        "SYS_265",
        "SYS_fhstatfs",         //266
        "SYS_preadv",           //267
        "SYS_pwritev",          //268
        "SYS_kqueue",           //269
        "SYS_kevent",           //270
        "SYS_mlockall",         //271
        "SYS_munlockall",       //272
        "SYS_getpeereid",       //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid",        //281
        "SYS_setresuid",        //282
        "SYS_getresgid",        //283
        "SYS_setresgid",        //284
        "SYS_285",
        "SYS_mquery",           //286
        "SYS_closefrom",        //287
        "SYS_sigaltstack",      //288
        "SYS_shmget",           //289
        "SYS_semop",            //290
        "SYS_stat",             //291
        "SYS_fstat",            //292
        "SYS_lstat",            //293
        "SYS_fhstat",           //294
        "SYS___semctl",         //295
        "SYS_shmctl",           //296
        "SYS_msgctl",           //297
        "SYS_MAXSYSCALL",       //298
        //299
        //300
    };
    uint32_t uEAX;
    /* RTLogPrintf is unconditional, so skip all the work when logging is off. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    /* NOTE(review): the switch only has a default case - presumably kept so
     * special-cased syscalls can be added easily. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                uint32_t au32Args[8] = {0};
                /* Best-effort read of the first 8 argument dwords off the guest stack. */
                PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
5020
5021
5022#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Only built for the no-CRT x86 Windows configuration (see enclosing #if).
 *
 * @returns true (always reports successful attach/detach).
 * @param   hModule     The DLL module handle (unused).
 * @param   dwReason    The attach/detach reason (unused).
 * @param   pvReserved  Reserved (unused).
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
5030
/**
 * Minimal memcpy replacement for the no-CRT x86 Windows build.
 *
 * Plain forward byte copy; like the standard memcpy, the regions must not
 * overlap.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* const-correct: don't discard the qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5038
5039#endif
5040
/**
 * Qemu callback invoked on SMM state changes; intentionally a no-op here.
 *
 * @param   env     Pointer to the recompiler CPU structure (unused).
 */
void cpu_smm_update(CPUState* env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette