VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 16132

Last change on this file since 16132 was 16127, checked in by vboxsync, 16 years ago

REM: sync the FPU state after CR4 and CPUID or we'll get the wrong format.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 157.1 KB
Line 
1/* $Id: VBoxRecompiler.c 16127 2009-01-21 11:14:17Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33void cpu_exec_init_all(unsigned long tb_size);
34
35#include <VBox/rem.h>
36#include <VBox/vmapi.h>
37#include <VBox/tm.h>
38#include <VBox/ssm.h>
39#include <VBox/em.h>
40#include <VBox/trpm.h>
41#include <VBox/iom.h>
42#include <VBox/mm.h>
43#include <VBox/pgm.h>
44#include <VBox/pdm.h>
45#include <VBox/dbgf.h>
46#include <VBox/dbg.h>
47#include <VBox/hwaccm.h>
48#include <VBox/patm.h>
49#include <VBox/csam.h>
50#include "REMInternal.h"
51#include <VBox/vm.h>
52#include <VBox/param.h>
53#include <VBox/err.h>
54
55#include <VBox/log.h>
56#include <iprt/semaphore.h>
57#include <iprt/asm.h>
58#include <iprt/assert.h>
59#include <iprt/thread.h>
60#include <iprt/string.h>
61
62/* Don't wanna include everything. */
63extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
64extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
65extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
66extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
67extern void tlb_flush(CPUState *env, int flush_global);
68extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
69extern void sync_ldtr(CPUX86State *env1, int selector);
70extern int sync_tr(CPUX86State *env1, int selector);
71
72#ifdef VBOX_STRICT
73unsigned long get_phys_page_offset(target_ulong addr);
74#endif
75
76/*******************************************************************************
77* Defined Constants And Macros *
78*******************************************************************************/
79
80/** Copy 80-bit fpu register at pSrc to pDst.
81 * This is probably faster than *calling* memcpy.
82 */
83#define REM_COPY_FPU_REG(pDst, pSrc) \
84 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
85
86
87/*******************************************************************************
88* Internal Functions *
89*******************************************************************************/
90static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
91static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
92static void remR3StateUpdate(PVM pVM);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145#endif
146
147/*
148 * Global stuff.
149 */
150
/** MMIO read callbacks, indexed by access size (0=8-bit, 1=16-bit, 2=32-bit).
 *  Registered with cpu_register_io_memory() in REMR3Init(); the opaque
 *  argument passed to each callback is the PVM. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};
158
/** MMIO write callbacks, indexed by access size (0=8-bit, 1=16-bit, 2=32-bit).
 *  Registered with cpu_register_io_memory() in REMR3Init(). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};
166
/** Access-handler read callbacks, indexed by access size (0=8-bit, 1=16-bit,
 *  2=32-bit). Registered with cpu_register_io_memory() in REMR3Init(). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};
174
/** Access-handler write callbacks, indexed by access size (0=8-bit, 1=16-bit,
 *  2=32-bit). Registered with cpu_register_io_memory() in REMR3Init(). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
182
183
184#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
185/*
186 * Debugger commands.
187 */
188static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
189
/** '.remstep' argument descriptors: one optional boolean-ish number argument. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};
196
/** Command descriptors for the debugger ('.remstep'), registered once from
 *  REMR3Init(). Uses C99 designated initializers. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
214#endif
215
216
217/*******************************************************************************
218* Internal Functions *
219*******************************************************************************/
220void remAbort(int rc, const char *pszTip);
221extern int testmath(void);
222
223/* Put them here to avoid unused variable warning. */
224AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
225#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
226//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
227/* Why did this have to be identical?? */
228AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
229#else
230AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
231#endif
232
233
234/* Prologue code, must be in lower 4G to simplify jumps to/from generated code */
235uint8_t* code_gen_prologue;
236
237/**
238 * Initializes the REM.
239 *
240 * @returns VBox status code.
241 * @param pVM The VM to operate on.
242 */
243REMR3DECL(int) REMR3Init(PVM pVM)
244{
245 uint32_t u32Dummy;
246 int rc;
247
248#ifdef VBOX_ENABLE_VBOXREM64
249 LogRel(("Using 64-bit aware REM\n"));
250#endif
251
252 /*
253 * Assert sanity.
254 */
255 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
256 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
257 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
258#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
259 Assert(!testmath());
260#endif
261 /*
262 * Init some internal data members.
263 */
264 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
265 pVM->rem.s.Env.pVM = pVM;
266#ifdef CPU_RAW_MODE_INIT
267 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
268#endif
269
270 /* ctx. */
271 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
272 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
273
274 /* ignore all notifications */
275 pVM->rem.s.fIgnoreAll = true;
276
277 code_gen_prologue = RTMemExecAlloc(_1K);
278
279 cpu_exec_init_all(0);
280
281 /*
282 * Init the recompiler.
283 */
284 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
285 {
286 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
287 return VERR_GENERAL_FAILURE;
288 }
289 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
290 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
291
292 /* allocate code buffer for single instruction emulation. */
293 pVM->rem.s.Env.cbCodeBuffer = 4096;
294 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
295 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
296
297 /* finally, set the cpu_single_env global. */
298 cpu_single_env = &pVM->rem.s.Env;
299
300 /* Nothing is pending by default */
301 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
302
303 /*
304 * Register ram types.
305 */
306 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
307 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
308 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
309 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
310 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
311
312 /* stop ignoring. */
313 pVM->rem.s.fIgnoreAll = false;
314
315 /*
316 * Register the saved state data unit.
317 */
318 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
319 NULL, remR3Save, NULL,
320 NULL, remR3Load, NULL);
321 if (RT_FAILURE(rc))
322 return rc;
323
324#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
325 /*
326 * Debugger commands.
327 */
328 static bool fRegisteredCmds = false;
329 if (!fRegisteredCmds)
330 {
331 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
332 if (RT_SUCCESS(rc))
333 fRegisteredCmds = true;
334 }
335#endif
336
337#ifdef VBOX_WITH_STATISTICS
338 /*
339 * Statistics.
340 */
341 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
342 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
343 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
344 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
345 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
348 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
349 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
350 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
351 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
352 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
353
354 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
355
356 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
357 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
358 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
359 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
360 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
361 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
362 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
363 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
364 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
365 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
366 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
367
368 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
369 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
370 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
371 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
372
373 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
378 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
379
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
385 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
386
387
388#endif
389
390#ifdef DEBUG_ALL_LOGGING
391 loglevel = ~0;
392# ifdef DEBUG_TMP_LOGGING
393 logfile = fopen("/tmp/vbox-qemu.log", "w");
394# endif
395#endif
396
397 return rc;
398}
399
400
401/**
402 * Terminates the REM.
403 *
404 * Termination means cleaning up and freeing all resources,
405 * the VM it self is at this point powered off or suspended.
406 *
407 * @returns VBox status code.
408 * @param pVM The VM to operate on.
409 */
410REMR3DECL(int) REMR3Term(PVM pVM)
411{
412 return VINF_SUCCESS;
413}
414
415
416/**
417 * The VM is being reset.
418 *
419 * For the REM component this means to call the cpu_reset() and
420 * reinitialize some state variables.
421 *
422 * @param pVM VM handle.
423 */
424REMR3DECL(void) REMR3Reset(PVM pVM)
425{
426 /*
427 * Reset the REM cpu.
428 */
429 pVM->rem.s.fIgnoreAll = true;
430 cpu_reset(&pVM->rem.s.Env);
431 pVM->rem.s.cInvalidatedPages = 0;
432 pVM->rem.s.fIgnoreAll = false;
433
434 /* Clear raw ring 0 init state */
435 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
436
437 /* Flush the TBs the next time we execute code here. */
438 pVM->rem.s.fFlushTBs = true;
439}
440
441
442/**
443 * Execute state save operation.
444 *
445 * @returns VBox status code.
446 * @param pVM VM Handle.
447 * @param pSSM SSM operation handle.
448 */
449static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
450{
451 /*
452 * Save the required CPU Env bits.
453 * (Not much because we're never in REM when doing the save.)
454 */
455 PREM pRem = &pVM->rem.s;
456 LogFlow(("remR3Save:\n"));
457 Assert(!pRem->fInREM);
458 SSMR3PutU32(pSSM, pRem->Env.hflags);
459 SSMR3PutU32(pSSM, ~0); /* separator */
460
461 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
462 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
463 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
464
465 return SSMR3PutU32(pSSM, ~0); /* terminator */
466}
467
468
469/**
470 * Execute state load operation.
471 *
472 * @returns VBox status code.
473 * @param pVM VM Handle.
474 * @param pSSM SSM operation handle.
475 * @param u32Version Data layout version.
476 */
477static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
478{
479 uint32_t u32Dummy;
480 uint32_t fRawRing0 = false;
481 uint32_t u32Sep;
482 int rc;
483 PREM pRem;
484 LogFlow(("remR3Load:\n"));
485
486 /*
487 * Validate version.
488 */
489 if ( u32Version != REM_SAVED_STATE_VERSION
490 && u32Version != REM_SAVED_STATE_VERSION_VER1_6)
491 {
492 AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
493 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
494 }
495
496 /*
497 * Do a reset to be on the safe side...
498 */
499 REMR3Reset(pVM);
500
501 /*
502 * Ignore all ignorable notifications.
503 * (Not doing this will cause serious trouble.)
504 */
505 pVM->rem.s.fIgnoreAll = true;
506
507 /*
508 * Load the required CPU Env bits.
509 * (Not much because we're never in REM when doing the save.)
510 */
511 pRem = &pVM->rem.s;
512 Assert(!pRem->fInREM);
513 SSMR3GetU32(pSSM, &pRem->Env.hflags);
514 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
515 {
516 /* Redundant REM CPU state has to be loaded, but can be ignored. */
517 CPUX86State_Ver16 temp;
518 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
519 }
520
521 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
522 if (RT_FAILURE(rc))
523 return rc;
524 if (u32Sep != ~0U)
525 {
526 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
527 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
528 }
529
530 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
531 SSMR3GetUInt(pSSM, &fRawRing0);
532 if (fRawRing0)
533 pRem->Env.state |= CPU_RAW_RING0;
534
535 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
536 {
537 unsigned i;
538
539 /*
540 * Load the REM stuff.
541 */
542 rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
543 if (RT_FAILURE(rc))
544 return rc;
545 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
546 {
547 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
548 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
549 }
550 for (i = 0; i < pRem->cInvalidatedPages; i++)
551 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
552 }
553
554 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
555 if (RT_FAILURE(rc))
556 return rc;
557
558 /* check the terminator. */
559 rc = SSMR3GetU32(pSSM, &u32Sep);
560 if (RT_FAILURE(rc))
561 return rc;
562 if (u32Sep != ~0U)
563 {
564 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
565 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
566 }
567
568 /*
569 * Get the CPUID features.
570 */
571 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
572 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
573
574 /*
575 * Sync the Load Flush the TLB
576 */
577 tlb_flush(&pRem->Env, 1);
578
579 /*
580 * Stop ignoring ignornable notifications.
581 */
582 pVM->rem.s.fIgnoreAll = false;
583
584 /*
585 * Sync the whole CPU state when executing code in the recompiler.
586 */
587 CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
588 return VINF_SUCCESS;
589}
590
591
592
593#undef LOG_GROUP
594#define LOG_GROUP LOG_GROUP_REM_RUN
595
596/**
597 * Single steps an instruction in recompiled mode.
598 *
599 * Before calling this function the REM state needs to be in sync with
600 * the VM. Call REMR3State() to perform the sync. It's only necessary
601 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
602 * and after calling REMR3StateBack().
603 *
604 * @returns VBox status code.
605 *
606 * @param pVM VM Handle.
607 */
608REMR3DECL(int) REMR3Step(PVM pVM)
609{
610 int rc, interrupt_request;
611 RTGCPTR GCPtrPC;
612 bool fBp;
613
614 /*
615 * Lock the REM - we don't wanna have anyone interrupting us
616 * while stepping - and enabled single stepping. We also ignore
617 * pending interrupts and suchlike.
618 */
619 interrupt_request = pVM->rem.s.Env.interrupt_request;
620 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
621 pVM->rem.s.Env.interrupt_request = 0;
622 cpu_single_step(&pVM->rem.s.Env, 1);
623
624 /*
625 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
626 */
627 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
628 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
629
630 /*
631 * Execute and handle the return code.
632 * We execute without enabling the cpu tick, so on success we'll
633 * just flip it on and off to make sure it moves
634 */
635 rc = cpu_exec(&pVM->rem.s.Env);
636 if (rc == EXCP_DEBUG)
637 {
638 TMCpuTickResume(pVM);
639 TMCpuTickPause(pVM);
640 TMVirtualResume(pVM);
641 TMVirtualPause(pVM);
642 rc = VINF_EM_DBG_STEPPED;
643 }
644 else
645 {
646 switch (rc)
647 {
648 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
649 case EXCP_HLT:
650 case EXCP_HALTED: rc = VINF_EM_HALT; break;
651 case EXCP_RC:
652 rc = pVM->rem.s.rc;
653 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
654 break;
655 default:
656 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
657 rc = VERR_INTERNAL_ERROR;
658 break;
659 }
660 }
661
662 /*
663 * Restore the stuff we changed to prevent interruption.
664 * Unlock the REM.
665 */
666 if (fBp)
667 {
668 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
669 Assert(rc2 == 0); NOREF(rc2);
670 }
671 cpu_single_step(&pVM->rem.s.Env, 0);
672 pVM->rem.s.Env.interrupt_request = interrupt_request;
673
674 return rc;
675}
676
677
678/**
679 * Set a breakpoint using the REM facilities.
680 *
681 * @returns VBox status code.
682 * @param pVM The VM handle.
683 * @param Address The breakpoint address.
684 * @thread The emulation thread.
685 */
686REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
687{
688 VM_ASSERT_EMT(pVM);
689 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
690 {
691 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
692 return VINF_SUCCESS;
693 }
694 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
695 return VERR_REM_NO_MORE_BP_SLOTS;
696}
697
698
699/**
700 * Clears a breakpoint set by REMR3BreakpointSet().
701 *
702 * @returns VBox status code.
703 * @param pVM The VM handle.
704 * @param Address The breakpoint address.
705 * @thread The emulation thread.
706 */
707REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
708{
709 VM_ASSERT_EMT(pVM);
710 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
711 {
712 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
713 return VINF_SUCCESS;
714 }
715 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
716 return VERR_REM_BP_NOT_FOUND;
717}
718
719
720/**
721 * Emulate an instruction.
722 *
723 * This function executes one instruction without letting anyone
724 * interrupt it. This is intended for being called while being in
725 * raw mode and thus will take care of all the state syncing between
726 * REM and the rest.
727 *
728 * @returns VBox status code.
729 * @param pVM VM handle.
730 */
731REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
732{
733 bool fFlushTBs;
734
735 int rc, rc2;
736 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
737
738 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
739 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
740 */
741 if (HWACCMIsEnabled(pVM))
742 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
743
744 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
745 fFlushTBs = pVM->rem.s.fFlushTBs;
746 pVM->rem.s.fFlushTBs = false;
747
748 /*
749 * Sync the state and enable single instruction / single stepping.
750 */
751 rc = REMR3State(pVM);
752 pVM->rem.s.fFlushTBs = fFlushTBs;
753 if (RT_SUCCESS(rc))
754 {
755 int interrupt_request = pVM->rem.s.Env.interrupt_request;
756 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
757 Assert(!pVM->rem.s.Env.singlestep_enabled);
758 /*
759 * Now we set the execute single instruction flag and enter the cpu_exec loop.
760 */
761 TMNotifyStartOfExecution(pVM);
762 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
763 rc = cpu_exec(&pVM->rem.s.Env);
764 TMNotifyEndOfExecution(pVM);
765 switch (rc)
766 {
767 /*
768 * Executed without anything out of the way happening.
769 */
770 case EXCP_SINGLE_INSTR:
771 rc = VINF_EM_RESCHEDULE;
772 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
773 break;
774
775 /*
776 * If we take a trap or start servicing a pending interrupt, we might end up here.
777 * (Timer thread or some other thread wishing EMT's attention.)
778 */
779 case EXCP_INTERRUPT:
780 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
781 rc = VINF_EM_RESCHEDULE;
782 break;
783
784 /*
785 * Single step, we assume!
786 * If there was a breakpoint there we're fucked now.
787 */
788 case EXCP_DEBUG:
789 {
790 /* breakpoint or single step? */
791 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
792 int iBP;
793 rc = VINF_EM_DBG_STEPPED;
794 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
795 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
796 {
797 rc = VINF_EM_DBG_BREAKPOINT;
798 break;
799 }
800 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
801 break;
802 }
803
804 /*
805 * hlt instruction.
806 */
807 case EXCP_HLT:
808 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
809 rc = VINF_EM_HALT;
810 break;
811
812 /*
813 * The VM has halted.
814 */
815 case EXCP_HALTED:
816 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
817 rc = VINF_EM_HALT;
818 break;
819
820 /*
821 * Switch to RAW-mode.
822 */
823 case EXCP_EXECUTE_RAW:
824 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
825 rc = VINF_EM_RESCHEDULE_RAW;
826 break;
827
828 /*
829 * Switch to hardware accelerated RAW-mode.
830 */
831 case EXCP_EXECUTE_HWACC:
832 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
833 rc = VINF_EM_RESCHEDULE_HWACC;
834 break;
835
836 /*
837 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
838 */
839 case EXCP_RC:
840 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
841 rc = pVM->rem.s.rc;
842 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
843 break;
844
845 /*
846 * Figure out the rest when they arrive....
847 */
848 default:
849 AssertMsgFailed(("rc=%d\n", rc));
850 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
851 rc = VINF_EM_RESCHEDULE;
852 break;
853 }
854
855 /*
856 * Switch back the state.
857 */
858 pVM->rem.s.Env.interrupt_request = interrupt_request;
859 rc2 = REMR3StateBack(pVM);
860 AssertRC(rc2);
861 }
862
863 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
864 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
865 return rc;
866}
867
868
869/**
870 * Runs code in recompiled mode.
871 *
872 * Before calling this function the REM state needs to be in sync with
873 * the VM. Call REMR3State() to perform the sync. It's only necessary
874 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
875 * and after calling REMR3StateBack().
876 *
877 * @returns VBox status code.
878 *
879 * @param pVM VM Handle.
880 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Bracket the recompiled execution so TM can account for the time spent in it. */
    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);

    /* Translate the QEMU exit reason (EXCP_*) into a VBox status code for EM. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the registered breakpoints for the
               current PC; if none matches, report this as a single step event. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* consume the latched status */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
995
996
997/**
998 * Check if the cpu state is suitable for Raw execution.
999 *
1000 * @returns boolean
1001 * @param env The CPU env struct.
1002 * @param eip The EIP to check this for (might differ from env->eip).
1003 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1004 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1005 *
1006 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1007 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields filled in below are valid; HWACCMR3CanExecuteGuest
         * must not look at anything else.
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        /* QEMU keeps the attribute bits shifted up by 8 in the flags word. */
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
#ifdef LOG_ENABLED
        Ctx.rip = env->eip;
#endif

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    /* Virtual 8086 mode is not handled by raw mode (unless explicitly enabled). */
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    /* Single-instruction emulation and debug facilities require staying in REM. */
    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable when the guest CPUID actually reports it. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 raw execution. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 raw execution. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* PATM patch code is always executed in raw mode. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1233
1234
1235/**
1236 * Fetches a code byte.
1237 *
1238 * @returns Success indicator (bool) for ease of use.
1239 * @param env The CPU environment structure.
1240 * @param GCPtrInstr Where to fetch code.
1241 * @param pu8Byte Where to store the byte on success
1242 */
1243bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1244{
1245 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1246 if (RT_SUCCESS(rc))
1247 return true;
1248 return false;
1249}
1250
1251
1252/**
1253 * Flush (or invalidate if you like) page table/dir entry.
1254 *
1255 * (invlpg instruction; tlb_flush_page)
1256 *
1257 * @param env Pointer to cpu environment.
1258 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1259 */
1260void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1261{
1262 PVM pVM = env->pVM;
1263 PCPUMCTX pCtx;
1264 int rc;
1265
1266 /*
1267 * When we're replaying invlpg instructions or restoring a saved
1268 * state we disable this path.
1269 */
1270 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1271 return;
1272 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1273 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1274
1275 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1276
1277 /*
1278 * Update the control registers before calling PGMFlushPage.
1279 */
1280 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1281 pCtx->cr0 = env->cr[0];
1282 pCtx->cr3 = env->cr[3];
1283 pCtx->cr4 = env->cr[4];
1284
1285 /*
1286 * Let PGM do the rest.
1287 */
1288 rc = PGMInvalidatePage(pVM, GCPtr);
1289 if (RT_FAILURE(rc))
1290 {
1291 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1292 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1293 }
1294 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1295}
1296
1297
1298#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Translates a guest physical address into a host pointer for the recompiler TLB.
 *
 * The two low bits of the returned pointer carry status: value 1 means the
 * access could not be resolved (unassigned / catch-all) and must be handled
 * elsewhere; bit 1 set means writes to the page must be caught.
 *
 * @returns Host pointer, possibly with the low status bits set as described.
 * @param   env1        The CPU environment.
 * @param   physAddr    The guest physical address (page aligned enough for bit fiddling).
 * @param   fWritable   Whether write access is requested.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored here -- the call always
       requests a writable mapping and relies on VINF_PGM_PHYS_TLB_CATCH_WRITE
       for write-monitored pages. Confirm this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1318
1319target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr)
1320{
1321 RTGCPHYS rv = 0;
1322 int rc;
1323
1324 rc = PGMR3DbgR3Ptr2GCPhys(env1->pVM, (RTR3PTR)addr, &rv);
1325 Assert (RT_SUCCESS(rc));
1326
1327 return (target_ulong)rv;
1328}
1329#endif
1330
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * Without VBOX_REM_PROTECT_PAGES_FROM_SMC this compiles to a no-op.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (    (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1349
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * Mirror of remR3ProtectCode; the CSAM call is only compiled in when
 * VBOX_REM_PROTECT_PAGES_FROM_SMC is defined.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (    (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1368
1369/**
1370 * Called when the CPU is initialized, any of the CRx registers are changed or
1371 * when the A20 line is modified.
1372 *
1373 * @param env Pointer to the CPU environment.
1374 * @param fGlobal Set if the flush is global.
1375 */
1376void remR3FlushTLB(CPUState *env, bool fGlobal)
1377{
1378 PVM pVM = env->pVM;
1379 PCPUMCTX pCtx;
1380
1381 /*
1382 * When we're replaying invlpg instructions or restoring a saved
1383 * state we disable this path.
1384 */
1385 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1386 return;
1387 Assert(pVM->rem.s.fInREM);
1388
1389 /*
1390 * The caller doesn't check cr4, so we have to do that for ourselves.
1391 */
1392 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1393 fGlobal = true;
1394 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1395
1396 /*
1397 * Update the control registers before calling PGMR3FlushTLB.
1398 */
1399 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1400 pCtx->cr0 = env->cr[0];
1401 pCtx->cr3 = env->cr[3];
1402 pCtx->cr4 = env->cr[4];
1403
1404 /*
1405 * Let PGM do the rest.
1406 */
1407 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1408}
1409
1410
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    int rc;
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

    /* A PGMChangeMode() failure is fatal: cpu_abort() terminates emulation,
       since the recompiler can't continue in an inconsistent paging mode.
       EFER only matters on 64-bit capable targets. */
#ifdef TARGET_X86_64
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
#else
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
#endif
}
1449
1450
1451/**
1452 * Called from compiled code to run dma.
1453 *
1454 * @param env Pointer to the CPU environment.
1455 */
1456void remR3DmaRun(CPUState *env)
1457{
1458 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1459 PDMR3DmaRun(env->pVM);
1460 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1461}
1462
1463
1464/**
1465 * Called from compiled code to schedule pending timers in VMM
1466 *
1467 * @param env Pointer to the CPU environment.
1468 */
1469void remR3TimersRun(CPUState *env)
1470{
1471 LogFlow(("remR3TimersRun:\n"));
1472 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1473 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1474 TMR3TimerQueuesDo(env->pVM);
1475 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1476 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1477}
1478
1479
/**
 * Record trap occurance
 *
 * Tracks repeated occurrences of the same exception so that a guest stuck
 * re-raising one trap can be detected (see VERR_REM_TOO_MANY_TRAPS below).
 *
 * @returns VBox status code
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-trap counters (index 0..254). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only hardware exceptions (vectors < 0x20) in protected, non-V86 mode
       take part in the repeat detection below. */
    if(    uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap repeated more than 512 times in a row -> give up and
           raise a fatal status so EM can break the loop. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Different trap, or same trap at a different location -> restart the count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    else
    {
        /* Not a tracked hardware exception: just remember it and reset the count. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    return VINF_SUCCESS;
}
1539
1540
1541/*
1542 * Clear current active trap
1543 *
1544 * @param pVM VM Handle.
1545 */
1546void remR3TrapClear(PVM pVM)
1547{
1548 pVM->rem.s.cPendingExceptions = 0;
1549 pVM->rem.s.uPendingException = 0;
1550 pVM->rem.s.uPendingExcptEIP = 0;
1551 pVM->rem.s.uPendingExcptCR2 = 0;
1552}
1553
1554
/**
 * Record previous call instruction addresses
 *
 * Forwards the current EIP to CSAM for call-address tracking.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1564
1565
1566/**
1567 * Syncs the internal REM state with the VM.
1568 *
1569 * This must be called before REMR3Run() is invoked whenever when the REM
1570 * state is not up to date. Calling it several times in a row is not
1571 * permitted.
1572 *
1573 * @returns VBox status code.
1574 *
1575 * @param pVM VM Handle.
1576 * @param fFlushTBs Flush all translation blocks before executing code
1577 *
1578 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1579 * no do this since the majority of the callers don't want any unnecessary of events
1580 * pending that would immediatly interrupt execution.
1581 */
1582REMR3DECL(int) REMR3State(PVM pVM)
1583{
1584 register const CPUMCTX *pCtx;
1585 register unsigned fFlags;
1586 bool fHiddenSelRegsValid;
1587 unsigned i;
1588 TRPMEVENT enmType;
1589 uint8_t u8TrapNo;
1590 int rc;
1591
1592 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1593 Log2(("REMR3State:\n"));
1594
1595 pCtx = pVM->rem.s.pCtx;
1596 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1597
1598 Assert(!pVM->rem.s.fInREM);
1599 pVM->rem.s.fInStateSync = true;
1600
1601 /*
1602 * If we have to flush TBs, do that immediately.
1603 */
1604 if (pVM->rem.s.fFlushTBs)
1605 {
1606 STAM_COUNTER_INC(&gStatFlushTBs);
1607 tb_flush(&pVM->rem.s.Env);
1608 pVM->rem.s.fFlushTBs = false;
1609 }
1610
1611 /*
1612 * Copy the registers which require no special handling.
1613 */
1614#ifdef TARGET_X86_64
1615 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1616 Assert(R_EAX == 0);
1617 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1618 Assert(R_ECX == 1);
1619 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1620 Assert(R_EDX == 2);
1621 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1622 Assert(R_EBX == 3);
1623 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1624 Assert(R_ESP == 4);
1625 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1626 Assert(R_EBP == 5);
1627 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1628 Assert(R_ESI == 6);
1629 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1630 Assert(R_EDI == 7);
1631 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1632 pVM->rem.s.Env.regs[8] = pCtx->r8;
1633 pVM->rem.s.Env.regs[9] = pCtx->r9;
1634 pVM->rem.s.Env.regs[10] = pCtx->r10;
1635 pVM->rem.s.Env.regs[11] = pCtx->r11;
1636 pVM->rem.s.Env.regs[12] = pCtx->r12;
1637 pVM->rem.s.Env.regs[13] = pCtx->r13;
1638 pVM->rem.s.Env.regs[14] = pCtx->r14;
1639 pVM->rem.s.Env.regs[15] = pCtx->r15;
1640
1641 pVM->rem.s.Env.eip = pCtx->rip;
1642
1643 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1644#else
1645 Assert(R_EAX == 0);
1646 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1647 Assert(R_ECX == 1);
1648 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1649 Assert(R_EDX == 2);
1650 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1651 Assert(R_EBX == 3);
1652 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1653 Assert(R_ESP == 4);
1654 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1655 Assert(R_EBP == 5);
1656 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1657 Assert(R_ESI == 6);
1658 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1659 Assert(R_EDI == 7);
1660 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1661 pVM->rem.s.Env.eip = pCtx->eip;
1662
1663 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1664#endif
1665
1666 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1667
1668 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1669 for (i=0;i<8;i++)
1670 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1671
1672 /*
1673 * Clear the halted hidden flag (the interrupt waking up the CPU can
1674 * have been dispatched in raw mode).
1675 */
1676 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1677
1678 /*
1679 * Replay invlpg?
1680 */
1681 if (pVM->rem.s.cInvalidatedPages)
1682 {
1683 RTUINT i;
1684
1685 pVM->rem.s.fIgnoreInvlPg = true;
1686 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1687 {
1688 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1689 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1690 }
1691 pVM->rem.s.fIgnoreInvlPg = false;
1692 pVM->rem.s.cInvalidatedPages = 0;
1693 }
1694
1695 /* Replay notification changes? */
1696 if (pVM->rem.s.cHandlerNotifications)
1697 REMR3ReplayHandlerNotifications(pVM);
1698
1699 /* Update MSRs; before CRx registers! */
1700 pVM->rem.s.Env.efer = pCtx->msrEFER;
1701 pVM->rem.s.Env.star = pCtx->msrSTAR;
1702 pVM->rem.s.Env.pat = pCtx->msrPAT;
1703#ifdef TARGET_X86_64
1704 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1705 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1706 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1707 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1708
1709 /* Update the internal long mode activate flag according to the new EFER value. */
1710 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1711 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1712 else
1713 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1714#endif
1715
1716
1717 /*
1718 * Registers which are rarely changed and require special handling / order when changed.
1719 */
1720 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1721 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1722 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1723 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR | CPUM_CHANGED_TR
1724 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1725 {
1726 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1727 {
1728 pVM->rem.s.fIgnoreCR3Load = true;
1729 tlb_flush(&pVM->rem.s.Env, true);
1730 pVM->rem.s.fIgnoreCR3Load = false;
1731 }
1732
1733 /* CR4 before CR0! */
1734 if (fFlags & CPUM_CHANGED_CR4)
1735 {
1736 pVM->rem.s.fIgnoreCR3Load = true;
1737 pVM->rem.s.fIgnoreCpuMode = true;
1738 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1739 pVM->rem.s.fIgnoreCpuMode = false;
1740 pVM->rem.s.fIgnoreCR3Load = false;
1741 }
1742
1743 if (fFlags & CPUM_CHANGED_CR0)
1744 {
1745 pVM->rem.s.fIgnoreCR3Load = true;
1746 pVM->rem.s.fIgnoreCpuMode = true;
1747 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1748 pVM->rem.s.fIgnoreCpuMode = false;
1749 pVM->rem.s.fIgnoreCR3Load = false;
1750 }
1751
1752 if (fFlags & CPUM_CHANGED_CR3)
1753 {
1754 pVM->rem.s.fIgnoreCR3Load = true;
1755 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1756 pVM->rem.s.fIgnoreCR3Load = false;
1757 }
1758
1759 if (fFlags & CPUM_CHANGED_GDTR)
1760 {
1761 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1762 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1763 }
1764
1765 if (fFlags & CPUM_CHANGED_IDTR)
1766 {
1767 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1768 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1769 }
1770
1771 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1772 {
1773 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1774 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1775 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1776 }
1777
1778 if (fFlags & CPUM_CHANGED_LDTR)
1779 {
1780 if (fHiddenSelRegsValid)
1781 {
1782 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1783 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1784 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1785 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;;
1786 }
1787 else
1788 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1789 }
1790
1791 if (fFlags & CPUM_CHANGED_TR)
1792 {
1793 if (fHiddenSelRegsValid)
1794 {
1795 pVM->rem.s.Env.tr.selector = pCtx->tr;
1796 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1797 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1798 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;;
1799 }
1800 else
1801 sync_tr(&pVM->rem.s.Env, pCtx->tr);
1802
1803 /** @note do_interrupt will fault if the busy flag is still set.... */
1804 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1805 }
1806
1807 if (fFlags & CPUM_CHANGED_CPUID)
1808 {
1809 uint32_t u32Dummy;
1810
1811 /*
1812 * Get the CPUID features.
1813 */
1814 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1815 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1816 }
1817
1818 /* Sync FPU state after CR4 and CPUID. */
1819 if (fFlags & CPUM_CHANGED_FPU_REM)
1820 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1821 }
1822
1823 /*
1824 * Update selector registers.
1825 * This must be done *after* we've synced gdt, ldt and crX registers
1826 * since we're reading the GDT/LDT om sync_seg. This will happen with
1827 * saved state which takes a quick dip into rawmode for instance.
1828 */
1829 /*
1830 * Stack; Note first check this one as the CPL might have changed. The
1831 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1832 */
1833
1834 if (fHiddenSelRegsValid)
1835 {
1836 /* The hidden selector registers are valid in the CPU context. */
1837 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1838
1839 /* Set current CPL */
1840 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1841
1842 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1843 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1844 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1845 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1846 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1847 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1848 }
1849 else
1850 {
1851 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1852 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1853 {
1854 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1855
1856 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1857 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1858#ifdef VBOX_WITH_STATISTICS
1859 if (pVM->rem.s.Env.segs[R_SS].newselector)
1860 {
1861 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1862 }
1863#endif
1864 }
1865 else
1866 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1867
1868 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1869 {
1870 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1871 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1872#ifdef VBOX_WITH_STATISTICS
1873 if (pVM->rem.s.Env.segs[R_ES].newselector)
1874 {
1875 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1876 }
1877#endif
1878 }
1879 else
1880 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1881
1882 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1883 {
1884 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1885 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1886#ifdef VBOX_WITH_STATISTICS
1887 if (pVM->rem.s.Env.segs[R_CS].newselector)
1888 {
1889 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1890 }
1891#endif
1892 }
1893 else
1894 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1895
1896 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1897 {
1898 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1899 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1900#ifdef VBOX_WITH_STATISTICS
1901 if (pVM->rem.s.Env.segs[R_DS].newselector)
1902 {
1903 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1904 }
1905#endif
1906 }
1907 else
1908 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1909
1910 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1911 * be the same but not the base/limit. */
1912 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1913 {
1914 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1915 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1916#ifdef VBOX_WITH_STATISTICS
1917 if (pVM->rem.s.Env.segs[R_FS].newselector)
1918 {
1919 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1920 }
1921#endif
1922 }
1923 else
1924 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1925
1926 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1927 {
1928 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1929 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1930#ifdef VBOX_WITH_STATISTICS
1931 if (pVM->rem.s.Env.segs[R_GS].newselector)
1932 {
1933 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1934 }
1935#endif
1936 }
1937 else
1938 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1939 }
1940
1941 /*
1942 * Check for traps.
1943 */
1944 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
1945 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
1946 if (RT_SUCCESS(rc))
1947 {
1948#ifdef DEBUG
1949 if (u8TrapNo == 0x80)
1950 {
1951 remR3DumpLnxSyscall(pVM);
1952 remR3DumpOBsdSyscall(pVM);
1953 }
1954#endif
1955
1956 pVM->rem.s.Env.exception_index = u8TrapNo;
1957 if (enmType != TRPM_SOFTWARE_INT)
1958 {
1959 pVM->rem.s.Env.exception_is_int = 0;
1960 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
1961 }
1962 else
1963 {
1964 /*
1965 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
1966 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
1967 * for int03 and into.
1968 */
1969 pVM->rem.s.Env.exception_is_int = 1;
1970 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
1971 /* int 3 may be generated by one-byte 0xcc */
1972 if (u8TrapNo == 3)
1973 {
1974 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
1975 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1976 }
1977 /* int 4 may be generated by one-byte 0xce */
1978 else if (u8TrapNo == 4)
1979 {
1980 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
1981 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1982 }
1983 }
1984
1985 /* get error code and cr2 if needed. */
1986 switch (u8TrapNo)
1987 {
1988 case 0x0e:
1989 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
1990 /* fallthru */
1991 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
1992 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
1993 break;
1994
1995 case 0x11: case 0x08:
1996 default:
1997 pVM->rem.s.Env.error_code = 0;
1998 break;
1999 }
2000
2001 /*
2002 * We can now reset the active trap since the recompiler is gonna have a go at it.
2003 */
2004 rc = TRPMResetTrap(pVM);
2005 AssertRC(rc);
2006 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2007 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2008 }
2009
2010 /*
2011 * Clear old interrupt request flags; Check for pending hardware interrupts.
2012 * (See @remark for why we don't check for other FFs.)
2013 */
2014 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2015 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2016 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2017 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2018
2019 /*
2020 * We're now in REM mode.
2021 */
2022 pVM->rem.s.fInREM = true;
2023 pVM->rem.s.fInStateSync = false;
2024 pVM->rem.s.cCanExecuteRaw = 0;
2025 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2026 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2027 return VINF_SUCCESS;
2028}
2029
2030
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR = 0;
    pCtx->fpu.MXCSR_MASK = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8 = pVM->rem.s.Env.regs[8];
    pCtx->r9 = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors which QEMU still has marked as lazily-loaded
       (newselector != 0); those may be out of sync with the hidden
       register state copied back below. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* Descriptor table registers: raise the appropriate sync force-action
       flag whenever the base changed so the VMM resyncs its shadow copies. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only!
     * The 0xF0FF mask drops the limit 19:16 nibble of the descriptor dword. */
    pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;

    pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * A pending exception in the recompiler env is handed back to TRPM
     * so the VMM can deliver (or re-inject) it.
     */
    if ( pVM->rem.s.Env.exception_index >= 0
        && pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Only the exceptions below carry an error code (and #PF also a fault address). */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2263
2264
2265/**
2266 * This is called by the disassembler when it wants to update the cpu state
2267 * before for instance doing a register dump.
2268 */
2269static void remR3StateUpdate(PVM pVM)
2270{
2271 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2272 unsigned i;
2273
2274 Assert(pVM->rem.s.fInREM);
2275
2276 /*
2277 * Copy back the registers.
2278 * This is done in the order they are declared in the CPUMCTX structure.
2279 */
2280
2281 /** @todo FOP */
2282 /** @todo FPUIP */
2283 /** @todo CS */
2284 /** @todo FPUDP */
2285 /** @todo DS */
2286 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2287 pCtx->fpu.MXCSR = 0;
2288 pCtx->fpu.MXCSR_MASK = 0;
2289
2290 /** @todo check if FPU/XMM was actually used in the recompiler */
2291 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2292//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2293
2294#ifdef TARGET_X86_64
2295 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2296 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2297 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2298 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2299 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2300 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2301 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2302 pCtx->r8 = pVM->rem.s.Env.regs[8];
2303 pCtx->r9 = pVM->rem.s.Env.regs[9];
2304 pCtx->r10 = pVM->rem.s.Env.regs[10];
2305 pCtx->r11 = pVM->rem.s.Env.regs[11];
2306 pCtx->r12 = pVM->rem.s.Env.regs[12];
2307 pCtx->r13 = pVM->rem.s.Env.regs[13];
2308 pCtx->r14 = pVM->rem.s.Env.regs[14];
2309 pCtx->r15 = pVM->rem.s.Env.regs[15];
2310
2311 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2312#else
2313 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2314 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2315 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2316 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2317 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2318 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2319 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2320
2321 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2322#endif
2323
2324 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2325
2326 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2327 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2328 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2329 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2330 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2331
2332#ifdef TARGET_X86_64
2333 pCtx->rip = pVM->rem.s.Env.eip;
2334 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2335#else
2336 pCtx->eip = pVM->rem.s.Env.eip;
2337 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2338#endif
2339
2340 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2341 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2342 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2343 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2344
2345 for (i=0;i<8;i++)
2346 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2347
2348 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2349 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2350 {
2351 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2352 STAM_COUNTER_INC(&gStatREMGDTChange);
2353 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2354 }
2355
2356 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2357 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2358 {
2359 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2360 STAM_COUNTER_INC(&gStatREMIDTChange);
2361 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2362 }
2363
2364 if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
2365 {
2366 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2367 STAM_COUNTER_INC(&gStatREMLDTRChange);
2368 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2369 }
2370 if (pCtx->tr != pVM->rem.s.Env.tr.selector)
2371 {
2372 pCtx->tr = pVM->rem.s.Env.tr.selector;
2373 STAM_COUNTER_INC(&gStatREMTRChange);
2374 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2375 }
2376
2377 /** @todo These values could still be out of sync! */
2378 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2379 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2380 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2381 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2382
2383 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2384 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2385 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2386
2387 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2388 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2389 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2390
2391 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2392 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2393 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2394
2395 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2396 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2397 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2398
2399 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2400 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2401 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2402
2403 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2404 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2405 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2406
2407 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2408 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2409 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;
2410
2411 /* Sysenter MSR */
2412 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2413 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2414 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2415
2416 /* System MSRs. */
2417 pCtx->msrEFER = pVM->rem.s.Env.efer;
2418 pCtx->msrSTAR = pVM->rem.s.Env.star;
2419 pCtx->msrPAT = pVM->rem.s.Env.pat;
2420#ifdef TARGET_X86_64
2421 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2422 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2423 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2424 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2425#endif
2426
2427}
2428
2429
2430/**
2431 * Update the VMM state information if we're currently in REM.
2432 *
2433 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2434 * we're currently executing in REM and the VMM state is invalid. This method will of
2435 * course check that we're executing in REM before syncing any data over to the VMM.
2436 *
2437 * @param pVM The VM handle.
2438 */
2439REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2440{
2441 if (pVM->rem.s.fInREM)
2442 remR3StateUpdate(pVM);
2443}
2444
2445
2446#undef LOG_GROUP
2447#define LOG_GROUP LOG_GROUP_REM
2448
2449
2450/**
2451 * Notify the recompiler about Address Gate 20 state change.
2452 *
2453 * This notification is required since A20 gate changes are
2454 * initialized from a device driver and the VM might just as
2455 * well be in REM mode as in RAW mode.
2456 *
2457 * @param pVM VM handle.
2458 * @param fEnable True if the gate should be enabled.
2459 * False if the gate should be disabled.
2460 */
2461REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2462{
2463 bool fSaved;
2464
2465 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2466 VM_ASSERT_EMT(pVM);
2467
2468 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2469 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2470
2471 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2472
2473 pVM->rem.s.fIgnoreAll = fSaved;
2474}
2475
2476
2477/**
2478 * Replays the invalidated recorded pages.
2479 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2480 *
2481 * @param pVM VM handle.
2482 */
2483REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2484{
2485 RTUINT i;
2486
2487 VM_ASSERT_EMT(pVM);
2488
2489 /*
2490 * Sync the required registers.
2491 */
2492 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2493 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2494 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2495 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2496
2497 /*
2498 * Replay the flushes.
2499 */
2500 pVM->rem.s.fIgnoreInvlPg = true;
2501 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2502 {
2503 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2504 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2505 }
2506 pVM->rem.s.fIgnoreInvlPg = false;
2507 pVM->rem.s.cInvalidatedPages = 0;
2508}
2509
2510
2511/**
2512 * Replays the handler notification changes
2513 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2514 *
2515 * @param pVM VM handle.
2516 */
2517REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2518{
2519 /*
2520 * Replay the flushes.
2521 */
2522 RTUINT i;
2523 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2524
2525 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2526 VM_ASSERT_EMT(pVM);
2527
2528 pVM->rem.s.cHandlerNotifications = 0;
2529 for (i = 0; i < c; i++)
2530 {
2531 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2532 switch (pRec->enmKind)
2533 {
2534 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2535 REMR3NotifyHandlerPhysicalRegister(pVM,
2536 pRec->u.PhysicalRegister.enmType,
2537 pRec->u.PhysicalRegister.GCPhys,
2538 pRec->u.PhysicalRegister.cb,
2539 pRec->u.PhysicalRegister.fHasHCHandler);
2540 break;
2541
2542 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2543 REMR3NotifyHandlerPhysicalDeregister(pVM,
2544 pRec->u.PhysicalDeregister.enmType,
2545 pRec->u.PhysicalDeregister.GCPhys,
2546 pRec->u.PhysicalDeregister.cb,
2547 pRec->u.PhysicalDeregister.fHasHCHandler,
2548 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2549 break;
2550
2551 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2552 REMR3NotifyHandlerPhysicalModify(pVM,
2553 pRec->u.PhysicalModify.enmType,
2554 pRec->u.PhysicalModify.GCPhysOld,
2555 pRec->u.PhysicalModify.GCPhysNew,
2556 pRec->u.PhysicalModify.cb,
2557 pRec->u.PhysicalModify.fHasHCHandler,
2558 pRec->u.PhysicalModify.fRestoreAsRAM);
2559 break;
2560
2561 default:
2562 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2563 break;
2564 }
2565 }
2566 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2567}
2568
2569
/**
 * Notify REM about changed code page.
 *
 * @returns VBox status code.  Always VINF_SUCCESS; a PGMGstGetPage() failure
 *          is silently treated as "nothing to flush" (best effort).
 * @param   pVM         VM handle.
 * @param   pvCodePage  Code page address (guest virtual).
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        /* Invalidate all translation blocks overlapping the page. */
        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    /* NOTE(review): without VBOX_REM_PROTECT_PAGES_FROM_SMC this is a no-op
       and both parameters are unused - presumably intentional. */
    return VINF_SUCCESS;
}
2607
2608
/**
 * Notification about a successful MMR3PhysRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address of the RAM.
 * @param   cb          Size of the memory.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
{
    uint32_t cbBitmap;
    int rc;
    Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Base ram?  GCPhys == 0 means this is the main RAM range; set up QEMU's
     * global dirty-page tracking bitmap (one byte per page, all marked dirty).
     */
    if (!GCPhys)
    {
        phys_ram_size = cb;
        phys_ram_dirty_size = cb >> PAGE_SHIFT;
#ifndef VBOX_STRICT
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
        /* Allocate a bitmap covering the full 4GB address space, protect the
           part beyond the real bitmap, and shift the pointer so accesses past
           phys_ram_dirty_size land in the inaccessible region and fault. */
        phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
        cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
        AssertRC(rc);
        phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
#endif
        memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    }

    /*
     * Register the ram.  fIgnoreAll suppresses re-entrant notifications while
     * QEMU registration is in progress.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

#ifdef VBOX_WITH_NEW_PHYS_CODE
    if (fFlags & MM_RAM_FLAGS_RESERVED)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else
        cpu_register_physical_memory(GCPhys, cb, GCPhys);
#else
    if (!GCPhys)
        cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
    else
    {
        if (fFlags & MM_RAM_FLAGS_RESERVED)
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        else
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
    }
#endif
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2677
2678#ifndef VBOX_WITH_NEW_PHYS_CODE
2679
/**
 * Notification about a successful PGMR3PhysRegisterChunk() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address of the RAM.
 * @param   cb          Size of the memory.
 * @param   pvRam       The HC address of the RAM.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
{
    Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     * Chunks are page-aligned, exactly PGM_DYNAMIC_CHUNK_SIZE bytes of plain RAM.
     */
    Assert(pvRam);
    Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
    Assert(fFlags == 0 /* normal RAM */);
    /* Suppress re-entrant notifications while registering with QEMU. */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;
    cpu_register_physical_memory(GCPhys, cb, GCPhys);
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2708
2709
2710/**
2711 * Grows dynamically allocated guest RAM.
2712 * Will raise a fatal error if the operation fails.
2713 *
2714 * @param physaddr The physical address.
2715 */
2716void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2717{
2718 int rc;
2719 PVM pVM = cpu_single_env->pVM;
2720 const RTGCPHYS GCPhys = physaddr;
2721
2722 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2723 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2724 if (RT_SUCCESS(rc))
2725 return;
2726
2727 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2728 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2729 AssertFatalFailed();
2730}
2731
2732#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2733
/**
 * Notification about a successful MMR3PhysRomRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address of the ROM.
 * @param   cb          The size of the ROM.
 * @param   pvCopy      Pointer to the ROM copy.
 * @param   fShadow     Whether it's currently writable shadow ROM or normal readonly ROM.
 *                      This function will be called when ever the protection of the
 *                      shadow ROM changes (at reset and end of POST).
 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
    Assert(pvCopy);
    Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);

    /*
     * Register the rom.  Writable shadow ROM is registered as plain RAM
     * (no IO_MEM_ROM flag); protected ROM gets the IO_MEM_ROM flag.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));

    /* Dump the last 64 bytes of the copy for debugging purposes. */
    Log2(("%.64Rhxd\n", (char *)pvCopy + cb - 64));

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2772
2773
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address.
 * @param   cb          The size of the range.
 * @todo Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't
 *       reserve any memory soon.
 */
REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysReserve: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory - tell QEMU the range is now unbacked.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2806
2807
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *
 * @remark  MMR3PhysRomRegister assumes that this function will not apply the
 *          Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));
    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);

    /* Flush any queued notifications first so ordering is preserved. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    /* MMIO ranges always get the MMIO memory type; other handler types only
       need special treatment when there's an HC callback to invoke. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2842
2843
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);

    /* Flush any queued notifications first so ordering is preserved. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Ranges above the RAM size were never RAM; unassign them. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            /* Re-register the range as plain RAM with QEMU. */
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2887
2888
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* Moving an MMIO range is not supported here. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    /* Replay any queued handler notifications first so we operate on a current layout. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    if (fHasHCHandler)
    {
        /* Suppress recursive notifications while we reprogram QEMU's physical memory map. */
        Assert(!pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = true;

        /*
         * Reset the old page.
         */
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);

        Assert(pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = false;
    }
}
2939
2940
2941/**
2942 * Checks if we're handling access to this page or not.
2943 *
2944 * @returns true if we're trapping access.
2945 * @returns false if we aren't.
2946 * @param pVM The VM handle.
2947 * @param GCPhys The physical address.
2948 *
2949 * @remark This function will only work correctly in VBOX_STRICT builds!
2950 */
2951REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
2952{
2953#ifdef VBOX_STRICT
2954 unsigned long off;
2955 if (pVM->rem.s.cHandlerNotifications)
2956 REMR3ReplayHandlerNotifications(pVM);
2957
2958 off = get_phys_page_offset(GCPhys);
2959 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
2960 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
2961 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
2962#else
2963 return false;
2964#endif
2965}
2966
2967
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry for the page (memory type in the low bits).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored page: dump diagnostics and abort, this is fatal. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3006
/**
 * Read guest RAM and ROM.
 *
 * @param   SrcGCPhys       The source address (guest physical).
 * @param   pvDst           The destination address.
 * @param   cb              Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    /* cpu_single_env is valid here: we're only called from recompiled code. */
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3024
3025
3026/**
3027 * Read guest RAM and ROM, unsigned 8-bit.
3028 *
3029 * @param SrcGCPhys The source address (guest physical).
3030 */
3031RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3032{
3033 uint8_t val;
3034 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3035 VBOX_CHECK_ADDR(SrcGCPhys);
3036 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3037 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3038#ifdef VBOX_DEBUG_PHYS
3039 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3040#endif
3041 return val;
3042}
3043
3044
3045/**
3046 * Read guest RAM and ROM, signed 8-bit.
3047 *
3048 * @param SrcGCPhys The source address (guest physical).
3049 */
3050RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3051{
3052 int8_t val;
3053 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3054 VBOX_CHECK_ADDR(SrcGCPhys);
3055 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3056 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3057#ifdef VBOX_DEBUG_PHYS
3058 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3059#endif
3060 return val;
3061}
3062
3063
3064/**
3065 * Read guest RAM and ROM, unsigned 16-bit.
3066 *
3067 * @param SrcGCPhys The source address (guest physical).
3068 */
3069RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3070{
3071 uint16_t val;
3072 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3073 VBOX_CHECK_ADDR(SrcGCPhys);
3074 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3075 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3076#ifdef VBOX_DEBUG_PHYS
3077 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3078#endif
3079 return val;
3080}
3081
3082
3083/**
3084 * Read guest RAM and ROM, signed 16-bit.
3085 *
3086 * @param SrcGCPhys The source address (guest physical).
3087 */
3088RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3089{
3090 int16_t val;
3091 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3092 VBOX_CHECK_ADDR(SrcGCPhys);
3093 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3094 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3095#ifdef VBOX_DEBUG_PHYS
3096 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3097#endif
3098 return val;
3099}
3100
3101
3102/**
3103 * Read guest RAM and ROM, unsigned 32-bit.
3104 *
3105 * @param SrcGCPhys The source address (guest physical).
3106 */
3107RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3108{
3109 uint32_t val;
3110 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3111 VBOX_CHECK_ADDR(SrcGCPhys);
3112 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3113 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3114#ifdef VBOX_DEBUG_PHYS
3115 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3116#endif
3117 return val;
3118}
3119
3120
3121/**
3122 * Read guest RAM and ROM, signed 32-bit.
3123 *
3124 * @param SrcGCPhys The source address (guest physical).
3125 */
3126RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3127{
3128 int32_t val;
3129 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3130 VBOX_CHECK_ADDR(SrcGCPhys);
3131 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3132 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3133#ifdef VBOX_DEBUG_PHYS
3134 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3135#endif
3136 return val;
3137}
3138
3139
3140/**
3141 * Read guest RAM and ROM, unsigned 64-bit.
3142 *
3143 * @param SrcGCPhys The source address (guest physical).
3144 */
3145uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3146{
3147 uint64_t val;
3148 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3149 VBOX_CHECK_ADDR(SrcGCPhys);
3150 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3151 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3152#ifdef VBOX_DEBUG_PHYS
3153 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3154#endif
3155 return val;
3156}
3157
3158/**
3159 * Read guest RAM and ROM, signed 64-bit.
3160 *
3161 * @param SrcGCPhys The source address (guest physical).
3162 */
3163int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3164{
3165 int64_t val;
3166 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3167 VBOX_CHECK_ADDR(SrcGCPhys);
3168 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3169 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3170#ifdef VBOX_DEBUG_PHYS
3171 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3172#endif
3173 return val;
3174}
3175
3176
/**
 * Write guest RAM.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   pvSrc           The source address.
 * @param   cb              Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    /* cpu_single_env is valid here: we're only called from recompiled code. */
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3194
3195
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3212
3213
/**
 * Write guest RAM, unsigned 16-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3230
3231
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3248
3249
3250/**
3251 * Write guest RAM, unsigned 64-bit.
3252 *
3253 * @param DstGCPhys The destination address (guest physical).
3254 * @param val Value
3255 */
3256void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3257{
3258 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3259 VBOX_CHECK_ADDR(DstGCPhys);
3260 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3261 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3262#ifdef VBOX_DEBUG_PHYS
3263 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3264#endif
3265}
3266
3267#undef LOG_GROUP
3268#define LOG_GROUP LOG_GROUP_REM_MMIO
3269
3270/** Read MMIO memory. */
3271static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3272{
3273 uint32_t u32 = 0;
3274 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3275 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3276 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3277 return u32;
3278}
3279
3280/** Read MMIO memory. */
3281static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3282{
3283 uint32_t u32 = 0;
3284 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3285 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3286 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3287 return u32;
3288}
3289
3290/** Read MMIO memory. */
3291static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3292{
3293 uint32_t u32 = 0;
3294 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3295 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3296 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3297 return u32;
3298}
3299
3300/** Write to MMIO memory. */
3301static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3302{
3303 int rc;
3304 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3305 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3306 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3307}
3308
3309/** Write to MMIO memory. */
3310static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3311{
3312 int rc;
3313 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3314 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3315 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3316}
3317
3318/** Write to MMIO memory. */
3319static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3320{
3321 int rc;
3322 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3323 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3324 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3325}
3326
3327
3328#undef LOG_GROUP
3329#define LOG_GROUP LOG_GROUP_REM_HANDLER
3330
3331/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3332
3333static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3334{
3335 uint8_t u8;
3336 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3337 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3338 return u8;
3339}
3340
3341static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3342{
3343 uint16_t u16;
3344 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3345 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3346 return u16;
3347}
3348
3349static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3350{
3351 uint32_t u32;
3352 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3353 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3354 return u32;
3355}
3356
3357static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3358{
3359 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3360 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3361}
3362
3363static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3364{
3365 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3366 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3367}
3368
3369static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3370{
3371 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3372 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3373}
3374
3375/* -+- disassembly -+- */
3376
3377#undef LOG_GROUP
3378#define LOG_GROUP LOG_GROUP_REM_DISAS
3379
3380
3381/**
3382 * Enables or disables singled stepped disassembly.
3383 *
3384 * @returns VBox status code.
3385 * @param pVM VM handle.
3386 * @param fEnable To enable set this flag, to disable clear it.
3387 */
3388static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3389{
3390 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3391 VM_ASSERT_EMT(pVM);
3392
3393 if (fEnable)
3394 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3395 else
3396 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3397 return VINF_SUCCESS;
3398}
3399
3400
/**
 * Enables or disables singled stepped disassembly.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   fEnable     To enable set this flag, to disable clear it.
 */
REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    PVMREQ pReq;
    int rc;

    LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
    /* On the EMT we can change the state directly; otherwise forward the
       request to the EMT and wait for it to complete. */
    if (VM_IS_EMT(pVM))
        return remR3DisasEnableStepping(pVM, fEnable);

    rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;   /* propagate the callback's own status */
    VMR3ReqFree(pReq);
    return rc;
}
3424
3425
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument the current mode is printed; with one boolean argument
 * single stepped disassembly is switched on or off accordingly.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3450
3451
/**
 * Disassembles n instructions and prints them to the log.
 *
 * @returns Success indicator.
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   f32BitCode  Indicates that whether or not the code should
 *                      be disassembled as 16 or 32 bit. If -1 the CS
 *                      selector will be inspected.
 * @param   nrInstructions  Nr of instructions to disassemble
 * @param   pszPrefix   Optional prefix prepended to each logged line.
 * @remark  not currently used for anything but ad-hoc debugging.
 */
bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
{
    int i, rc;
    RTGCPTR GCPtrPC;
    uint8_t *pvPC;
    RTINTPTR off;
    DISCPUSTATE Cpu;

    /*
     * Determine 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Convert cs:eip to host context address.
     * We don't care too much about cross page correctness presently.
     */
    GCPtrPC = env->segs[R_CS].base + env->eip;
    if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);

        /* convert eip to physical address. */
        rc = PGMPhysGCPtr2R3PtrByGstCR3(env->pVM,
                                        GCPtrPC,
                                        env->cr[3],
                                        env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
                                        (void**)&pvPC);
        if (RT_FAILURE(rc))
        {
            /* Fall back: the address may be inside the PATM patch memory. */
            if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
                return false;
            pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
                + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
        }
    }
    else
    {
        /* physical address */
        rc = PGMPhysGCPhys2R3Ptr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
                                 (void**)&pvPC);
        if (RT_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    /* off makes DISInstr report guest addresses rather than host ones. */
    off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL;            /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;

    for (i=0;i<nrInstructions;i++)
    {
        char szOutput[256];
        uint32_t cbOp;
        if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
            return false;
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));

        pvPC += cbOp;
    }
    return true;
}
3535
3536
/** @todo need to test the new code, using the old code in the mean while. */
#define USE_OLD_DUMP_AND_DISASSEMBLY

/**
 * Disassembles one instruction and prints it to the log.
 *
 * @returns Success indicator.
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   f32BitCode  Indicates that whether or not the code should
 *                      be disassembled as 16 or 32 bit. If -1 the CS
 *                      selector will be inspected.
 * @param   pszPrefix   Optional prefix prepended to the logged line.
 */
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
{
#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
    PVM pVM = env->pVM;
    RTGCPTR GCPtrPC;
    uint8_t *pvPC;
    char szOutput[256];
    uint32_t cbOp;
    RTINTPTR off;
    DISCPUSTATE Cpu;


    /* Doesn't work in long mode. */
    if (env->hflags & HF_LMA_MASK)
        return false;

    /*
     * Determine 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Log registers
     */
    if (LogIs2Enabled())
    {
        /* Sync the recompiler state back so DBGF shows current values. */
        remR3StateUpdate(pVM);
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
    }

    /*
     * Convert cs:eip to host context address.
     * We don't care too much about cross page correctness presently.
     */
    GCPtrPC = env->segs[R_CS].base + env->eip;
    if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        /* convert eip to physical address. */
        int rc = PGMPhysGCPtr2R3PtrByGstCR3(pVM,
                                            GCPtrPC,
                                            env->cr[3],
                                            env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
                                            (void**)&pvPC);
        if (RT_FAILURE(rc))
        {
            /* Fall back: the address may be inside the PATM patch memory. */
            if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
                return false;
            pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
                + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
        }
    }
    else
    {

        /* physical address */
        int rc = PGMPhysGCPhys2R3Ptr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
        if (RT_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    /* off makes DISInstr report guest addresses rather than host ones. */
    off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL;            /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;
    if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
        return false;

    /* In 16-bit mode also print the CS selector for context. */
    if (!f32BitCode)
    {
        if (pszPrefix)
            Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
        else
            Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
    }
    else
    {
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));
    }
    return true;

#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
    PVM pVM = env->pVM;
    const bool fLog = LogIsEnabled();
    const bool fLog2 = LogIs2Enabled();
    int rc = VINF_SUCCESS;

    /*
     * Don't bother if there ain't any log output to do.
     */
    if (!fLog && !fLog2)
        return true;

    /*
     * Update the state so DBGF reads the correct register values.
     */
    remR3StateUpdate(pVM);

    /*
     * Log registers if requested.
     */
    if (!fLog2)
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);

    /*
     * Disassemble to log.
     */
    if (fLog)
        rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);

    return RT_SUCCESS(rc);
#endif
}
3671
3672
/**
 * Disassemble recompiled code.
 *
 * @param   phFile  Ignored, logfile usually.
 * @param   pvCode  Pointer to the code block.
 * @param   cb      Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* NB: in this configuration the 'if' below guards the whole block. */
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
        /* Recompiled code is host code, so pick the host bitness. */
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                DISAS_PRINTF("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
3720
3721
3722/**
3723 * Disassemble guest code.
3724 *
3725 * @param phFileIgnored Ignored, logfile usually.
3726 * @param uCode The guest address of the code to disassemble. (flat?)
3727 * @param cb Number of bytes to disassemble.
3728 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3729 */
3730void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3731{
3732#ifdef DEBUG_TMP_LOGGING
3733# define DISAS_PRINTF(x...) fprintf(phFile, x)
3734#else
3735# define DISAS_PRINTF(x...) RTLogPrintf(x)
3736 if (LogIs2Enabled())
3737#endif
3738 {
3739 PVM pVM = cpu_single_env->pVM;
3740 RTSEL cs;
3741 RTGCUINTPTR eip;
3742
3743 /*
3744 * Update the state so DBGF reads the correct register values (flags).
3745 */
3746 remR3StateUpdate(pVM);
3747
3748 /*
3749 * Do the disassembling.
3750 */
3751 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3752 cs = cpu_single_env->segs[R_CS].selector;
3753 eip = uCode - cpu_single_env->segs[R_CS].base;
3754 for (;;)
3755 {
3756 char szBuf[256];
3757 uint32_t cbInstr;
3758 int rc = DBGFR3DisasInstrEx(pVM,
3759 cs,
3760 eip,
3761 0,
3762 szBuf, sizeof(szBuf),
3763 &cbInstr);
3764 if (RT_SUCCESS(rc))
3765 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3766 else
3767 {
3768 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3769 cbInstr = 1;
3770 }
3771
3772 /* next */
3773 if (cb <= cbInstr)
3774 break;
3775 cb -= cbInstr;
3776 uCode += cbInstr;
3777 eip += cbInstr;
3778 }
3779 }
3780#undef DISAS_PRINTF
3781}
3782
3783
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer, so the result is
 *          only valid until the next call (not thread-safe / not reentrant).
 * @param   orig_addr   The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    RTGCINTPTR off = 0;
    DBGFSYMBOL Sym;
    PVM pVM = cpu_single_env->pVM;
    int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
    if (RT_SUCCESS(rc))
    {
        static char szSym[sizeof(Sym.szName) + 48];
        /* Format as "name", "name+offset" or "name-offset" depending on off. */
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
3809
3810
3811#undef LOG_GROUP
3812#define LOG_GROUP LOG_GROUP_REM
3813
3814
3815/* -+- FF notifications -+- */
3816
3817
/**
 * Notification about a pending interrupt.
 *
 * @param   pVM             VM Handle.
 * @param   u8Interrupt     Interrupt
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
{
    /* Only one interrupt may be pending at a time. */
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3830
/**
 * Notification about a pending interrupt.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM             VM Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
{
    return pVM->rem.s.u32PendingInterrupt;
}
3842
3843/**
3844 * Notification about the interrupt FF being set.
3845 *
3846 * @param pVM VM Handle.
3847 * @thread The emulation thread.
3848 */
3849REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3850{
3851 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3852 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3853 if (pVM->rem.s.fInREM)
3854 {
3855 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3856 CPU_INTERRUPT_EXTERNAL_HARD);
3857 }
3858}
3859
3860
/**
 * Notification about the interrupt FF being cleared.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    /* Drop the pending hard interrupt request if the recompiler is running. */
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
3873
3874
3875/**
3876 * Notification about pending timer(s).
3877 *
3878 * @param pVM VM Handle.
3879 * @thread Any.
3880 */
3881REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3882{
3883#ifndef DEBUG_bird
3884 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3885#endif
3886 if (pVM->rem.s.fInREM)
3887 {
3888 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3889 CPU_INTERRUPT_EXTERNAL_TIMER);
3890 }
3891}
3892
3893
3894/**
3895 * Notification about pending DMA transfers.
3896 *
3897 * @param pVM VM Handle.
3898 * @thread Any.
3899 */
3900REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3901{
3902 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3903 if (pVM->rem.s.fInREM)
3904 {
3905 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3906 CPU_INTERRUPT_EXTERNAL_DMA);
3907 }
3908}
3909
3910
3911/**
3912 * Notification about pending timer(s).
3913 *
3914 * @param pVM VM Handle.
3915 * @thread Any.
3916 */
3917REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3918{
3919 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3920 if (pVM->rem.s.fInREM)
3921 {
3922 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3923 CPU_INTERRUPT_EXTERNAL_EXIT);
3924 }
3925}
3926
3927
3928/**
3929 * Notification about pending FF set by an external thread.
3930 *
3931 * @param pVM VM handle.
3932 * @thread Any.
3933 */
3934REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3935{
3936 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3937 if (pVM->rem.s.fInREM)
3938 {
3939 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3940 CPU_INTERRUPT_EXTERNAL_EXIT);
3941 }
3942}
3943
3944
#ifdef VBOX_WITH_STATISTICS

/**
 * Maps a STATS_* code onto the corresponding advanced profile sample.
 *
 * Shared by remR3ProfileStart and remR3ProfileStop so the two mappings
 * cannot drift apart.
 *
 * @returns Pointer to the profile sample, NULL if statcode is unknown
 *          (asserted in strict builds).
 * @param   statcode    The statistics code (STATS_*).
 */
static STAMPROFILEADV *remR3StatFromCode(int statcode)
{
    switch (statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:    return &gStatExecuteSingleInstr;
        case STATS_QEMU_COMPILATION:        return &gStatCompilationQEmu;
        case STATS_QEMU_RUN_EMULATED_CODE:  return &gStatRunCodeQEmu;
        case STATS_QEMU_TOTAL:              return &gStatTotalTimeQEmu;
        case STATS_QEMU_RUN_TIMERS:         return &gStatTimers;
        case STATS_TLB_LOOKUP:              return &gStatTBLookup;
        case STATS_IRQ_HANDLING:            return &gStatIRQ;
        case STATS_RAW_CHECK:               return &gStatRawCheck;
        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return NULL;
    }
}

/**
 * Starts the advanced profile sample associated with a statistics code.
 *
 * @param   statcode    The statistics code (STATS_*); unknown codes are ignored.
 */
void remR3ProfileStart(int statcode)
{
    STAMPROFILEADV *pStat = remR3StatFromCode(statcode);
    if (pStat)
        STAM_PROFILE_ADV_START(pStat, a);
}

/**
 * Stops the advanced profile sample associated with a statistics code.
 *
 * @param   statcode    The statistics code (STATS_*); unknown codes are ignored.
 */
void remR3ProfileStop(int statcode)
{
    STAMPROFILEADV *pStat = remR3StatFromCode(statcode);
    if (pStat)
        STAM_PROFILE_ADV_STOP(pStat, a);
}

#endif
4020
4021/**
4022 * Raise an RC, force rem exit.
4023 *
4024 * @param pVM VM handle.
4025 * @param rc The rc.
4026 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Stash the status code where the REM run loop picks it up, then interrupt
       the recompiled code with the special RC interrupt so it exits. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4035
4036
4037/* -+- timers -+- */
4038
/* qemu callback: read the guest TSC. TM is the authoritative source. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);  /* statistics only */
    return TMCpuTickGet(env->pVM);
}
4044
4045
4046/* -+- interrupts -+- */
4047
/* qemu FERR callback: asserts ISA IRQ 13 via PDM; failures are only logged. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4053
/* qemu callback: fetch the next interrupt vector to dispatch, or -1 if none.
   Order matters here: an interrupt already consumed from the (A)PIC in raw
   mode is replayed from u32PendingInterrupt instead of asking PDM again. */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Replay the stashed vector and clear it (single-shot). */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* If more interrupts are pending, keep the hard-interrupt request up
           so the recompiler asks again. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4086
4087
4088/* -+- local apic -+- */
4089
/* qemu callback: forward an APIC base MSR write to PDM; failures are only logged. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4095
4096uint64_t cpu_get_apic_base(CPUX86State *env)
4097{
4098 uint64_t u64;
4099 int rc = PDMApicGetBase(env->pVM, &u64);
4100 if (RT_SUCCESS(rc))
4101 {
4102 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4103 return u64;
4104 }
4105 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4106 return 0;
4107}
4108
/* qemu callback: forward a TPR write to PDM; failures are only logged. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4114
4115uint8_t cpu_get_apic_tpr(CPUX86State *env)
4116{
4117 uint8_t u8;
4118 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4119 if (RT_SUCCESS(rc))
4120 {
4121 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4122 return u8;
4123 }
4124 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4125 return 0;
4126}
4127
4128
4129uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4130{
4131 uint64_t value;
4132 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4133 if (RT_SUCCESS(rc))
4134 {
4135 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4136 return value;
4137 }
4138 /** @todo: exception ? */
4139 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4140 return value;
4141}
4142
/* qemu callback: write an APIC MSR via PDM; failures are only logged. */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4149
/* qemu callback: read a guest MSR via CPUM. */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4154
/* qemu callback: write a guest MSR via CPUM. */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4159/* -+- I/O Ports -+- */
4160
4161#undef LOG_GROUP
4162#define LOG_GROUP LOG_GROUP_REM_IOPORT
4163
/* qemu callback: byte write to an I/O port via IOM.  EM status codes force a
   REM exit via remR3RaiseRC; anything else is fatal. */
void cpu_outb(CPUState *env, int addr, int val)
{
    int rc;

    /* Ports 0x80, 0x70 and 0x61 are excluded from Log2 -- presumably to cut
       log noise from frequently written ports; confirm before changing. */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4182
4183void cpu_outw(CPUState *env, int addr, int val)
4184{
4185 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4186 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4187 if (RT_LIKELY(rc == VINF_SUCCESS))
4188 return;
4189 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4190 {
4191 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4192 remR3RaiseRC(env->pVM, rc);
4193 return;
4194 }
4195 remAbort(rc, __FUNCTION__);
4196}
4197
4198void cpu_outl(CPUState *env, int addr, int val)
4199{
4200 int rc;
4201 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4202 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4203 if (RT_LIKELY(rc == VINF_SUCCESS))
4204 return;
4205 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4206 {
4207 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4208 remR3RaiseRC(env->pVM, rc);
4209 return;
4210 }
4211 remAbort(rc, __FUNCTION__);
4212}
4213
/* qemu callback: byte read from an I/O port via IOM.  EM status codes force a
   REM exit via remR3RaiseRC; anything else is fatal (0xff on the off chance
   remAbort returns). */
int cpu_inb(CPUState *env, int addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Port 0x71 (and formerly 0x61) excluded from Log2 to cut log noise. */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (int)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (int)u32;
    }
    remAbort(rc, __FUNCTION__);
    return 0xff;
}
4233
4234int cpu_inw(CPUState *env, int addr)
4235{
4236 uint32_t u32 = 0;
4237 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4238 if (RT_LIKELY(rc == VINF_SUCCESS))
4239 {
4240 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4241 return (int)u32;
4242 }
4243 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4244 {
4245 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4246 remR3RaiseRC(env->pVM, rc);
4247 return (int)u32;
4248 }
4249 remAbort(rc, __FUNCTION__);
4250 return 0xffff;
4251}
4252
4253int cpu_inl(CPUState *env, int addr)
4254{
4255 uint32_t u32 = 0;
4256 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4257 if (RT_LIKELY(rc == VINF_SUCCESS))
4258 {
4259//if (addr==0x01f0 && u32 == 0x6b6d)
4260// loglevel = ~0;
4261 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4262 return (int)u32;
4263 }
4264 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4265 {
4266 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4267 remR3RaiseRC(env->pVM, rc);
4268 return (int)u32;
4269 }
4270 remAbort(rc, __FUNCTION__);
4271 return 0xffffffff;
4272}
4273
4274#undef LOG_GROUP
4275#define LOG_GROUP LOG_GROUP_REM
4276
4277
4278/* -+- helpers and misc other interfaces -+- */
4279
4280/**
4281 * Perform the CPUID instruction.
4282 *
4283 * ASMCpuId cannot be invoked from some source files where this is used because of global
4284 * register allocations.
4285 *
4286 * @param env Pointer to the recompiler CPU structure.
4287 * @param uOperator CPUID operation (eax).
4288 * @param pvEAX Where to store eax.
4289 * @param pvEBX Where to store ebx.
4290 * @param pvECX Where to store ecx.
4291 * @param pvEDX Where to store edx.
4292 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* The void pointers are really uint32_t's; CPUM holds the guest CPUID data. */
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4297
4298
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    /* NOTE(review): pszFormat is passed unformatted here (format string, not the
       expanded message) -- would need fixing if this code were ever enabled. */
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4328
4329/**
4330 * Interface for the qemu cpu to report unhandled situation
4331 * raising a fatal VM error.
4332 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    /* The list is restarted because the first %N pass consumed it. */
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4362
4363
4364/**
4365 * Aborts the VM.
4366 *
4367 * @param rc VBox error code.
 * @param pszTip Hint about why/when this happened.
4369 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);    /* sync the recompiler state back before bailing out */
    EMR3FatalError(pVM, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4389
4390
4391/**
4392 * Dumps a linux system call.
4393 * @param pVM VM handle.
4394 */
4395void remR3DumpLnxSyscall(PVM pVM)
4396{
4397 static const char *apsz[] =
4398 {
4399 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4400 "sys_exit",
4401 "sys_fork",
4402 "sys_read",
4403 "sys_write",
4404 "sys_open", /* 5 */
4405 "sys_close",
4406 "sys_waitpid",
4407 "sys_creat",
4408 "sys_link",
4409 "sys_unlink", /* 10 */
4410 "sys_execve",
4411 "sys_chdir",
4412 "sys_time",
4413 "sys_mknod",
4414 "sys_chmod", /* 15 */
4415 "sys_lchown16",
4416 "sys_ni_syscall", /* old break syscall holder */
4417 "sys_stat",
4418 "sys_lseek",
4419 "sys_getpid", /* 20 */
4420 "sys_mount",
4421 "sys_oldumount",
4422 "sys_setuid16",
4423 "sys_getuid16",
4424 "sys_stime", /* 25 */
4425 "sys_ptrace",
4426 "sys_alarm",
4427 "sys_fstat",
4428 "sys_pause",
4429 "sys_utime", /* 30 */
4430 "sys_ni_syscall", /* old stty syscall holder */
4431 "sys_ni_syscall", /* old gtty syscall holder */
4432 "sys_access",
4433 "sys_nice",
4434 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4435 "sys_sync",
4436 "sys_kill",
4437 "sys_rename",
4438 "sys_mkdir",
4439 "sys_rmdir", /* 40 */
4440 "sys_dup",
4441 "sys_pipe",
4442 "sys_times",
4443 "sys_ni_syscall", /* old prof syscall holder */
4444 "sys_brk", /* 45 */
4445 "sys_setgid16",
4446 "sys_getgid16",
4447 "sys_signal",
4448 "sys_geteuid16",
4449 "sys_getegid16", /* 50 */
4450 "sys_acct",
4451 "sys_umount", /* recycled never used phys() */
4452 "sys_ni_syscall", /* old lock syscall holder */
4453 "sys_ioctl",
4454 "sys_fcntl", /* 55 */
4455 "sys_ni_syscall", /* old mpx syscall holder */
4456 "sys_setpgid",
4457 "sys_ni_syscall", /* old ulimit syscall holder */
4458 "sys_olduname",
4459 "sys_umask", /* 60 */
4460 "sys_chroot",
4461 "sys_ustat",
4462 "sys_dup2",
4463 "sys_getppid",
4464 "sys_getpgrp", /* 65 */
4465 "sys_setsid",
4466 "sys_sigaction",
4467 "sys_sgetmask",
4468 "sys_ssetmask",
4469 "sys_setreuid16", /* 70 */
4470 "sys_setregid16",
4471 "sys_sigsuspend",
4472 "sys_sigpending",
4473 "sys_sethostname",
4474 "sys_setrlimit", /* 75 */
4475 "sys_old_getrlimit",
4476 "sys_getrusage",
4477 "sys_gettimeofday",
4478 "sys_settimeofday",
4479 "sys_getgroups16", /* 80 */
4480 "sys_setgroups16",
4481 "old_select",
4482 "sys_symlink",
4483 "sys_lstat",
4484 "sys_readlink", /* 85 */
4485 "sys_uselib",
4486 "sys_swapon",
4487 "sys_reboot",
4488 "old_readdir",
4489 "old_mmap", /* 90 */
4490 "sys_munmap",
4491 "sys_truncate",
4492 "sys_ftruncate",
4493 "sys_fchmod",
4494 "sys_fchown16", /* 95 */
4495 "sys_getpriority",
4496 "sys_setpriority",
4497 "sys_ni_syscall", /* old profil syscall holder */
4498 "sys_statfs",
4499 "sys_fstatfs", /* 100 */
4500 "sys_ioperm",
4501 "sys_socketcall",
4502 "sys_syslog",
4503 "sys_setitimer",
4504 "sys_getitimer", /* 105 */
4505 "sys_newstat",
4506 "sys_newlstat",
4507 "sys_newfstat",
4508 "sys_uname",
4509 "sys_iopl", /* 110 */
4510 "sys_vhangup",
4511 "sys_ni_syscall", /* old "idle" system call */
4512 "sys_vm86old",
4513 "sys_wait4",
4514 "sys_swapoff", /* 115 */
4515 "sys_sysinfo",
4516 "sys_ipc",
4517 "sys_fsync",
4518 "sys_sigreturn",
4519 "sys_clone", /* 120 */
4520 "sys_setdomainname",
4521 "sys_newuname",
4522 "sys_modify_ldt",
4523 "sys_adjtimex",
4524 "sys_mprotect", /* 125 */
4525 "sys_sigprocmask",
4526 "sys_ni_syscall", /* old "create_module" */
4527 "sys_init_module",
4528 "sys_delete_module",
4529 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4530 "sys_quotactl",
4531 "sys_getpgid",
4532 "sys_fchdir",
4533 "sys_bdflush",
4534 "sys_sysfs", /* 135 */
4535 "sys_personality",
4536 "sys_ni_syscall", /* reserved for afs_syscall */
4537 "sys_setfsuid16",
4538 "sys_setfsgid16",
4539 "sys_llseek", /* 140 */
4540 "sys_getdents",
4541 "sys_select",
4542 "sys_flock",
4543 "sys_msync",
4544 "sys_readv", /* 145 */
4545 "sys_writev",
4546 "sys_getsid",
4547 "sys_fdatasync",
4548 "sys_sysctl",
4549 "sys_mlock", /* 150 */
4550 "sys_munlock",
4551 "sys_mlockall",
4552 "sys_munlockall",
4553 "sys_sched_setparam",
4554 "sys_sched_getparam", /* 155 */
4555 "sys_sched_setscheduler",
4556 "sys_sched_getscheduler",
4557 "sys_sched_yield",
4558 "sys_sched_get_priority_max",
4559 "sys_sched_get_priority_min", /* 160 */
4560 "sys_sched_rr_get_interval",
4561 "sys_nanosleep",
4562 "sys_mremap",
4563 "sys_setresuid16",
4564 "sys_getresuid16", /* 165 */
4565 "sys_vm86",
4566 "sys_ni_syscall", /* Old sys_query_module */
4567 "sys_poll",
4568 "sys_nfsservctl",
4569 "sys_setresgid16", /* 170 */
4570 "sys_getresgid16",
4571 "sys_prctl",
4572 "sys_rt_sigreturn",
4573 "sys_rt_sigaction",
4574 "sys_rt_sigprocmask", /* 175 */
4575 "sys_rt_sigpending",
4576 "sys_rt_sigtimedwait",
4577 "sys_rt_sigqueueinfo",
4578 "sys_rt_sigsuspend",
4579 "sys_pread64", /* 180 */
4580 "sys_pwrite64",
4581 "sys_chown16",
4582 "sys_getcwd",
4583 "sys_capget",
4584 "sys_capset", /* 185 */
4585 "sys_sigaltstack",
4586 "sys_sendfile",
4587 "sys_ni_syscall", /* reserved for streams1 */
4588 "sys_ni_syscall", /* reserved for streams2 */
4589 "sys_vfork", /* 190 */
4590 "sys_getrlimit",
4591 "sys_mmap2",
4592 "sys_truncate64",
4593 "sys_ftruncate64",
4594 "sys_stat64", /* 195 */
4595 "sys_lstat64",
4596 "sys_fstat64",
4597 "sys_lchown",
4598 "sys_getuid",
4599 "sys_getgid", /* 200 */
4600 "sys_geteuid",
4601 "sys_getegid",
4602 "sys_setreuid",
4603 "sys_setregid",
4604 "sys_getgroups", /* 205 */
4605 "sys_setgroups",
4606 "sys_fchown",
4607 "sys_setresuid",
4608 "sys_getresuid",
4609 "sys_setresgid", /* 210 */
4610 "sys_getresgid",
4611 "sys_chown",
4612 "sys_setuid",
4613 "sys_setgid",
4614 "sys_setfsuid", /* 215 */
4615 "sys_setfsgid",
4616 "sys_pivot_root",
4617 "sys_mincore",
4618 "sys_madvise",
4619 "sys_getdents64", /* 220 */
4620 "sys_fcntl64",
4621 "sys_ni_syscall", /* reserved for TUX */
4622 "sys_ni_syscall",
4623 "sys_gettid",
4624 "sys_readahead", /* 225 */
4625 "sys_setxattr",
4626 "sys_lsetxattr",
4627 "sys_fsetxattr",
4628 "sys_getxattr",
4629 "sys_lgetxattr", /* 230 */
4630 "sys_fgetxattr",
4631 "sys_listxattr",
4632 "sys_llistxattr",
4633 "sys_flistxattr",
4634 "sys_removexattr", /* 235 */
4635 "sys_lremovexattr",
4636 "sys_fremovexattr",
4637 "sys_tkill",
4638 "sys_sendfile64",
4639 "sys_futex", /* 240 */
4640 "sys_sched_setaffinity",
4641 "sys_sched_getaffinity",
4642 "sys_set_thread_area",
4643 "sys_get_thread_area",
4644 "sys_io_setup", /* 245 */
4645 "sys_io_destroy",
4646 "sys_io_getevents",
4647 "sys_io_submit",
4648 "sys_io_cancel",
4649 "sys_fadvise64", /* 250 */
4650 "sys_ni_syscall",
4651 "sys_exit_group",
4652 "sys_lookup_dcookie",
4653 "sys_epoll_create",
4654 "sys_epoll_ctl", /* 255 */
4655 "sys_epoll_wait",
4656 "sys_remap_file_pages",
4657 "sys_set_tid_address",
4658 "sys_timer_create",
4659 "sys_timer_settime", /* 260 */
4660 "sys_timer_gettime",
4661 "sys_timer_getoverrun",
4662 "sys_timer_delete",
4663 "sys_clock_settime",
4664 "sys_clock_gettime", /* 265 */
4665 "sys_clock_getres",
4666 "sys_clock_nanosleep",
4667 "sys_statfs64",
4668 "sys_fstatfs64",
4669 "sys_tgkill", /* 270 */
4670 "sys_utimes",
4671 "sys_fadvise64_64",
4672 "sys_ni_syscall" /* sys_vserver */
4673 };
4674
4675 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4676 switch (uEAX)
4677 {
4678 default:
4679 if (uEAX < RT_ELEMENTS(apsz))
4680 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4681 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4682 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4683 else
4684 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4685 break;
4686
4687 }
4688}
4689
4690
4691/**
4692 * Dumps an OpenBSD system call.
4693 * @param pVM VM handle.
4694 */
4695void remR3DumpOBsdSyscall(PVM pVM)
4696{
4697 static const char *apsz[] =
4698 {
4699 "SYS_syscall", //0
4700 "SYS_exit", //1
4701 "SYS_fork", //2
4702 "SYS_read", //3
4703 "SYS_write", //4
4704 "SYS_open", //5
4705 "SYS_close", //6
4706 "SYS_wait4", //7
4707 "SYS_8",
4708 "SYS_link", //9
4709 "SYS_unlink", //10
4710 "SYS_11",
4711 "SYS_chdir", //12
4712 "SYS_fchdir", //13
4713 "SYS_mknod", //14
4714 "SYS_chmod", //15
4715 "SYS_chown", //16
4716 "SYS_break", //17
4717 "SYS_18",
4718 "SYS_19",
4719 "SYS_getpid", //20
4720 "SYS_mount", //21
4721 "SYS_unmount", //22
4722 "SYS_setuid", //23
4723 "SYS_getuid", //24
4724 "SYS_geteuid", //25
4725 "SYS_ptrace", //26
4726 "SYS_recvmsg", //27
4727 "SYS_sendmsg", //28
4728 "SYS_recvfrom", //29
4729 "SYS_accept", //30
4730 "SYS_getpeername", //31
4731 "SYS_getsockname", //32
4732 "SYS_access", //33
4733 "SYS_chflags", //34
4734 "SYS_fchflags", //35
4735 "SYS_sync", //36
4736 "SYS_kill", //37
4737 "SYS_38",
4738 "SYS_getppid", //39
4739 "SYS_40",
4740 "SYS_dup", //41
4741 "SYS_opipe", //42
4742 "SYS_getegid", //43
4743 "SYS_profil", //44
4744 "SYS_ktrace", //45
4745 "SYS_sigaction", //46
4746 "SYS_getgid", //47
4747 "SYS_sigprocmask", //48
4748 "SYS_getlogin", //49
4749 "SYS_setlogin", //50
4750 "SYS_acct", //51
4751 "SYS_sigpending", //52
4752 "SYS_osigaltstack", //53
4753 "SYS_ioctl", //54
4754 "SYS_reboot", //55
4755 "SYS_revoke", //56
4756 "SYS_symlink", //57
4757 "SYS_readlink", //58
4758 "SYS_execve", //59
4759 "SYS_umask", //60
4760 "SYS_chroot", //61
4761 "SYS_62",
4762 "SYS_63",
4763 "SYS_64",
4764 "SYS_65",
4765 "SYS_vfork", //66
4766 "SYS_67",
4767 "SYS_68",
4768 "SYS_sbrk", //69
4769 "SYS_sstk", //70
4770 "SYS_61",
4771 "SYS_vadvise", //72
4772 "SYS_munmap", //73
4773 "SYS_mprotect", //74
4774 "SYS_madvise", //75
4775 "SYS_76",
4776 "SYS_77",
4777 "SYS_mincore", //78
4778 "SYS_getgroups", //79
4779 "SYS_setgroups", //80
4780 "SYS_getpgrp", //81
4781 "SYS_setpgid", //82
4782 "SYS_setitimer", //83
4783 "SYS_84",
4784 "SYS_85",
4785 "SYS_getitimer", //86
4786 "SYS_87",
4787 "SYS_88",
4788 "SYS_89",
4789 "SYS_dup2", //90
4790 "SYS_91",
4791 "SYS_fcntl", //92
4792 "SYS_select", //93
4793 "SYS_94",
4794 "SYS_fsync", //95
4795 "SYS_setpriority", //96
4796 "SYS_socket", //97
4797 "SYS_connect", //98
4798 "SYS_99",
4799 "SYS_getpriority", //100
4800 "SYS_101",
4801 "SYS_102",
4802 "SYS_sigreturn", //103
4803 "SYS_bind", //104
4804 "SYS_setsockopt", //105
4805 "SYS_listen", //106
4806 "SYS_107",
4807 "SYS_108",
4808 "SYS_109",
4809 "SYS_110",
4810 "SYS_sigsuspend", //111
4811 "SYS_112",
4812 "SYS_113",
4813 "SYS_114",
4814 "SYS_115",
4815 "SYS_gettimeofday", //116
4816 "SYS_getrusage", //117
4817 "SYS_getsockopt", //118
4818 "SYS_119",
4819 "SYS_readv", //120
4820 "SYS_writev", //121
4821 "SYS_settimeofday", //122
4822 "SYS_fchown", //123
4823 "SYS_fchmod", //124
4824 "SYS_125",
4825 "SYS_setreuid", //126
4826 "SYS_setregid", //127
4827 "SYS_rename", //128
4828 "SYS_129",
4829 "SYS_130",
4830 "SYS_flock", //131
4831 "SYS_mkfifo", //132
4832 "SYS_sendto", //133
4833 "SYS_shutdown", //134
4834 "SYS_socketpair", //135
4835 "SYS_mkdir", //136
4836 "SYS_rmdir", //137
4837 "SYS_utimes", //138
4838 "SYS_139",
4839 "SYS_adjtime", //140
4840 "SYS_141",
4841 "SYS_142",
4842 "SYS_143",
4843 "SYS_144",
4844 "SYS_145",
4845 "SYS_146",
4846 "SYS_setsid", //147
4847 "SYS_quotactl", //148
4848 "SYS_149",
4849 "SYS_150",
4850 "SYS_151",
4851 "SYS_152",
4852 "SYS_153",
4853 "SYS_154",
4854 "SYS_nfssvc", //155
4855 "SYS_156",
4856 "SYS_157",
4857 "SYS_158",
4858 "SYS_159",
4859 "SYS_160",
4860 "SYS_getfh", //161
4861 "SYS_162",
4862 "SYS_163",
4863 "SYS_164",
4864 "SYS_sysarch", //165
4865 "SYS_166",
4866 "SYS_167",
4867 "SYS_168",
4868 "SYS_169",
4869 "SYS_170",
4870 "SYS_171",
4871 "SYS_172",
4872 "SYS_pread", //173
4873 "SYS_pwrite", //174
4874 "SYS_175",
4875 "SYS_176",
4876 "SYS_177",
4877 "SYS_178",
4878 "SYS_179",
4879 "SYS_180",
4880 "SYS_setgid", //181
4881 "SYS_setegid", //182
4882 "SYS_seteuid", //183
4883 "SYS_lfs_bmapv", //184
4884 "SYS_lfs_markv", //185
4885 "SYS_lfs_segclean", //186
4886 "SYS_lfs_segwait", //187
4887 "SYS_188",
4888 "SYS_189",
4889 "SYS_190",
4890 "SYS_pathconf", //191
4891 "SYS_fpathconf", //192
4892 "SYS_swapctl", //193
4893 "SYS_getrlimit", //194
4894 "SYS_setrlimit", //195
4895 "SYS_getdirentries", //196
4896 "SYS_mmap", //197
4897 "SYS___syscall", //198
4898 "SYS_lseek", //199
4899 "SYS_truncate", //200
4900 "SYS_ftruncate", //201
4901 "SYS___sysctl", //202
4902 "SYS_mlock", //203
4903 "SYS_munlock", //204
4904 "SYS_205",
4905 "SYS_futimes", //206
4906 "SYS_getpgid", //207
4907 "SYS_xfspioctl", //208
4908 "SYS_209",
4909 "SYS_210",
4910 "SYS_211",
4911 "SYS_212",
4912 "SYS_213",
4913 "SYS_214",
4914 "SYS_215",
4915 "SYS_216",
4916 "SYS_217",
4917 "SYS_218",
4918 "SYS_219",
4919 "SYS_220",
4920 "SYS_semget", //221
4921 "SYS_222",
4922 "SYS_223",
4923 "SYS_224",
4924 "SYS_msgget", //225
4925 "SYS_msgsnd", //226
4926 "SYS_msgrcv", //227
4927 "SYS_shmat", //228
4928 "SYS_229",
4929 "SYS_shmdt", //230
4930 "SYS_231",
4931 "SYS_clock_gettime", //232
4932 "SYS_clock_settime", //233
4933 "SYS_clock_getres", //234
4934 "SYS_235",
4935 "SYS_236",
4936 "SYS_237",
4937 "SYS_238",
4938 "SYS_239",
4939 "SYS_nanosleep", //240
4940 "SYS_241",
4941 "SYS_242",
4942 "SYS_243",
4943 "SYS_244",
4944 "SYS_245",
4945 "SYS_246",
4946 "SYS_247",
4947 "SYS_248",
4948 "SYS_249",
4949 "SYS_minherit", //250
4950 "SYS_rfork", //251
4951 "SYS_poll", //252
4952 "SYS_issetugid", //253
4953 "SYS_lchown", //254
4954 "SYS_getsid", //255
4955 "SYS_msync", //256
4956 "SYS_257",
4957 "SYS_258",
4958 "SYS_259",
4959 "SYS_getfsstat", //260
4960 "SYS_statfs", //261
4961 "SYS_fstatfs", //262
4962 "SYS_pipe", //263
4963 "SYS_fhopen", //264
4964 "SYS_265",
4965 "SYS_fhstatfs", //266
4966 "SYS_preadv", //267
4967 "SYS_pwritev", //268
4968 "SYS_kqueue", //269
4969 "SYS_kevent", //270
4970 "SYS_mlockall", //271
4971 "SYS_munlockall", //272
4972 "SYS_getpeereid", //273
4973 "SYS_274",
4974 "SYS_275",
4975 "SYS_276",
4976 "SYS_277",
4977 "SYS_278",
4978 "SYS_279",
4979 "SYS_280",
4980 "SYS_getresuid", //281
4981 "SYS_setresuid", //282
4982 "SYS_getresgid", //283
4983 "SYS_setresgid", //284
4984 "SYS_285",
4985 "SYS_mquery", //286
4986 "SYS_closefrom", //287
4987 "SYS_sigaltstack", //288
4988 "SYS_shmget", //289
4989 "SYS_semop", //290
4990 "SYS_stat", //291
4991 "SYS_fstat", //292
4992 "SYS_lstat", //293
4993 "SYS_fhstat", //294
4994 "SYS___semctl", //295
4995 "SYS_shmctl", //296
4996 "SYS_msgctl", //297
4997 "SYS_MAXSYSCALL", //298
4998 //299
4999 //300
5000 };
5001 uint32_t uEAX;
5002 if (!LogIsEnabled())
5003 return;
5004 uEAX = CPUMGetGuestEAX(pVM);
5005 switch (uEAX)
5006 {
5007 default:
5008 if (uEAX < RT_ELEMENTS(apsz))
5009 {
5010 uint32_t au32Args[8] = {0};
5011 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
5012 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5013 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5014 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5015 }
5016 else
5017 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
5018 break;
5019 }
5020}
5021
5022
5023#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5024/**
5025 * The Dll main entry point (stub).
5026 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    /* No CRT to initialize; all three parameters are deliberately unused. */
    return true;
}
5031
/**
 * Minimal byte-wise memcpy for the no-CRT build.
 *
 * @returns dst.
 * @param   dst     Destination buffer (must not overlap src).
 * @param   src     Source buffer; read-only, so the local alias is
 *                  const-qualified (the old code dropped the const).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t *pbDst = dst;
    const uint8_t *pbSrc = src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5039
5040#endif
5041
void cpu_smm_update(CPUState* env)
{
    /* Intentionally empty: the VBox integration has nothing to update on
       SMM state changes -- qemu merely requires the callback to exist. */
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette