VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 13627

Last change on this file since 13627 was 13615, checked in by vboxsync, 17 years ago

build fix (Knut, could you please update both recompilers, if making changes, or at least ping me on that?)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 157.5 KB
Line 
1/* $Id: VBoxRecompiler.c 13615 2008-10-28 12:54:17Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30
31void cpu_exec_init_all(unsigned long tb_size);
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
62extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
63extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
64extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
65extern void tlb_flush(CPUState *env, int flush_global);
66extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
67extern void sync_ldtr(CPUX86State *env1, int selector);
68extern int sync_tr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy the 80-bit FPU/MMX register at pSrc to pDst.
 *
 * Implemented as a plain struct assignment of X86FPUMMX so the compiler can
 * inline the copy; this is probably faster than *calling* memcpy.
 * Wrapped in do/while(0) so it behaves as a single statement.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
112/** @todo Move stats to REM::s some rainy day we have nothing do to. */
113#ifdef VBOX_WITH_STATISTICS
114static STAMPROFILEADV gStatExecuteSingleInstr;
115static STAMPROFILEADV gStatCompilationQEmu;
116static STAMPROFILEADV gStatRunCodeQEmu;
117static STAMPROFILEADV gStatTotalTimeQEmu;
118static STAMPROFILEADV gStatTimers;
119static STAMPROFILEADV gStatTBLookup;
120static STAMPROFILEADV gStatIRQ;
121static STAMPROFILEADV gStatRawCheck;
122static STAMPROFILEADV gStatMemRead;
123static STAMPROFILEADV gStatMemWrite;
124static STAMPROFILE gStatGCPhys2HCVirt;
125static STAMPROFILE gStatHCVirt2GCPhys;
126static STAMCOUNTER gStatCpuGetTSC;
127static STAMCOUNTER gStatRefuseTFInhibit;
128static STAMCOUNTER gStatRefuseVM86;
129static STAMCOUNTER gStatRefusePaging;
130static STAMCOUNTER gStatRefusePAE;
131static STAMCOUNTER gStatRefuseIOPLNot0;
132static STAMCOUNTER gStatRefuseIF0;
133static STAMCOUNTER gStatRefuseCode16;
134static STAMCOUNTER gStatRefuseWP0;
135static STAMCOUNTER gStatRefuseRing1or2;
136static STAMCOUNTER gStatRefuseCanExecute;
137static STAMCOUNTER gStatREMGDTChange;
138static STAMCOUNTER gStatREMIDTChange;
139static STAMCOUNTER gStatREMLDTRChange;
140static STAMCOUNTER gStatREMTRChange;
141static STAMCOUNTER gStatSelOutOfSync[6];
142static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
143static STAMCOUNTER gStatFlushTBs;
144#endif
145
/*
 * Global stuff.
 */

/** MMIO read callbacks, indexed by access size (0=byte, 1=word, 2=dword).
 *  Registered with the recompiler via cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access size (0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access handler read callbacks, indexed by access size (0=byte, 1=word, 2=dword). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access handler write callbacks, indexed by access size (0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
181
182
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 *  A single optional numeric/boolean argument selecting the new state. */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors registered with the debugger console in REMR3Init(). */
static const DBGCCMD    g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = ELEMENTS(g_aArgRemStep),
        .pResultDesc    = NULL,
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
214
215
216/*******************************************************************************
217* Internal Functions *
218*******************************************************************************/
219static void remAbort(int rc, const char *pszTip);
220extern int testmath(void);
221
222/* Put them here to avoid unused variable warning. */
223AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
224#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
225//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
226/* Why did this have to be identical?? */
227AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
228#else
229AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
230#endif
231
232
233/* Prologue code, must be in lower 4G to simplify jumps to/from generated code */
234uint8_t* code_gen_prologue;
235
236/**
237 * Initializes the REM.
238 *
239 * @returns VBox status code.
240 * @param pVM The VM to operate on.
241 */
242REMR3DECL(int) REMR3Init(PVM pVM)
243{
244 uint32_t u32Dummy;
245 unsigned i;
246 int rc;
247
248 /*
249 * Assert sanity.
250 */
251 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
252 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
253 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
254#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
255 Assert(!testmath());
256#endif
257 /*
258 * Init some internal data members.
259 */
260 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
261 pVM->rem.s.Env.pVM = pVM;
262#ifdef CPU_RAW_MODE_INIT
263 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
264#endif
265
266 /* ctx. */
267 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
268 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
269
270 /* ignore all notifications */
271 pVM->rem.s.fIgnoreAll = true;
272
273 code_gen_prologue = RTMemExecAlloc(_1K);
274
275 cpu_exec_init_all(0);
276
277 /*
278 * Init the recompiler.
279 */
280 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
281 {
282 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
283 return VERR_GENERAL_FAILURE;
284 }
285 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
286 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
287
288 /* allocate code buffer for single instruction emulation. */
289 pVM->rem.s.Env.cbCodeBuffer = 4096;
290 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
291 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
292
293 /* finally, set the cpu_single_env global. */
294 cpu_single_env = &pVM->rem.s.Env;
295
296 /* Nothing is pending by default */
297 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
298
299 /*
300 * Register ram types.
301 */
302 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
303 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
304 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
305 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
306 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
307
308 /* stop ignoring. */
309 pVM->rem.s.fIgnoreAll = false;
310
311 /*
312 * Register the saved state data unit.
313 */
314 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
315 NULL, remR3Save, NULL,
316 NULL, remR3Load, NULL);
317 if (VBOX_FAILURE(rc))
318 return rc;
319
320#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
321 /*
322 * Debugger commands.
323 */
324 static bool fRegisteredCmds = false;
325 if (!fRegisteredCmds)
326 {
327 int rc = DBGCRegisterCommands(&g_aCmds[0], ELEMENTS(g_aCmds));
328 if (VBOX_SUCCESS(rc))
329 fRegisteredCmds = true;
330 }
331#endif
332
333#ifdef VBOX_WITH_STATISTICS
334 /*
335 * Statistics.
336 */
337 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
338 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
339 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
340 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
341 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
342 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
343 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
344 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
345 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
346 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
347 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
348 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
349
350 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
351
352 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
353 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
354 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
355 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
356 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
357 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
358 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
359 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
360 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
361 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
362 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
363
364 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
365 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
366 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
367 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
368
369 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
370 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
371 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
372 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
373 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
375
376 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
378 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
379 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
382
383
384#endif
385
386#ifdef DEBUG_ALL_LOGGING
387 loglevel = ~0;
388#endif
389
390 return rc;
391}
392
393
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently a no-op: the recompiler state lives inside the VM structure and
 * the exec-alloc'ed buffers are reclaimed when the process dies.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    return VINF_SUCCESS;
}
407
408
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.  Notifications are suppressed around cpu_reset()
     * (fIgnoreAll) so the resulting CR/TLB changes don't echo back into VBox.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;   /* drop the queued page invalidations */
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
}
430
431
/**
 * Execute state save operation.
 *
 * Layout (all 32-bit): hflags, ~0 separator, CPU_RAW_RING0 flag,
 * u32PendingInterrupt, ~0 terminator.  remR3Load() must read the
 * fields back in exactly this order.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Intermediate put errors are caught here: SSM latches the first failure
       and returns it from the final call. */
    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
457
458
/**
 * Execute state load operation.
 *
 * Counterpart of remR3Save(); additionally understands the 1.6 saved state
 * layout which carried a redundant CPU state dump and the invalidated page
 * list.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     *
     * NOTE(review): the error returns below leave fIgnoreAll set to true;
     * presumably acceptable because a failed load destroys the VM - confirm.
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (VBOX_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff (the queued page invalidations; newer saved
         * states no longer store these).
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (VBOX_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (VBOX_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (VBOX_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the whole TLB so the loaded hflags/state take effect.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
580
581
582
583#undef LOG_GROUP
584#define LOG_GROUP LOG_GROUP_REM_RUN
585
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code (VINF_EM_DBG_STEPPED on a successful step).
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.  interrupt_request is saved so
     * it can be restored below.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping,
     * otherwise cpu_exec would report the breakpoint instead of the step.
     * fBp remembers whether one was removed so it can be re-inserted afterwards.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves.
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* The expected outcome of a single step: nudge the TSC and virtual clock. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Anything else is unexpected while single stepping; map it anyway. */
        AssertMsgFailed(("Damn, this shouldn't happen! cpu_exec returned %d while singlestepping\n", rc));
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM rc was raised from within the recompiler; fetch and clear it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
667
668
669/**
670 * Set a breakpoint using the REM facilities.
671 *
672 * @returns VBox status code.
673 * @param pVM The VM handle.
674 * @param Address The breakpoint address.
675 * @thread The emulation thread.
676 */
677REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
678{
679 VM_ASSERT_EMT(pVM);
680 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
681 {
682 LogFlow(("REMR3BreakpointSet: Address=%VGv\n", Address));
683 return VINF_SUCCESS;
684 }
685 LogFlow(("REMR3BreakpointSet: Address=%VGv - failed!\n", Address));
686 return VERR_REM_NO_MORE_BP_SLOTS;
687}
688
689
690/**
691 * Clears a breakpoint set by REMR3BreakpointSet().
692 *
693 * @returns VBox status code.
694 * @param pVM The VM handle.
695 * @param Address The breakpoint address.
696 * @thread The emulation thread.
697 */
698REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
699{
700 VM_ASSERT_EMT(pVM);
701 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
702 {
703 LogFlow(("REMR3BreakpointClear: Address=%VGv\n", Address));
704 return VINF_SUCCESS;
705 }
706 LogFlow(("REMR3BreakpointClear: Address=%VGv - not found!\n", Address));
707 return VERR_REM_BP_NOT_FOUND;
708}
709
710
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * Two implementations are kept below: the live one (#if 1) uses the
 * CPU_INTERRUPT_SINGLE_INSTR request, the dead branch used cpu_single_step().
 *
 * @returns VBox status code (VINF_EM_* scheduling hint or error).
 * @param   pVM         VM handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /* Save interrupt_request so it can be restored after execution. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
#if 1

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * A breakpoint at the current PC is reported instead of the step,
             * so scan the breakpoint table to disambiguate.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Vrc iBP=%d GCPtrPC=%VGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             * Fetch the stashed status and reset the stash.
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
#else
        /* Dead alternative implementation: single stepping via cpu_single_step(). */
        pVM->rem.s.Env.interrupt_request = 0;
        cpu_single_step(&pVM->rem.s.Env, 1);

        /*
         * Execute and handle the return code.
         * We execute without enabling the cpu tick, so on success we'll
         * just flip it on and off to make sure it moves.
         *
         * (We do not use emulate_single_instr() because that doesn't enter the
         * right way in will cause serious trouble if a longjmp was attempted.)
         */
# ifdef DEBUG_bird
        remR3DisasInstr(&pVM->rem.s.Env, 1, "REMR3EmulateInstruction");
# endif
        TMNotifyStartOfExecution(pVM);
        /* Retry (bounded) until eip actually moves past spurious EXCP_DEBUG/EXCP_EXECUTE_RAW exits. */
        int cTimesMax = 16384;
        uint32_t eip = pVM->rem.s.Env.eip;
        do
        {
            rc = cpu_exec(&pVM->rem.s.Env);

        } while (   eip == pVM->rem.s.Env.eip
                 && (rc == EXCP_DEBUG || rc == EXCP_EXECUTE_RAW)
                 && --cTimesMax > 0);
        TMNotifyEndOfExecution(pVM);
        switch (rc)
        {
            /*
             * Single step, we assume!
             * (A breakpoint at the current PC would be misreported here.)
             */
            case EXCP_DEBUG:
            {
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG\n"));
                rc = VINF_EM_RESCHEDULE;
                break;
            }

            /*
             * We cannot be interrupted!
             */
            case EXCP_INTERRUPT:
                AssertMsgFailed(("Shouldn't happen! Everything was locked!\n"));
                rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC rc=%Vrc\n", pVM->rem.s.rc));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_SUCCESS;
                break;
        }

        /*
         * Switch back the state.
         */
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        /* Restore the saved interrupt request and sync state back to VBox. */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Vrc (cs:eip=%04x:%VGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
    return rc;
}
956
957
958/**
959 * Runs code in recompiled mode.
960 *
961 * Before calling this function the REM state needs to be in sync with
962 * the VM. Call REMR3State() to perform the sync. It's only necessary
963 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
964 * and after calling REMR3StateBack().
965 *
966 * @returns VBox status code.
967 *
968 * @param pVM VM Handle.
969 */
970REMR3DECL(int) REMR3Run(PVM pVM)
971{
972 int rc;
973 Log2(("REMR3Run: (cs:eip=%04x:%VGv)\n", pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
974 Assert(pVM->rem.s.fInREM);
975
976 TMNotifyStartOfExecution(pVM);
977 rc = cpu_exec(&pVM->rem.s.Env);
978 TMNotifyEndOfExecution(pVM);
979 switch (rc)
980 {
981 /*
982 * This happens when the execution was interrupted
983 * by an external event, like pending timers.
984 */
985 case EXCP_INTERRUPT:
986 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
987 rc = VINF_SUCCESS;
988 break;
989
990 /*
991 * hlt instruction.
992 */
993 case EXCP_HLT:
994 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
995 rc = VINF_EM_HALT;
996 break;
997
998 /*
999 * The VM has halted.
1000 */
1001 case EXCP_HALTED:
1002 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1003 rc = VINF_EM_HALT;
1004 break;
1005
1006 /*
1007 * Breakpoint/single step.
1008 */
1009 case EXCP_DEBUG:
1010 {
1011#if 0//def DEBUG_bird
1012 static int iBP = 0;
1013 printf("howdy, breakpoint! iBP=%d\n", iBP);
1014 switch (iBP)
1015 {
1016 case 0:
1017 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1018 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1019 //pVM->rem.s.Env.interrupt_request = 0;
1020 //pVM->rem.s.Env.exception_index = -1;
1021 //g_fInterruptDisabled = 1;
1022 rc = VINF_SUCCESS;
1023 asm("int3");
1024 break;
1025 default:
1026 asm("int3");
1027 break;
1028 }
1029 iBP++;
1030#else
1031 /* breakpoint or single step? */
1032 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1033 int iBP;
1034 rc = VINF_EM_DBG_STEPPED;
1035 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1036 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1037 {
1038 rc = VINF_EM_DBG_BREAKPOINT;
1039 break;
1040 }
1041 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Vrc iBP=%d GCPtrPC=%VGv\n", rc, iBP, GCPtrPC));
1042#endif
1043 break;
1044 }
1045
1046 /*
1047 * Switch to RAW-mode.
1048 */
1049 case EXCP_EXECUTE_RAW:
1050 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1051 rc = VINF_EM_RESCHEDULE_RAW;
1052 break;
1053
1054 /*
1055 * Switch to hardware accelerated RAW-mode.
1056 */
1057 case EXCP_EXECUTE_HWACC:
1058 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1059 rc = VINF_EM_RESCHEDULE_HWACC;
1060 break;
1061
1062 /*
1063 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1064 */
1065 case EXCP_RC:
1066 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Vrc\n", pVM->rem.s.rc));
1067 rc = pVM->rem.s.rc;
1068 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1069 break;
1070
1071 /*
1072 * Figure out the rest when they arrive....
1073 */
1074 default:
1075 AssertMsgFailed(("rc=%d\n", rc));
1076 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1077 rc = VINF_SUCCESS;
1078 break;
1079 }
1080
1081 Log2(("REMR3Run: returns %Vrc (cs:eip=%04x:%VGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
1082 return rc;
1083}
1084
1085
1086/**
1087 * Check if the cpu state is suitable for Raw execution.
1088 *
1089 * @returns boolean
1090 * @param env The CPU env struct.
1091 * @param eip The EIP to check this for (might differ from env->eip).
1092 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1093 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1094 *
1095 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1096 */
1097bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1098{
1099 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1100 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1101 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1102 uint32_t u32CR0;
1103
1104 /* Update counter. */
1105 env->pVM->rem.s.cCanExecuteRaw++;
1106
1107 if (HWACCMIsEnabled(env->pVM))
1108 {
1109 CPUMCTX Ctx;
1110
1111 env->state |= CPU_RAW_HWACC;
1112
1113 /*
1114 * Create partial context for HWACCMR3CanExecuteGuest
1115 */
1116 Ctx.cr0 = env->cr[0];
1117 Ctx.cr3 = env->cr[3];
1118 Ctx.cr4 = env->cr[4];
1119
1120 Ctx.tr = env->tr.selector;
1121 Ctx.trHid.u64Base = env->tr.base;
1122 Ctx.trHid.u32Limit = env->tr.limit;
1123 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1124
1125 Ctx.idtr.cbIdt = env->idt.limit;
1126 Ctx.idtr.pIdt = env->idt.base;
1127
1128 Ctx.eflags.u32 = env->eflags;
1129
1130 Ctx.cs = env->segs[R_CS].selector;
1131 Ctx.csHid.u64Base = env->segs[R_CS].base;
1132 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1133 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1134
1135 Ctx.ds = env->segs[R_DS].selector;
1136 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1137 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1138 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1139
1140 Ctx.es = env->segs[R_ES].selector;
1141 Ctx.esHid.u64Base = env->segs[R_ES].base;
1142 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1143 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1144
1145 Ctx.fs = env->segs[R_FS].selector;
1146 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1147 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1148 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1149
1150 Ctx.gs = env->segs[R_GS].selector;
1151 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1152 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1153 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1154
1155 Ctx.ss = env->segs[R_SS].selector;
1156 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1157 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1158 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1159
1160 Ctx.msrEFER = env->efer;
1161
1162 /* Hardware accelerated raw-mode:
1163 *
1164 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1165 */
1166 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1167 {
1168 *piException = EXCP_EXECUTE_HWACC;
1169 return true;
1170 }
1171 return false;
1172 }
1173
1174 /*
1175 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1176 * or 32 bits protected mode ring 0 code
1177 *
1178 * The tests are ordered by the likelyhood of being true during normal execution.
1179 */
1180 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1181 {
1182 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1183 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1184 return false;
1185 }
1186
1187#ifndef VBOX_RAW_V86
1188 if (fFlags & VM_MASK) {
1189 STAM_COUNTER_INC(&gStatRefuseVM86);
1190 Log2(("raw mode refused: VM_MASK\n"));
1191 return false;
1192 }
1193#endif
1194
1195 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1196 {
1197#ifndef DEBUG_bird
1198 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1199#endif
1200 return false;
1201 }
1202
1203 if (env->singlestep_enabled)
1204 {
1205 //Log2(("raw mode refused: Single step\n"));
1206 return false;
1207 }
1208
1209 if (env->nb_breakpoints > 0)
1210 {
1211 //Log2(("raw mode refused: Breakpoints\n"));
1212 return false;
1213 }
1214
1215 u32CR0 = env->cr[0];
1216 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1217 {
1218 STAM_COUNTER_INC(&gStatRefusePaging);
1219 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1220 return false;
1221 }
1222
1223 if (env->cr[4] & CR4_PAE_MASK)
1224 {
1225 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1226 {
1227 STAM_COUNTER_INC(&gStatRefusePAE);
1228 return false;
1229 }
1230 }
1231
1232 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1233 {
1234 if (!EMIsRawRing3Enabled(env->pVM))
1235 return false;
1236
1237 if (!(env->eflags & IF_MASK))
1238 {
1239 STAM_COUNTER_INC(&gStatRefuseIF0);
1240 Log2(("raw mode refused: IF (RawR3)\n"));
1241 return false;
1242 }
1243
1244 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1245 {
1246 STAM_COUNTER_INC(&gStatRefuseWP0);
1247 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1248 return false;
1249 }
1250 }
1251 else
1252 {
1253 if (!EMIsRawRing0Enabled(env->pVM))
1254 return false;
1255
1256 // Let's start with pure 32 bits ring 0 code first
1257 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1258 {
1259 STAM_COUNTER_INC(&gStatRefuseCode16);
1260 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1261 return false;
1262 }
1263
1264 // Only R0
1265 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1266 {
1267 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1268 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1269 return false;
1270 }
1271
1272 if (!(u32CR0 & CR0_WP_MASK))
1273 {
1274 STAM_COUNTER_INC(&gStatRefuseWP0);
1275 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1276 return false;
1277 }
1278
1279 if (PATMIsPatchGCAddr(env->pVM, eip))
1280 {
1281 Log2(("raw r0 mode forced: patch code\n"));
1282 *piException = EXCP_EXECUTE_RAW;
1283 return true;
1284 }
1285
1286#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1287 if (!(env->eflags & IF_MASK))
1288 {
1289 STAM_COUNTER_INC(&gStatRefuseIF0);
1290 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1291 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1292 return false;
1293 }
1294#endif
1295
1296 env->state |= CPU_RAW_RING0;
1297 }
1298
1299 /*
1300 * Don't reschedule the first time we're called, because there might be
1301 * special reasons why we're here that is not covered by the above checks.
1302 */
1303 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1304 {
1305 Log2(("raw mode refused: first scheduling\n"));
1306 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1307 return false;
1308 }
1309
1310 Assert(PGMPhysIsA20Enabled(env->pVM));
1311 *piException = EXCP_EXECUTE_RAW;
1312 return true;
1313}
1314
1315
1316/**
1317 * Fetches a code byte.
1318 *
1319 * @returns Success indicator (bool) for ease of use.
1320 * @param env The CPU environment structure.
1321 * @param GCPtrInstr Where to fetch code.
1322 * @param pu8Byte Where to store the byte on success
1323 */
1324bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1325{
1326 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1327 if (VBOX_SUCCESS(rc))
1328 return true;
1329 return false;
1330}
1331
1332
1333/**
1334 * Flush (or invalidate if you like) page table/dir entry.
1335 *
1336 * (invlpg instruction; tlb_flush_page)
1337 *
1338 * @param env Pointer to cpu environment.
1339 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1340 */
1341void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1342{
1343 PVM pVM = env->pVM;
1344 PCPUMCTX pCtx;
1345 int rc;
1346
1347 /*
1348 * When we're replaying invlpg instructions or restoring a saved
1349 * state we disable this path.
1350 */
1351 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1352 return;
1353 Log(("remR3FlushPage: GCPtr=%VGv\n", GCPtr));
1354 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1355
1356 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1357
1358 /*
1359 * Update the control registers before calling PGMFlushPage.
1360 */
1361 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1362 pCtx->cr0 = env->cr[0];
1363 pCtx->cr3 = env->cr[3];
1364 pCtx->cr4 = env->cr[4];
1365
1366 /*
1367 * Let PGM do the rest.
1368 */
1369 rc = PGMInvalidatePage(pVM, GCPtr);
1370 if (VBOX_FAILURE(rc))
1371 {
1372 AssertMsgFailed(("remR3FlushPage %VGv failed with %d!!\n", GCPtr, rc));
1373 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1374 }
1375 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1376}
1377
1378
1379/**
1380 * Called from tlb_protect_code in order to write monitor a code page.
1381 *
1382 * @param env Pointer to the CPU environment.
1383 * @param GCPtr Code page to monitor
1384 */
1385void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1386{
1387#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1388 Assert(env->pVM->rem.s.fInREM);
1389 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1390 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1391 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1392 && !(env->eflags & VM_MASK) /* no V86 mode */
1393 && !HWACCMIsEnabled(env->pVM))
1394 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1395#endif
1396}
1397
1398/**
1399 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1400 *
1401 * @param env Pointer to the CPU environment.
1402 * @param GCPtr Code page to monitor
1403 */
1404void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1405{
1406 Assert(env->pVM->rem.s.fInREM);
1407#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1408 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1409 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1410 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1411 && !(env->eflags & VM_MASK) /* no V86 mode */
1412 && !HWACCMIsEnabled(env->pVM))
1413 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1414#endif
1415}
1416
1417
1418/**
1419 * Called when the CPU is initialized, any of the CRx registers are changed or
1420 * when the A20 line is modified.
1421 *
1422 * @param env Pointer to the CPU environment.
1423 * @param fGlobal Set if the flush is global.
1424 */
1425void remR3FlushTLB(CPUState *env, bool fGlobal)
1426{
1427 PVM pVM = env->pVM;
1428 PCPUMCTX pCtx;
1429
1430 /*
1431 * When we're replaying invlpg instructions or restoring a saved
1432 * state we disable this path.
1433 */
1434 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1435 return;
1436 Assert(pVM->rem.s.fInREM);
1437
1438 /*
1439 * The caller doesn't check cr4, so we have to do that for ourselves.
1440 */
1441 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1442 fGlobal = true;
1443 Log(("remR3FlushTLB: CR0=%RGr CR3=%RGr CR4=%RGr %s\n", env->cr[0], env->cr[3], env->cr[4], fGlobal ? " global" : ""));
1444
1445 /*
1446 * Update the control registers before calling PGMR3FlushTLB.
1447 */
1448 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1449 pCtx->cr0 = env->cr[0];
1450 pCtx->cr3 = env->cr[3];
1451 pCtx->cr4 = env->cr[4];
1452
1453 /*
1454 * Let PGM do the rest.
1455 */
1456 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1457}
1458
1459
1460/**
1461 * Called when any of the cr0, cr4 or efer registers is updated.
1462 *
1463 * @param env Pointer to the CPU environment.
1464 */
1465void remR3ChangeCpuMode(CPUState *env)
1466{
1467 int rc;
1468 PVM pVM = env->pVM;
1469 PCPUMCTX pCtx;
1470
1471 /*
1472 * When we're replaying loads or restoring a saved
1473 * state this path is disabled.
1474 */
1475 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
1476 return;
1477 Assert(pVM->rem.s.fInREM);
1478
1479 /*
1480 * Update the control registers before calling PGMChangeMode()
1481 * as it may need to map whatever cr3 is pointing to.
1482 */
1483 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1484 pCtx->cr0 = env->cr[0];
1485 pCtx->cr3 = env->cr[3];
1486 pCtx->cr4 = env->cr[4];
1487
1488#ifdef TARGET_X86_64
1489 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
1490 if (rc != VINF_SUCCESS)
1491 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Vrc\n", env->cr[0], env->cr[4], env->efer, rc);
1492#else
1493 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
1494 if (rc != VINF_SUCCESS)
1495 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Vrc\n", env->cr[0], env->cr[4], 0LL, rc);
1496#endif
1497}
1498
1499
1500/**
1501 * Called from compiled code to run dma.
1502 *
1503 * @param env Pointer to the CPU environment.
1504 */
1505void remR3DmaRun(CPUState *env)
1506{
1507 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1508 PDMR3DmaRun(env->pVM);
1509 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1510}
1511
1512
1513/**
1514 * Called from compiled code to schedule pending timers in VMM
1515 *
1516 * @param env Pointer to the CPU environment.
1517 */
1518void remR3TimersRun(CPUState *env)
1519{
1520 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1521 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1522 TMR3TimerQueuesDo(env->pVM);
1523 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1524 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1525}
1526
1527
1528/**
1529 * Record trap occurance
1530 *
1531 * @returns VBox status code
1532 * @param env Pointer to the CPU environment.
1533 * @param uTrap Trap nr
1534 * @param uErrorCode Error code
1535 * @param pvNextEIP Next EIP
1536 */
1537int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, uint32_t pvNextEIP)
1538{
1539 PVM pVM = env->pVM;
1540#ifdef VBOX_WITH_STATISTICS
1541 static STAMCOUNTER s_aStatTrap[255];
1542 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1543#endif
1544
1545#ifdef VBOX_WITH_STATISTICS
1546 if (uTrap < 255)
1547 {
1548 if (!s_aRegisters[uTrap])
1549 {
1550 char szStatName[64];
1551 s_aRegisters[uTrap] = true;
1552 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1553 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1554 }
1555 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1556 }
1557#endif
1558 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%VGv eip=%VGv cr2=%VGv\n", uTrap, uErrorCode, pvNextEIP, env->eip, env->cr[2]));
1559 if( uTrap < 0x20
1560 && (env->cr[0] & X86_CR0_PE)
1561 && !(env->eflags & X86_EFL_VM))
1562 {
1563#ifdef DEBUG
1564 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1565#endif
1566 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1567 {
1568 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%VGv eip=%VGv cr2=%VGv\n", uTrap, uErrorCode, pvNextEIP, env->eip, env->cr[2]));
1569 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1570 return VERR_REM_TOO_MANY_TRAPS;
1571 }
1572 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1573 pVM->rem.s.cPendingExceptions = 1;
1574 pVM->rem.s.uPendingException = uTrap;
1575 pVM->rem.s.uPendingExcptEIP = env->eip;
1576 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1577 }
1578 else
1579 {
1580 pVM->rem.s.cPendingExceptions = 0;
1581 pVM->rem.s.uPendingException = uTrap;
1582 pVM->rem.s.uPendingExcptEIP = env->eip;
1583 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1584 }
1585 return VINF_SUCCESS;
1586}
1587
1588
1589/*
1590 * Clear current active trap
1591 *
1592 * @param pVM VM Handle.
1593 */
1594void remR3TrapClear(PVM pVM)
1595{
1596 pVM->rem.s.cPendingExceptions = 0;
1597 pVM->rem.s.uPendingException = 0;
1598 pVM->rem.s.uPendingExcptEIP = 0;
1599 pVM->rem.s.uPendingExcptCR2 = 0;
1600}
1601
1602
1603/*
1604 * Record previous call instruction addresses
1605 *
1606 * @param env Pointer to the CPU environment.
1607 */
1608void remR3RecordCall(CPUState *env)
1609{
1610 CSAMR3RecordCallAddress(env->pVM, env->eip);
1611}
1612
1613
1614/**
1615 * Syncs the internal REM state with the VM.
1616 *
1617 * This must be called before REMR3Run() is invoked whenever when the REM
1618 * state is not up to date. Calling it several times in a row is not
1619 * permitted.
1620 *
1621 * @returns VBox status code.
1622 *
1623 * @param pVM VM Handle.
1624 * @param fFlushTBs Flush all translation blocks before executing code
1625 *
1626 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1627 * no do this since the majority of the callers don't want any unnecessary of events
1628 * pending that would immediatly interrupt execution.
1629 */
1630REMR3DECL(int) REMR3State(PVM pVM)
1631{
1632 register const CPUMCTX *pCtx;
1633 register unsigned fFlags;
1634 bool fHiddenSelRegsValid;
1635 unsigned i;
1636 TRPMEVENT enmType;
1637 uint8_t u8TrapNo;
1638 int rc;
1639
1640 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1641 Log2(("REMR3State:\n"));
1642
1643 pCtx = pVM->rem.s.pCtx;
1644 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1645
1646 Assert(!pVM->rem.s.fInREM);
1647 pVM->rem.s.fInStateSync = true;
1648
1649 /*
1650 * If we have to flush TBs, do that immediately.
1651 */
1652 if (pVM->rem.s.fFlushTBs)
1653 {
1654 STAM_COUNTER_INC(&gStatFlushTBs);
1655 tb_flush(&pVM->rem.s.Env);
1656 pVM->rem.s.fFlushTBs = false;
1657 }
1658
1659 /*
1660 * Copy the registers which require no special handling.
1661 */
1662#ifdef TARGET_X86_64
1663 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1664 Assert(R_EAX == 0);
1665 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1666 Assert(R_ECX == 1);
1667 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1668 Assert(R_EDX == 2);
1669 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1670 Assert(R_EBX == 3);
1671 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1672 Assert(R_ESP == 4);
1673 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1674 Assert(R_EBP == 5);
1675 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1676 Assert(R_ESI == 6);
1677 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1678 Assert(R_EDI == 7);
1679 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1680 pVM->rem.s.Env.regs[8] = pCtx->r8;
1681 pVM->rem.s.Env.regs[9] = pCtx->r9;
1682 pVM->rem.s.Env.regs[10] = pCtx->r10;
1683 pVM->rem.s.Env.regs[11] = pCtx->r11;
1684 pVM->rem.s.Env.regs[12] = pCtx->r12;
1685 pVM->rem.s.Env.regs[13] = pCtx->r13;
1686 pVM->rem.s.Env.regs[14] = pCtx->r14;
1687 pVM->rem.s.Env.regs[15] = pCtx->r15;
1688
1689 pVM->rem.s.Env.eip = pCtx->rip;
1690
1691 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1692#else
1693 Assert(R_EAX == 0);
1694 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1695 Assert(R_ECX == 1);
1696 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1697 Assert(R_EDX == 2);
1698 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1699 Assert(R_EBX == 3);
1700 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1701 Assert(R_ESP == 4);
1702 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1703 Assert(R_EBP == 5);
1704 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1705 Assert(R_ESI == 6);
1706 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1707 Assert(R_EDI == 7);
1708 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1709 pVM->rem.s.Env.eip = pCtx->eip;
1710
1711 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1712#endif
1713
1714 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1715
1716 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1717 for (i=0;i<8;i++)
1718 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1719
1720 /*
1721 * Clear the halted hidden flag (the interrupt waking up the CPU can
1722 * have been dispatched in raw mode).
1723 */
1724 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1725
1726 /*
1727 * Replay invlpg?
1728 */
1729 if (pVM->rem.s.cInvalidatedPages)
1730 {
1731 RTUINT i;
1732
1733 pVM->rem.s.fIgnoreInvlPg = true;
1734 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1735 {
1736 Log2(("REMR3State: invlpg %VGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1737 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1738 }
1739 pVM->rem.s.fIgnoreInvlPg = false;
1740 pVM->rem.s.cInvalidatedPages = 0;
1741 }
1742
1743 /* Replay notification changes? */
1744 if (pVM->rem.s.cHandlerNotifications)
1745 REMR3ReplayHandlerNotifications(pVM);
1746
1747 /* Update MSRs; before CRx registers! */
1748 pVM->rem.s.Env.efer = pCtx->msrEFER;
1749 pVM->rem.s.Env.star = pCtx->msrSTAR;
1750 pVM->rem.s.Env.pat = pCtx->msrPAT;
1751#ifdef TARGET_X86_64
1752 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1753 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1754 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1755 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1756
1757 /* Update the internal long mode activate flag according to the new EFER value. */
1758 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1759 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1760 else
1761 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1762#endif
1763
1764
1765 /*
1766 * Registers which are rarely changed and require special handling / order when changed.
1767 */
1768 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1769 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1770 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1771 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR | CPUM_CHANGED_TR
1772 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1773 {
1774 if (fFlags & CPUM_CHANGED_FPU_REM)
1775 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1776
1777 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1778 {
1779 pVM->rem.s.fIgnoreCR3Load = true;
1780 tlb_flush(&pVM->rem.s.Env, true);
1781 pVM->rem.s.fIgnoreCR3Load = false;
1782 }
1783
1784 /* CR4 before CR0! */
1785 if (fFlags & CPUM_CHANGED_CR4)
1786 {
1787 pVM->rem.s.fIgnoreCR3Load = true;
1788 pVM->rem.s.fIgnoreCpuMode = true;
1789 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1790 pVM->rem.s.fIgnoreCpuMode = false;
1791 pVM->rem.s.fIgnoreCR3Load = false;
1792 }
1793
1794 if (fFlags & CPUM_CHANGED_CR0)
1795 {
1796 pVM->rem.s.fIgnoreCR3Load = true;
1797 pVM->rem.s.fIgnoreCpuMode = true;
1798 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1799 pVM->rem.s.fIgnoreCpuMode = false;
1800 pVM->rem.s.fIgnoreCR3Load = false;
1801 }
1802
1803 if (fFlags & CPUM_CHANGED_CR3)
1804 {
1805 pVM->rem.s.fIgnoreCR3Load = true;
1806 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1807 pVM->rem.s.fIgnoreCR3Load = false;
1808 }
1809
1810 if (fFlags & CPUM_CHANGED_GDTR)
1811 {
1812 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1813 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1814 }
1815
1816 if (fFlags & CPUM_CHANGED_IDTR)
1817 {
1818 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1819 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1820 }
1821
1822 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1823 {
1824 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1825 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1826 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1827 }
1828
1829 if (fFlags & CPUM_CHANGED_LDTR)
1830 {
1831 if (fHiddenSelRegsValid)
1832 {
1833 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1834 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1835 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1836 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;;
1837 }
1838 else
1839 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1840 }
1841
1842 if (fFlags & CPUM_CHANGED_TR)
1843 {
1844 if (fHiddenSelRegsValid)
1845 {
1846 pVM->rem.s.Env.tr.selector = pCtx->tr;
1847 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1848 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1849 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;;
1850 }
1851 else
1852 sync_tr(&pVM->rem.s.Env, pCtx->tr);
1853
1854 /** @note do_interrupt will fault if the busy flag is still set.... */
1855 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1856 }
1857
1858 if (fFlags & CPUM_CHANGED_CPUID)
1859 {
1860 uint32_t u32Dummy;
1861
1862 /*
1863 * Get the CPUID features.
1864 */
1865 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1866 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1867 }
1868 }
1869
1870 /*
1871 * Update selector registers.
1872 * This must be done *after* we've synced gdt, ldt and crX registers
1873 * since we're reading the GDT/LDT om sync_seg. This will happen with
1874 * saved state which takes a quick dip into rawmode for instance.
1875 */
1876 /*
1877 * Stack; Note first check this one as the CPL might have changed. The
1878 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1879 */
1880
1881 if (fHiddenSelRegsValid)
1882 {
1883 /* The hidden selector registers are valid in the CPU context. */
1884 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1885
1886 /* Set current CPL */
1887 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1888
1889 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1890 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1891 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1892 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1893 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1894 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1895 }
1896 else
1897 {
1898 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1899 if (pVM->rem.s.Env.segs[R_SS].selector != (uint16_t)pCtx->ss)
1900 {
1901 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1902
1903 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1904 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1905#ifdef VBOX_WITH_STATISTICS
1906 if (pVM->rem.s.Env.segs[R_SS].newselector)
1907 {
1908 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1909 }
1910#endif
1911 }
1912 else
1913 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1914
1915 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1916 {
1917 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1918 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1919#ifdef VBOX_WITH_STATISTICS
1920 if (pVM->rem.s.Env.segs[R_ES].newselector)
1921 {
1922 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1923 }
1924#endif
1925 }
1926 else
1927 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1928
1929 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1930 {
1931 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1932 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1933#ifdef VBOX_WITH_STATISTICS
1934 if (pVM->rem.s.Env.segs[R_CS].newselector)
1935 {
1936 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1937 }
1938#endif
1939 }
1940 else
1941 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1942
1943 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1944 {
1945 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1946 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1947#ifdef VBOX_WITH_STATISTICS
1948 if (pVM->rem.s.Env.segs[R_DS].newselector)
1949 {
1950 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1951 }
1952#endif
1953 }
1954 else
1955 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1956
1957 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1958 * be the same but not the base/limit. */
1959 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1960 {
1961 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1962 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1963#ifdef VBOX_WITH_STATISTICS
1964 if (pVM->rem.s.Env.segs[R_FS].newselector)
1965 {
1966 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1967 }
1968#endif
1969 }
1970 else
1971 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1972
1973 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1974 {
1975 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1976 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1977#ifdef VBOX_WITH_STATISTICS
1978 if (pVM->rem.s.Env.segs[R_GS].newselector)
1979 {
1980 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1981 }
1982#endif
1983 }
1984 else
1985 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1986 }
1987
1988 /*
1989 * Check for traps.
1990 */
1991 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
1992 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
1993 if (VBOX_SUCCESS(rc))
1994 {
1995#ifdef DEBUG
1996 if (u8TrapNo == 0x80)
1997 {
1998 remR3DumpLnxSyscall(pVM);
1999 remR3DumpOBsdSyscall(pVM);
2000 }
2001#endif
2002
2003 pVM->rem.s.Env.exception_index = u8TrapNo;
2004 if (enmType != TRPM_SOFTWARE_INT)
2005 {
2006 pVM->rem.s.Env.exception_is_int = 0;
2007 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2008 }
2009 else
2010 {
2011 /*
2012 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2013 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2014 * for int03 and into.
2015 */
2016 pVM->rem.s.Env.exception_is_int = 1;
2017 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2018 /* int 3 may be generated by one-byte 0xcc */
2019 if (u8TrapNo == 3)
2020 {
2021 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2022 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2023 }
2024 /* int 4 may be generated by one-byte 0xce */
2025 else if (u8TrapNo == 4)
2026 {
2027 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2028 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2029 }
2030 }
2031
2032 /* get error code and cr2 if needed. */
2033 switch (u8TrapNo)
2034 {
2035 case 0x0e:
2036 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2037 /* fallthru */
2038 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2039 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2040 break;
2041
2042 case 0x11: case 0x08:
2043 default:
2044 pVM->rem.s.Env.error_code = 0;
2045 break;
2046 }
2047
2048 /*
2049 * We can now reset the active trap since the recompiler is gonna have a go at it.
2050 */
2051 rc = TRPMResetTrap(pVM);
2052 AssertRC(rc);
2053 Log2(("REMR3State: trap=%02x errcd=%VGv cr2=%VGv nexteip=%VGv%s\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.error_code,
2054 pVM->rem.s.Env.cr[2], pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2055 }
2056
2057 /*
2058 * Clear old interrupt request flags; Check for pending hardware interrupts.
2059 * (See @remark for why we don't check for other FFs.)
2060 */
2061 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2062 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2063 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2064 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2065
2066 /*
2067 * We're now in REM mode.
2068 */
2069 pVM->rem.s.fInREM = true;
2070 pVM->rem.s.fInStateSync = false;
2071 pVM->rem.s.cCanExecuteRaw = 0;
2072 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2073 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2074 return VINF_SUCCESS;
2075}
2076
2077
2078/**
 * Syncs back changes in the REM state to the VM state.
2080 *
2081 * This must be called after invoking REMR3Run().
2082 * Calling it several times in a row is not permitted.
2083 *
2084 * @returns VBox status code.
2085 *
2086 * @param pVM VM Handle.
2087 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR = 0;
    pCtx->fpu.MXCSR_MASK = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////  dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* A non-zero newselector means the REM copy of the selector never got
       fully loaded (out of sync with the VMM); count those occurrences. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDT/IDT/LDTR/TR: on any change raise the corresponding force flag so
       SELM/TRPM resync their shadow copies before raw-mode execution. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr      = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr        = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only!
     *  The 0xF0FF mask strips the limit-19:16 bits (bits 8-11 of flags >> 8) which are
     *  not part of the attribute word. */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    pCtx->ldtrHid.u64Base  = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u   = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;

    pCtx->trHid.u64Base    = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit   = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u     = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * A pending exception left in the recompiler state (index 0..255) is
     * handed back to TRPM so the VMM can dispatch or re-inject it.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Forward the fault address / error code for the exceptions that carry them. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2310
2311
2312/**
2313 * This is called by the disassembler when it wants to update the cpu state
2314 * before for instance doing a register dump.
2315 */
2316static void remR3StateUpdate(PVM pVM)
2317{
2318 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2319 unsigned i;
2320
2321 Assert(pVM->rem.s.fInREM);
2322
2323 /*
2324 * Copy back the registers.
2325 * This is done in the order they are declared in the CPUMCTX structure.
2326 */
2327
2328 /** @todo FOP */
2329 /** @todo FPUIP */
2330 /** @todo CS */
2331 /** @todo FPUDP */
2332 /** @todo DS */
2333 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2334 pCtx->fpu.MXCSR = 0;
2335 pCtx->fpu.MXCSR_MASK = 0;
2336
2337 /** @todo check if FPU/XMM was actually used in the recompiler */
2338 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2339//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2340
2341#ifdef TARGET_X86_64
2342 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2343 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2344 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2345 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2346 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2347 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2348 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2349 pCtx->r8 = pVM->rem.s.Env.regs[8];
2350 pCtx->r9 = pVM->rem.s.Env.regs[9];
2351 pCtx->r10 = pVM->rem.s.Env.regs[10];
2352 pCtx->r11 = pVM->rem.s.Env.regs[11];
2353 pCtx->r12 = pVM->rem.s.Env.regs[12];
2354 pCtx->r13 = pVM->rem.s.Env.regs[13];
2355 pCtx->r14 = pVM->rem.s.Env.regs[14];
2356 pCtx->r15 = pVM->rem.s.Env.regs[15];
2357
2358 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2359#else
2360 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2361 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2362 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2363 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2364 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2365 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2366 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2367
2368 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2369#endif
2370
2371 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2372
2373 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2374 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2375 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2376 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2377 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2378
2379#ifdef TARGET_X86_64
2380 pCtx->rip = pVM->rem.s.Env.eip;
2381 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2382#else
2383 pCtx->eip = pVM->rem.s.Env.eip;
2384 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2385#endif
2386
2387 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2388 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2389 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2390 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2391
2392 for (i=0;i<8;i++)
2393 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2394
2395 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2396 if (pCtx->gdtr.pGdt != (uint32_t)pVM->rem.s.Env.gdt.base)
2397 {
2398 pCtx->gdtr.pGdt = (uint32_t)pVM->rem.s.Env.gdt.base;
2399 STAM_COUNTER_INC(&gStatREMGDTChange);
2400 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2401 }
2402
2403 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2404 if (pCtx->idtr.pIdt != (uint32_t)pVM->rem.s.Env.idt.base)
2405 {
2406 pCtx->idtr.pIdt = (uint32_t)pVM->rem.s.Env.idt.base;
2407 STAM_COUNTER_INC(&gStatREMIDTChange);
2408 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2409 }
2410
2411 if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
2412 {
2413 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2414 STAM_COUNTER_INC(&gStatREMLDTRChange);
2415 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2416 }
2417 if (pCtx->tr != pVM->rem.s.Env.tr.selector)
2418 {
2419 pCtx->tr = pVM->rem.s.Env.tr.selector;
2420 STAM_COUNTER_INC(&gStatREMTRChange);
2421 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2422 }
2423
2424 /** @todo These values could still be out of sync! */
2425 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2426 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2427 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2428 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2429
2430 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2431 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2432 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2433
2434 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2435 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2436 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2437
2438 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2439 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2440 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2441
2442 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2443 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2444 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2445
2446 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2447 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2448 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2449
2450 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2451 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2452 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2453
2454 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2455 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2456 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;
2457
2458 /* Sysenter MSR */
2459 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2460 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2461 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2462
2463 /* System MSRs. */
2464 pCtx->msrEFER = pVM->rem.s.Env.efer;
2465 pCtx->msrSTAR = pVM->rem.s.Env.star;
2466 pCtx->msrPAT = pVM->rem.s.Env.pat;
2467#ifdef TARGET_X86_64
2468 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2469 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2470 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2471 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2472#endif
2473
2474}
2475
2476
2477/**
2478 * Update the VMM state information if we're currently in REM.
2479 *
2480 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2481 * we're currently executing in REM and the VMM state is invalid. This method will of
2482 * course check that we're executing in REM before syncing any data over to the VMM.
2483 *
2484 * @param pVM The VM handle.
2485 */
2486REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2487{
2488 if (pVM->rem.s.fInREM)
2489 remR3StateUpdate(pVM);
2490}
2491
2492
2493#undef LOG_GROUP
2494#define LOG_GROUP LOG_GROUP_REM
2495
2496
2497/**
2498 * Notify the recompiler about Address Gate 20 state change.
2499 *
2500 * This notification is required since A20 gate changes are
2501 * initialized from a device driver and the VM might just as
2502 * well be in REM mode as in RAW mode.
2503 *
2504 * @param pVM VM handle.
2505 * @param fEnable True if the gate should be enabled.
2506 * False if the gate should be disabled.
2507 */
2508REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2509{
2510 bool fSaved;
2511
2512 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2513 VM_ASSERT_EMT(pVM);
2514
2515 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2516 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2517
2518 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2519
2520 pVM->rem.s.fIgnoreAll = fSaved;
2521}
2522
2523
2524/**
2525 * Replays the invalidated recorded pages.
2526 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2527 *
2528 * @param pVM VM handle.
2529 */
2530REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2531{
2532 RTUINT i;
2533
2534 VM_ASSERT_EMT(pVM);
2535
2536 /*
2537 * Sync the required registers.
2538 */
2539 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2540 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2541 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2542 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2543
2544 /*
2545 * Replay the flushes.
2546 */
2547 pVM->rem.s.fIgnoreInvlPg = true;
2548 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2549 {
2550 Log2(("REMR3ReplayInvalidatedPages: invlpg %VGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2551 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2552 }
2553 pVM->rem.s.fIgnoreInvlPg = false;
2554 pVM->rem.s.cInvalidatedPages = 0;
2555}
2556
2557
2558/**
2559 * Replays the handler notification changes
2560 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2561 *
2562 * @param pVM VM handle.
2563 */
2564REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2565{
2566 /*
2567 * Replay the flushes.
2568 */
2569 RTUINT i;
2570 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2571
2572 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2573 VM_ASSERT_EMT(pVM);
2574
2575 pVM->rem.s.cHandlerNotifications = 0;
2576 for (i = 0; i < c; i++)
2577 {
2578 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2579 switch (pRec->enmKind)
2580 {
2581 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2582 REMR3NotifyHandlerPhysicalRegister(pVM,
2583 pRec->u.PhysicalRegister.enmType,
2584 pRec->u.PhysicalRegister.GCPhys,
2585 pRec->u.PhysicalRegister.cb,
2586 pRec->u.PhysicalRegister.fHasHCHandler);
2587 break;
2588
2589 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2590 REMR3NotifyHandlerPhysicalDeregister(pVM,
2591 pRec->u.PhysicalDeregister.enmType,
2592 pRec->u.PhysicalDeregister.GCPhys,
2593 pRec->u.PhysicalDeregister.cb,
2594 pRec->u.PhysicalDeregister.fHasHCHandler,
2595 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2596 break;
2597
2598 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2599 REMR3NotifyHandlerPhysicalModify(pVM,
2600 pRec->u.PhysicalModify.enmType,
2601 pRec->u.PhysicalModify.GCPhysOld,
2602 pRec->u.PhysicalModify.GCPhysNew,
2603 pRec->u.PhysicalModify.cb,
2604 pRec->u.PhysicalModify.fHasHCHandler,
2605 pRec->u.PhysicalModify.fRestoreAsRAM);
2606 break;
2607
2608 default:
2609 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2610 break;
2611 }
2612 }
2613 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2614}
2615
2616
2617/**
2618 * Notify REM about changed code page.
2619 *
2620 * @returns VBox status code.
2621 * @param pVM VM handle.
2622 * @param pvCodePage Code page address
2623 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        /* Drop all translation blocks overlapping the page so they get
           retranslated from the modified code. */
        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    /* Always succeeds; a failed guest page lookup simply means nothing to flush. */
    return VINF_SUCCESS;
}
2654
2655
2656/**
2657 * Notification about a successful MMR3PhysRegister() call.
2658 *
2659 * @param pVM VM handle.
2660 * @param GCPhys The physical address the RAM.
2661 * @param cb Size of the memory.
2662 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2663 */
REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
{
    uint32_t cbBitmap;  /* only used by the VBOX_STRICT path below */
    int rc;             /* only used by the VBOX_STRICT path below */
    Log(("REMR3NotifyPhysRamRegister: GCPhys=%VGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Base ram?
     * GCPhys == 0 identifies the base RAM registration; set up the dirty page
     * bitmap (one byte per page) used by the recompiler.
     */
    if (!GCPhys)
    {
        phys_ram_size = cb;
        phys_ram_dirty_size = cb >> PAGE_SHIFT;
#ifndef VBOX_STRICT
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
        /* Allocate a map large enough for 4GB worth of pages and protect the
           tail so out-of-bounds bitmap accesses fault immediately. */
        phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
        cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
        AssertRC(rc);
        /* Shift the start so the bitmap ends exactly at the guard pages. */
        phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
#endif
        /* Mark all pages dirty initially. */
        memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    }

    /*
     * Register the ram.
     * Self-induced accesses during registration must be ignored.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

#ifdef VBOX_WITH_NEW_PHYS_CODE
    if (fFlags & MM_RAM_FLAGS_RESERVED)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else
        cpu_register_physical_memory(GCPhys, cb, GCPhys);
#else
    if (!GCPhys)
        cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
    else
    {
        if (fFlags & MM_RAM_FLAGS_RESERVED)
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        else
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
    }
#endif
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2724
2725#ifndef VBOX_WITH_NEW_PHYS_CODE
2726
2727/**
2728 * Notification about a successful PGMR3PhysRegisterChunk() call.
2729 *
2730 * @param pVM VM handle.
2731 * @param GCPhys The physical address the RAM.
2732 * @param cb Size of the memory.
2733 * @param pvRam The HC address of the RAM.
2734 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2735 */
REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
{
    Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%VGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     * Chunks are always page aligned, exactly one dynamic chunk in size, and
     * plain RAM (no flags).
     */
    Assert(pvRam);
    Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
    Assert(fFlags == 0 /* normal RAM */);
    /* Ignore self-induced accesses while registering with the recompiler. */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    cpu_register_physical_memory(GCPhys, cb, GCPhys);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2757
2758
2759/**
2760 * Grows dynamically allocated guest RAM.
2761 * Will raise a fatal error if the operation fails.
2762 *
2763 * @param physaddr The physical address.
2764 */
2765void remR3GrowDynRange(unsigned long physaddr)
2766{
2767 int rc;
2768 PVM pVM = cpu_single_env->pVM;
2769 const RTGCPHYS GCPhys = physaddr;
2770
2771 LogFlow(("remR3GrowDynRange %VGp\n", physaddr));
2772 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2773 if (VBOX_SUCCESS(rc))
2774 return;
2775
2776 LogRel(("\nUnable to allocate guest RAM chunk at %VGp\n", physaddr));
2777 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %VGp\n", physaddr);
2778 AssertFatalFailed();
2779}
2780
2781#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2782
2783/**
2784 * Notification about a successful MMR3PhysRomRegister() call.
2785 *
2786 * @param pVM VM handle.
2787 * @param GCPhys The physical address of the ROM.
2788 * @param cb The size of the ROM.
2789 * @param pvCopy Pointer to the ROM copy.
2790 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2791 * This function will be called when ever the protection of the
2792 * shadow ROM changes (at reset and end of POST).
2793 */
2794REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2795{
2796 Log(("REMR3NotifyPhysRomRegister: GCPhys=%VGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
2797 VM_ASSERT_EMT(pVM);
2798
2799 /*
2800 * Validate input - we trust the caller.
2801 */
2802 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2803 Assert(cb);
2804 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2805 Assert(pvCopy);
2806 Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);
2807
2808 /*
2809 * Register the rom.
2810 */
2811 Assert(!pVM->rem.s.fIgnoreAll);
2812 pVM->rem.s.fIgnoreAll = true;
2813
2814 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2815
2816 Log2(("%.64Vhxd\n", (char *)pvCopy + cb - 64));
2817
2818 Assert(pVM->rem.s.fIgnoreAll);
2819 pVM->rem.s.fIgnoreAll = false;
2820}
2821
2822
2823/**
2824 * Notification about a successful memory deregistration or reservation.
2825 *
2826 * @param pVM VM Handle.
2827 * @param GCPhys Start physical address.
2828 * @param cb The size of the range.
2829 * @todo Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't
2830 * reserve any memory soon.
2831 */
REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysReserve: GCPhys=%VGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory.
     * Accesses caused by the re-registration itself must be ignored.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2855
2856
2857/**
2858 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2859 *
2860 * @param pVM VM Handle.
2861 * @param enmType Handler type.
2862 * @param GCPhys Handler range address.
2863 * @param cb Size of the handler range.
2864 * @param fHasHCHandler Set if the handler has a HC callback function.
2865 *
2866 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2867 * Handler memory type to memory which has no HC handler.
2868 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%VGp cb=%VGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));
    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);

    /* Drain any queued notifications first so ordering is preserved. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    /* Ignore self-induced accesses while re-registering the range. */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    /* MMIO takes precedence; other handler types only matter when there is a
       HC callback to invoke (see the @remark in the function docs). */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2891
2892
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * Restores the range in the recompiler's memory layout, either as plain RAM
 * or as unassigned memory depending on @a fRestoreAsRAM.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%VGp cb=%VGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);

    /* Flush any queued notifications first so they are applied in order. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    /* Suppress recursive notifications while we reprogram the REM memory map. */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Range lies above RAM, so unassigned is the correct restore state. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            /* Identity-mapped RAM: the ram_addr equals the guest physical address here. */
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2936
2937
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * Moves a (non-MMIO) handler range: resets the old range and applies the
 * handler memory type to the new range.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type (must not be PGMPHYSHANDLERTYPE_MMIO).
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%VGp GCPhysNew=%VGp cb=%VGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    /* Flush any queued notifications first so they are applied in order. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    if (fHasHCHandler)
    {
        /* Suppress recursive notifications while we reprogram the REM memory map. */
        Assert(!pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = true;

        /*
         * Reset the old page.
         */
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);

        Assert(pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = false;
    }
}
2988
2989
2990/**
2991 * Checks if we're handling access to this page or not.
2992 *
2993 * @returns true if we're trapping access.
2994 * @returns false if we aren't.
2995 * @param pVM The VM handle.
2996 * @param GCPhys The physical address.
2997 *
2998 * @remark This function will only work correctly in VBOX_STRICT builds!
2999 */
3000REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3001{
3002#ifdef VBOX_STRICT
3003 unsigned long off;
3004 if (pVM->rem.s.cHandlerNotifications)
3005 REMR3ReplayHandlerNotifications(pVM);
3006
3007 off = get_phys_page_offset(GCPhys);
3008 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3009 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3010 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3011#else
3012 return false;
3013#endif
3014}
3015
3016
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error
 * (dumps handler/mmio/phys info and aborts the CPU).
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry)
{
    PVM pVM = env->pVM;
    /* A monitored (handler) page is fine to execute from; translate and return. */
    if ((pTLBEntry->addr_code & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        target_ulong ret = pTLBEntry->addend + addr;
        AssertMsg2("remR3PhysGetPhysicalAddressCode: addr=%VGv addr_code=%VGv addend=%VGp ret=%VGp\n",
                   (RTGCPTR)addr, (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, ret);
        return ret;
    }
    /* Anything else (e.g. MMIO) is fatal: log diagnostics and abort. */
    LogRel(("\nTrying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv! (iHandlerMemType=%#x iMMIOMemType=%#x)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3050
3051
3052/** Validate the physical address passed to the read functions.
3053 * Useful for finding non-guest-ram reads/writes. */
3054#if 0 //1 /* disable if it becomes bothersome... */
3055# define VBOX_CHECK_ADDR(GCPhys) AssertMsg(PGMPhysIsGCPhysValid(cpu_single_env->pVM, (GCPhys)), ("%VGp\n", (GCPhys)))
3056#else
3057# define VBOX_CHECK_ADDR(GCPhys) do { } while (0)
3058#endif
3059
/**
 * Read guest RAM and ROM.
 *
 * Profiled wrapper around PGMPhysRead; the STAM start/stop must bracket the
 * actual read.
 *
 * @param   SrcGCPhys   The source address (guest physical).
 * @param   pvDst       The destination address.
 * @param   cb          Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3074
3075
3076/**
3077 * Read guest RAM and ROM, unsigned 8-bit.
3078 *
3079 * @param SrcGCPhys The source address (guest physical).
3080 */
3081uint8_t remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3082{
3083 uint8_t val;
3084 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3085 VBOX_CHECK_ADDR(SrcGCPhys);
3086 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3087 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3088 return val;
3089}
3090
3091
3092/**
3093 * Read guest RAM and ROM, signed 8-bit.
3094 *
3095 * @param SrcGCPhys The source address (guest physical).
3096 */
3097int8_t remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3098{
3099 int8_t val;
3100 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3101 VBOX_CHECK_ADDR(SrcGCPhys);
3102 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3103 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3104 return val;
3105}
3106
3107
3108/**
3109 * Read guest RAM and ROM, unsigned 16-bit.
3110 *
3111 * @param SrcGCPhys The source address (guest physical).
3112 */
3113uint16_t remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3114{
3115 uint16_t val;
3116 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3117 VBOX_CHECK_ADDR(SrcGCPhys);
3118 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3119 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3120 return val;
3121}
3122
3123
3124/**
3125 * Read guest RAM and ROM, signed 16-bit.
3126 *
3127 * @param SrcGCPhys The source address (guest physical).
3128 */
3129int16_t remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3130{
3131 uint16_t val;
3132 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3133 VBOX_CHECK_ADDR(SrcGCPhys);
3134 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3135 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3136 return val;
3137}
3138
3139
3140/**
3141 * Read guest RAM and ROM, unsigned 32-bit.
3142 *
3143 * @param SrcGCPhys The source address (guest physical).
3144 */
3145uint32_t remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3146{
3147 uint32_t val;
3148 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3149 VBOX_CHECK_ADDR(SrcGCPhys);
3150 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3151 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3152 return val;
3153}
3154
3155
3156/**
3157 * Read guest RAM and ROM, signed 32-bit.
3158 *
3159 * @param SrcGCPhys The source address (guest physical).
3160 */
3161int32_t remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3162{
3163 int32_t val;
3164 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3165 VBOX_CHECK_ADDR(SrcGCPhys);
3166 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3167 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3168 return val;
3169}
3170
3171
3172/**
3173 * Read guest RAM and ROM, unsigned 64-bit.
3174 *
3175 * @param SrcGCPhys The source address (guest physical).
3176 */
3177uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3178{
3179 uint64_t val;
3180 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3181 VBOX_CHECK_ADDR(SrcGCPhys);
3182 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3183 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3184 return val;
3185}
3186
3187/**
3188 * Read guest RAM and ROM, signed 64-bit.
3189 *
3190 * @param SrcGCPhys The source address (guest physical).
3191 */
3192int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3193{
3194 int64_t val;
3195 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3196 VBOX_CHECK_ADDR(SrcGCPhys);
3197 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3198 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3199 return val;
3200}
3201
3202
/**
 * Write guest RAM.
 *
 * Profiled wrapper around PGMPhysWrite; the STAM start/stop must bracket the
 * actual write.
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   pvSrc       The source address.
 * @param   cb          Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
}
3217
3218
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   val         Value to write.
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
}
3232
3233
/**
 * Write guest RAM, unsigned 16-bit.  (Header previously said 8-bit by mistake.)
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   val         Value to write.
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
}
3247
3248
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   val         Value to write.
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
}
3262
3263
/**
 * Write guest RAM, unsigned 64-bit.
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   val         Value to write.
 */
void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
}
3277
3278#undef LOG_GROUP
3279#define LOG_GROUP LOG_GROUP_REM_MMIO
3280
3281/** Read MMIO memory. */
3282static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3283{
3284 uint32_t u32 = 0;
3285 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3286 AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
3287 Log2(("remR3MMIOReadU8: GCPhys=%VGp -> %02x\n", GCPhys, u32));
3288 return u32;
3289}
3290
3291/** Read MMIO memory. */
3292static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3293{
3294 uint32_t u32 = 0;
3295 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3296 AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
3297 Log2(("remR3MMIOReadU16: GCPhys=%VGp -> %04x\n", GCPhys, u32));
3298 return u32;
3299}
3300
3301/** Read MMIO memory. */
3302static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3303{
3304 uint32_t u32 = 0;
3305 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3306 AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
3307 Log2(("remR3MMIOReadU32: GCPhys=%VGp -> %08x\n", GCPhys, u32));
3308 return u32;
3309}
3310
/** Write to MMIO memory, 8-bit access; failure is asserted, not propagated. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3319
/** Write to MMIO memory, 16-bit access; failure is asserted, not propagated. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3328
/** Write to MMIO memory, 32-bit access; failure is asserted, not propagated. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3337
3338
3339#undef LOG_GROUP
3340#define LOG_GROUP LOG_GROUP_REM_HANDLER
3341
3342/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3343
3344static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3345{
3346 uint8_t u8;
3347 Log2(("remR3HandlerReadU8: GCPhys=%VGp\n", GCPhys));
3348 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3349 return u8;
3350}
3351
3352static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3353{
3354 uint16_t u16;
3355 Log2(("remR3HandlerReadU16: GCPhys=%VGp\n", GCPhys));
3356 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3357 return u16;
3358}
3359
3360static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3361{
3362 uint32_t u32;
3363 Log2(("remR3HandlerReadU32: GCPhys=%VGp\n", GCPhys));
3364 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3365 return u32;
3366}
3367
/** Handler-page 8-bit write: forwarded to PGMPhysWrite so the access handler fires.
 *  Note: writes the low byte of u32 (little-endian host assumed). */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3373
/** Handler-page 16-bit write: forwarded to PGMPhysWrite so the access handler fires.
 *  Note: writes the low word of u32 (little-endian host assumed). */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3379
/** Handler-page 32-bit write: forwarded to PGMPhysWrite so the access handler fires. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3385
3386/* -+- disassembly -+- */
3387
3388#undef LOG_GROUP
3389#define LOG_GROUP LOG_GROUP_REM_DISAS
3390
3391
3392/**
3393 * Enables or disables singled stepped disassembly.
3394 *
3395 * @returns VBox status code.
3396 * @param pVM VM handle.
3397 * @param fEnable To enable set this flag, to disable clear it.
3398 */
3399static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3400{
3401 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3402 VM_ASSERT_EMT(pVM);
3403
3404 if (fEnable)
3405 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3406 else
3407 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3408 return VINF_SUCCESS;
3409}
3410
3411
/**
 * Enables or disables singled stepped disassembly.
 *
 * Executes directly when called on the EMT; otherwise queues the worker as a
 * VM request and waits for its completion.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   fEnable To enable set this flag, to disable clear it.
 */
REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    PVMREQ  pReq;
    int     rc;

    LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
    if (VM_IS_EMT(pVM))
        return remR3DisasEnableStepping(pVM, fEnable);

    /* Dispatch to the EMT and wait for the request to complete. */
    rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
    AssertRC(rc);
    if (VBOX_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
}
3435
3436
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument, prints the current single-step-disassembly state;
 * otherwise converts the argument to a boolean and applies it.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (VBOX_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (VBOX_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3461
3462
/**
 * Disassembles n instructions and prints them to the log.
 *
 * @returns Success indicator.
 * @param   env             Pointer to the recompiler CPU structure.
 * @param   f32BitCode      Indicates that whether or not the code should
 *                          be disassembled as 16 or 32 bit. If -1 the CS
 *                          selector will be inspected.
 * @param   nrInstructions  Nr of instructions to disassemble
 * @param   pszPrefix       Optional log-line prefix (may be NULL).
 * @remark  not currently used for anything but ad-hoc debugging.
 */
bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
{
    int i, rc;
    RTGCPTR GCPtrPC;
    uint8_t *pvPC;
    RTINTPTR off;
    DISCPUSTATE Cpu;

    /*
     * Determine 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Convert cs:eip to host context address.
     * We don't care to much about cross page correctness presently.
     */
    GCPtrPC = env->segs[R_CS].base + env->eip;
    if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);

        /* convert eip to physical address. */
        rc = PGMPhysGCPtr2HCPtrByGstCR3(env->pVM,
                                        GCPtrPC,
                                        env->cr[3],
                                        env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
                                        (void**)&pvPC);
        if (VBOX_FAILURE(rc))
        {
            /* Fall back to the PATM patch memory mapping for patched code. */
            if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
                return false;
            pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
                 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
        }
    }
    else
    {
        /* physical address */
        rc = PGMPhysGCPhys2HCPtr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
                                 (void**)&pvPC);
        if (VBOX_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    /* off translates host addresses back to guest eip for display purposes. */
    off = env->eip - (RTGCUINTPTR)pvPC;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;

    for (i=0;i<nrInstructions;i++)
    {
        char szOutput[256];
        uint32_t cbOp;
        if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
            return false;
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));

        pvPC += cbOp;
    }
    return true;
}
3546
3547
3548/** @todo need to test the new code, using the old code in the mean while. */
3549#define USE_OLD_DUMP_AND_DISASSEMBLY
3550
/**
 * Disassembles one instruction and prints it to the log.
 *
 * @returns Success indicator.
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   f32BitCode  Indicates that whether or not the code should
 *                      be disassembled as 16 or 32 bit. If -1 the CS
 *                      selector will be inspected.
 * @param   pszPrefix   Optional log-line prefix (may be NULL).
 */
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
{
#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
    PVM pVM = env->pVM;
    RTGCPTR GCPtrPC;
    uint8_t *pvPC;
    char szOutput[256];
    uint32_t cbOp;
    RTINTPTR off;
    DISCPUSTATE Cpu;


    /* Doesn't work in long mode. */
    if (env->hflags & HF_LMA_MASK)
        return false;

    /*
     * Determine 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Log registers
     */
    if (LogIs2Enabled())
    {
        remR3StateUpdate(pVM);
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
    }

    /*
     * Convert cs:eip to host context address.
     * We don't care to much about cross page correctness presently.
     */
    GCPtrPC = env->segs[R_CS].base + env->eip;
    if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        /* convert eip to physical address. */
        int rc = PGMPhysGCPtr2HCPtrByGstCR3(pVM,
                                            GCPtrPC,
                                            env->cr[3],
                                            env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
                                            (void**)&pvPC);
        if (VBOX_FAILURE(rc))
        {
            /* Fall back to the PATM patch memory mapping for patched code. */
            if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
                return false;
            pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
                + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
        }
    }
    else
    {

        /* physical address */
        int rc = PGMPhysGCPhys2HCPtr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
        if (VBOX_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    /* off translates host addresses back to guest eip for display purposes. */
    off = env->eip - (RTGCUINTPTR)pvPC;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;
    if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
        return false;

    /* 16-bit output includes the CS selector for context. */
    if (!f32BitCode)
    {
        if (pszPrefix)
            Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
        else
            Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
    }
    else
    {
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));
    }
    return true;

#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
    PVM pVM = env->pVM;
    const bool fLog = LogIsEnabled();
    const bool fLog2 = LogIs2Enabled();
    int rc = VINF_SUCCESS;

    /*
     * Don't bother if there ain't any log output to do.
     */
    if (!fLog && !fLog2)
        return true;

    /*
     * Update the state so DBGF reads the correct register values.
     */
    remR3StateUpdate(pVM);

    /*
     * Log registers if requested.
     */
    if (!fLog2)
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);

    /*
     * Disassemble to log.
     */
    if (fLog)
        rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);

    return VBOX_SUCCESS(rc);
#endif
}
3682
3683
/**
 * Disassemble recompiled code.
 *
 * Only does work when Log2 is enabled; output goes to the VBox log via
 * RTLogPrintf rather than the supplied FILE handle.
 *
 * @param   phFileIgnored   Ignored, logfile usually.
 * @param   pvCode          Pointer to the code block.
 * @param   cb              Size of the code block.
 */
void disas(FILE *phFileIgnored, void *pvCode, unsigned long cb)
{
    if (LogIs2Enabled())
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                RTLogPrintf("%s", szOutput);
            else
            {
                /* On failure, skip one byte and resync (except on AMD64, see below). */
                RTLogPrintf("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }
    NOREF(phFileIgnored);
}
3725
3726
3727/**
3728 * Disassemble guest code.
3729 *
3730 * @param phFileIgnored Ignored, logfile usually.
3731 * @param uCode The guest address of the code to disassemble. (flat?)
3732 * @param cb Number of bytes to disassemble.
3733 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3734 */
3735void target_disas(FILE *phFileIgnored, target_ulong uCode, target_ulong cb, int fFlags)
3736{
3737 if (LogIs2Enabled())
3738 {
3739 PVM pVM = cpu_single_env->pVM;
3740 RTSEL cs;
3741 RTGCUINTPTR eip;
3742
3743 /*
3744 * Update the state so DBGF reads the correct register values (flags).
3745 */
3746 remR3StateUpdate(pVM);
3747
3748 /*
3749 * Do the disassembling.
3750 */
3751 RTLogPrintf("Guest Code: PC=%VGp #VGp (%VGp) bytes fFlags=%d\n", uCode, cb, cb, fFlags);
3752 cs = cpu_single_env->segs[R_CS].selector;
3753 eip = uCode - cpu_single_env->segs[R_CS].base;
3754 for (;;)
3755 {
3756 char szBuf[256];
3757 uint32_t cbInstr;
3758 int rc = DBGFR3DisasInstrEx(pVM,
3759 cs,
3760 eip,
3761 0,
3762 szBuf, sizeof(szBuf),
3763 &cbInstr);
3764 if (VBOX_SUCCESS(rc))
3765 RTLogPrintf("%VGp %s\n", uCode, szBuf);
3766 else
3767 {
3768 RTLogPrintf("%VGp %04x:%VGp: %s\n", uCode, cs, eip, szBuf);
3769 cbInstr = 1;
3770 }
3771
3772 /* next */
3773 if (cb <= cbInstr)
3774 break;
3775 cb -= cbInstr;
3776 uCode += cbInstr;
3777 eip += cbInstr;
3778 }
3779 }
3780 NOREF(phFileIgnored);
3781}
3782
3783
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer, so the result is
 *          only valid until the next call and the function is not reentrant.
 * @param   orig_addr   The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    RTGCINTPTR off = 0;
    DBGFSYMBOL Sym;
    PVM pVM = cpu_single_env->pVM;
    int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
    if (VBOX_SUCCESS(rc))
    {
        static char szSym[sizeof(Sym.szName) + 48];
        /* Format as "name", "name+offset" or "name-offset" depending on the delta. */
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
3809
3810
3811#undef LOG_GROUP
3812#define LOG_GROUP LOG_GROUP_REM
3813
3814
3815/* -+- FF notifications -+- */
3816
3817
/**
 * Notification about a pending interrupt.
 *
 * Records the interrupt vector for the recompiler; asserts that no other
 * interrupt is already pending.
 *
 * @param   pVM             VM Handle.
 * @param   u8Interrupt     Interrupt
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
{
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3830
/**
 * Queries the currently pending interrupt, if any.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM     VM Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
{
    return pVM->rem.s.u32PendingInterrupt;
}
3842
/**
 * Notification about the interrupt FF being set.
 *
 * On the EMT this interrupts the recompiler directly; from other threads the
 * external-interrupt flag is OR'ed in atomically for the EMT to pick up.
 *
 * @param   pVM     VM Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        if (VM_IS_EMT(pVM))
            cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
        else
            ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_HARD);
    }
}
3862
3863
/**
 * Notification about the interrupt FF being cleared.
 * (Header previously said "being set" by copy/paste mistake.)
 *
 * @param   pVM     VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
3876
3877
/**
 * Notification about pending timer(s).
 *
 * Forces the recompiler to exit its execution loop so the timers can be
 * serviced; cross-thread callers set the external-timer flag atomically.
 *
 * @param   pVM     VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
{
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        if (VM_IS_EMT(pVM))
            cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
        else
            ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_TIMER);
    }
}
3898
3899
/**
 * Notification about pending DMA transfers.
 *
 * Forces the recompiler to exit its execution loop so the DMA work can be
 * serviced; cross-thread callers set the external-DMA flag atomically.
 *
 * @param   pVM     VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        if (VM_IS_EMT(pVM))
            cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
        else
            ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_DMA);
    }
}
3918
3919
/**
 * Notification about pending queue items.
 * (Header previously said "pending timer(s)" by copy/paste mistake.)
 *
 * Forces the recompiler to exit its execution loop; cross-thread callers set
 * the external-exit flag atomically.
 *
 * @param   pVM     VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        if (VM_IS_EMT(pVM))
            cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
        else
            ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
3938
3939
/**
 * Notification about pending FF set by an external thread.
 *
 * Forces the recompiler to exit its execution loop; cross-thread callers set
 * the external-exit flag atomically.
 *
 * @param   pVM     VM handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        if (VM_IS_EMT(pVM))
            cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
        else
            ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
3958
3959
3960#ifdef VBOX_WITH_STATISTICS
/**
 * Starts the profile sample associated with the given REM statistics code.
 * (Stats builds only; must be paired with remR3ProfileStop for the same code.)
 *
 * @param   statcode    STATS_* identifier of the sample to start.
 */
void remR3ProfileStart(int statcode)
{
    STAMPROFILEADV *pStat;
    /* Map the statcode onto the corresponding global STAM sample. */
    switch(statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:
            pStat = &gStatExecuteSingleInstr;
            break;
        case STATS_QEMU_COMPILATION:
            pStat = &gStatCompilationQEmu;
            break;
        case STATS_QEMU_RUN_EMULATED_CODE:
            pStat = &gStatRunCodeQEmu;
            break;
        case STATS_QEMU_TOTAL:
            pStat = &gStatTotalTimeQEmu;
            break;
        case STATS_QEMU_RUN_TIMERS:
            pStat = &gStatTimers;
            break;
        case STATS_TLB_LOOKUP:
            pStat= &gStatTBLookup;
            break;
        case STATS_IRQ_HANDLING:
            pStat= &gStatIRQ;
            break;
        case STATS_RAW_CHECK:
            pStat = &gStatRawCheck;
            break;

        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return;
    }
    STAM_PROFILE_ADV_START(pStat, a);
}
3997
3998
3999void remR3ProfileStop(int statcode)
4000{
4001 STAMPROFILEADV *pStat;
4002 switch(statcode)
4003 {
4004 case STATS_EMULATE_SINGLE_INSTR:
4005 pStat = &gStatExecuteSingleInstr;
4006 break;
4007 case STATS_QEMU_COMPILATION:
4008 pStat = &gStatCompilationQEmu;
4009 break;
4010 case STATS_QEMU_RUN_EMULATED_CODE:
4011 pStat = &gStatRunCodeQEmu;
4012 break;
4013 case STATS_QEMU_TOTAL:
4014 pStat = &gStatTotalTimeQEmu;
4015 break;
4016 case STATS_QEMU_RUN_TIMERS:
4017 pStat = &gStatTimers;
4018 break;
4019 case STATS_TLB_LOOKUP:
4020 pStat= &gStatTBLookup;
4021 break;
4022 case STATS_IRQ_HANDLING:
4023 pStat= &gStatIRQ;
4024 break;
4025 case STATS_RAW_CHECK:
4026 pStat = &gStatRawCheck;
4027 break;
4028 default:
4029 AssertMsgFailed(("unknown stat %d\n", statcode));
4030 return;
4031 }
4032 STAM_PROFILE_ADV_STOP(pStat, a);
4033}
4034#endif
4035
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code for the REM run loop to pick up and interrupts
 * the recompiler so it stops as soon as possible.
 *
 * @param pVM VM handle.
 * @param rc  The rc.
 * @thread EMT only (asserted).
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Vrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Park the status code first so it is visible when the exit is noticed. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4050
4051
4052/* -+- timers -+- */
4053
/**
 * Reads the guest TSC for the recompiler (qemu callback).
 *
 * @returns The current virtual CPU tick count from TM.
 * @param   env     The recompiler CPU state (carries the VM handle).
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
4059
4060
4061/* -+- interrupts -+- */
4062
/**
 * Asserts the FPU error line (qemu callback).
 *
 * Raises ISA IRQ 13 — presumably the legacy math-coprocessor error
 * interrupt; failure is only logged.
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4068
/**
 * Gets the next pending interrupt vector for the recompiler (qemu callback).
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state (carries the VM handle).
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* A vector was parked for us earlier; consume it instead of asking PDM. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Vrc\n", u8Interrupt, rc));
    if (VBOX_SUCCESS(rc))
    {
        /* More interrupts still pending? Keep CPU_INTERRUPT_HARD set so qemu asks again. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4101
4102
4103/* -+- local apic -+- */
4104
/**
 * Sets the APIC base MSR via PDM (qemu callback).
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   val     The new APIC base value; failures are only logged.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Vrc\n", val, rc)); NOREF(rc);
}
4110
4111uint64_t cpu_get_apic_base(CPUX86State *env)
4112{
4113 uint64_t u64;
4114 int rc = PDMApicGetBase(env->pVM, &u64);
4115 if (VBOX_SUCCESS(rc))
4116 {
4117 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4118 return u64;
4119 }
4120 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Vrc)\n", rc));
4121 return 0;
4122}
4123
/**
 * Sets the APIC task priority register via PDM (qemu callback).
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   val     The new TPR value; failures are only logged.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Vrc\n", val, rc)); NOREF(rc);
}
4129
4130uint8_t cpu_get_apic_tpr(CPUX86State *env)
4131{
4132 uint8_t u8;
4133 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4134 if (VBOX_SUCCESS(rc))
4135 {
4136 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4137 return u8;
4138 }
4139 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Vrc)\n", rc));
4140 return 0;
4141}
4142
4143
4144uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4145{
4146 uint64_t value;
4147 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4148 if (VBOX_SUCCESS(rc))
4149 {
4150 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4151 return value;
4152 }
4153 /** @todo: exception ? */
4154 LogFlow(("cpu_apic_rdms returns 0 (rc=%Vrc)\n", rc));
4155 return value;
4156}
4157
/**
 * Writes an APIC MSR via PDM (qemu callback).
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   reg     The MSR to write.
 * @param   value   The value to write; failures are only logged.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Vrc\n", rc)); NOREF(rc);
}
4164/* -+- I/O Ports -+- */
4165
4166#undef LOG_GROUP
4167#define LOG_GROUP LOG_GROUP_REM_IOPORT
4168
4169void cpu_outb(CPUState *env, int addr, int val)
4170{
4171 int rc;
4172
4173 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4174 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4175
4176 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4177 if (RT_LIKELY(rc == VINF_SUCCESS))
4178 return;
4179 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4180 {
4181 Log(("cpu_outb: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
4182 remR3RaiseRC(env->pVM, rc);
4183 return;
4184 }
4185 remAbort(rc, __FUNCTION__);
4186}
4187
4188void cpu_outw(CPUState *env, int addr, int val)
4189{
4190 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4191 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4192 if (RT_LIKELY(rc == VINF_SUCCESS))
4193 return;
4194 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4195 {
4196 Log(("cpu_outw: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
4197 remR3RaiseRC(env->pVM, rc);
4198 return;
4199 }
4200 remAbort(rc, __FUNCTION__);
4201}
4202
4203void cpu_outl(CPUState *env, int addr, int val)
4204{
4205 int rc;
4206 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4207 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4208 if (RT_LIKELY(rc == VINF_SUCCESS))
4209 return;
4210 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4211 {
4212 Log(("cpu_outl: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
4213 remR3RaiseRC(env->pVM, rc);
4214 return;
4215 }
4216 remAbort(rc, __FUNCTION__);
4217}
4218
4219int cpu_inb(CPUState *env, int addr)
4220{
4221 uint32_t u32 = 0;
4222 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4223 if (RT_LIKELY(rc == VINF_SUCCESS))
4224 {
4225 if (/*addr != 0x61 && */addr != 0x71)
4226 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4227 return (int)u32;
4228 }
4229 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4230 {
4231 Log(("cpu_inb: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
4232 remR3RaiseRC(env->pVM, rc);
4233 return (int)u32;
4234 }
4235 remAbort(rc, __FUNCTION__);
4236 return 0xff;
4237}
4238
4239int cpu_inw(CPUState *env, int addr)
4240{
4241 uint32_t u32 = 0;
4242 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4243 if (RT_LIKELY(rc == VINF_SUCCESS))
4244 {
4245 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4246 return (int)u32;
4247 }
4248 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4249 {
4250 Log(("cpu_inw: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
4251 remR3RaiseRC(env->pVM, rc);
4252 return (int)u32;
4253 }
4254 remAbort(rc, __FUNCTION__);
4255 return 0xffff;
4256}
4257
4258int cpu_inl(CPUState *env, int addr)
4259{
4260 uint32_t u32 = 0;
4261 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4262 if (RT_LIKELY(rc == VINF_SUCCESS))
4263 {
4264//if (addr==0x01f0 && u32 == 0x6b6d)
4265// loglevel = ~0;
4266 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4267 return (int)u32;
4268 }
4269 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4270 {
4271 Log(("cpu_inl: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
4272 remR3RaiseRC(env->pVM, rc);
4273 return (int)u32;
4274 }
4275 remAbort(rc, __FUNCTION__);
4276 return 0xffffffff;
4277}
4278
4279#undef LOG_GROUP
4280#define LOG_GROUP LOG_GROUP_REM
4281
4282
4283/* -+- helpers and misc other interfaces -+- */
4284
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * Delegates to CPUM so the guest sees the configured (not host) CPUID leaves.
 *
 * @param env Pointer to the recompiler CPU structure.
 * @param uOperator CPUID operation (eax).
 * @param pvEAX Where to store eax.
 * @param pvEBX Where to store ebx.
 * @param pvECX Where to store ecx.
 * @param pvEDX Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4302
4303
4304#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 * (Currently compiled out via the enclosing #if 0.)
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    /* NOTE(review): the assert only prints the raw format string, not the
       formatted message — see the @todo above. */
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4332#endif
4333
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Logs/asserts the message, syncs the REM state back if needed and hands
 * control to EM's fatal-error handling (does not return normally).
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    /* The va_list is consumed by each %N expansion, hence the restart
       between the log call and the assertion. */
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4367
4368
/**
 * Aborts the VM.
 *
 * Logs/asserts the failure, syncs the REM state back if needed and hands
 * control to EM's fatal-error handling (does not return normally).
 *
 * @param rc VBox error code.
 * @param pszTip Hint about why/when this happend.
 */
static void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Vrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Vrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4394
4395
4396/**
4397 * Dumps a linux system call.
4398 * @param pVM VM handle.
4399 */
4400void remR3DumpLnxSyscall(PVM pVM)
4401{
4402 static const char *apsz[] =
4403 {
4404 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4405 "sys_exit",
4406 "sys_fork",
4407 "sys_read",
4408 "sys_write",
4409 "sys_open", /* 5 */
4410 "sys_close",
4411 "sys_waitpid",
4412 "sys_creat",
4413 "sys_link",
4414 "sys_unlink", /* 10 */
4415 "sys_execve",
4416 "sys_chdir",
4417 "sys_time",
4418 "sys_mknod",
4419 "sys_chmod", /* 15 */
4420 "sys_lchown16",
4421 "sys_ni_syscall", /* old break syscall holder */
4422 "sys_stat",
4423 "sys_lseek",
4424 "sys_getpid", /* 20 */
4425 "sys_mount",
4426 "sys_oldumount",
4427 "sys_setuid16",
4428 "sys_getuid16",
4429 "sys_stime", /* 25 */
4430 "sys_ptrace",
4431 "sys_alarm",
4432 "sys_fstat",
4433 "sys_pause",
4434 "sys_utime", /* 30 */
4435 "sys_ni_syscall", /* old stty syscall holder */
4436 "sys_ni_syscall", /* old gtty syscall holder */
4437 "sys_access",
4438 "sys_nice",
4439 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4440 "sys_sync",
4441 "sys_kill",
4442 "sys_rename",
4443 "sys_mkdir",
4444 "sys_rmdir", /* 40 */
4445 "sys_dup",
4446 "sys_pipe",
4447 "sys_times",
4448 "sys_ni_syscall", /* old prof syscall holder */
4449 "sys_brk", /* 45 */
4450 "sys_setgid16",
4451 "sys_getgid16",
4452 "sys_signal",
4453 "sys_geteuid16",
4454 "sys_getegid16", /* 50 */
4455 "sys_acct",
4456 "sys_umount", /* recycled never used phys() */
4457 "sys_ni_syscall", /* old lock syscall holder */
4458 "sys_ioctl",
4459 "sys_fcntl", /* 55 */
4460 "sys_ni_syscall", /* old mpx syscall holder */
4461 "sys_setpgid",
4462 "sys_ni_syscall", /* old ulimit syscall holder */
4463 "sys_olduname",
4464 "sys_umask", /* 60 */
4465 "sys_chroot",
4466 "sys_ustat",
4467 "sys_dup2",
4468 "sys_getppid",
4469 "sys_getpgrp", /* 65 */
4470 "sys_setsid",
4471 "sys_sigaction",
4472 "sys_sgetmask",
4473 "sys_ssetmask",
4474 "sys_setreuid16", /* 70 */
4475 "sys_setregid16",
4476 "sys_sigsuspend",
4477 "sys_sigpending",
4478 "sys_sethostname",
4479 "sys_setrlimit", /* 75 */
4480 "sys_old_getrlimit",
4481 "sys_getrusage",
4482 "sys_gettimeofday",
4483 "sys_settimeofday",
4484 "sys_getgroups16", /* 80 */
4485 "sys_setgroups16",
4486 "old_select",
4487 "sys_symlink",
4488 "sys_lstat",
4489 "sys_readlink", /* 85 */
4490 "sys_uselib",
4491 "sys_swapon",
4492 "sys_reboot",
4493 "old_readdir",
4494 "old_mmap", /* 90 */
4495 "sys_munmap",
4496 "sys_truncate",
4497 "sys_ftruncate",
4498 "sys_fchmod",
4499 "sys_fchown16", /* 95 */
4500 "sys_getpriority",
4501 "sys_setpriority",
4502 "sys_ni_syscall", /* old profil syscall holder */
4503 "sys_statfs",
4504 "sys_fstatfs", /* 100 */
4505 "sys_ioperm",
4506 "sys_socketcall",
4507 "sys_syslog",
4508 "sys_setitimer",
4509 "sys_getitimer", /* 105 */
4510 "sys_newstat",
4511 "sys_newlstat",
4512 "sys_newfstat",
4513 "sys_uname",
4514 "sys_iopl", /* 110 */
4515 "sys_vhangup",
4516 "sys_ni_syscall", /* old "idle" system call */
4517 "sys_vm86old",
4518 "sys_wait4",
4519 "sys_swapoff", /* 115 */
4520 "sys_sysinfo",
4521 "sys_ipc",
4522 "sys_fsync",
4523 "sys_sigreturn",
4524 "sys_clone", /* 120 */
4525 "sys_setdomainname",
4526 "sys_newuname",
4527 "sys_modify_ldt",
4528 "sys_adjtimex",
4529 "sys_mprotect", /* 125 */
4530 "sys_sigprocmask",
4531 "sys_ni_syscall", /* old "create_module" */
4532 "sys_init_module",
4533 "sys_delete_module",
4534 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4535 "sys_quotactl",
4536 "sys_getpgid",
4537 "sys_fchdir",
4538 "sys_bdflush",
4539 "sys_sysfs", /* 135 */
4540 "sys_personality",
4541 "sys_ni_syscall", /* reserved for afs_syscall */
4542 "sys_setfsuid16",
4543 "sys_setfsgid16",
4544 "sys_llseek", /* 140 */
4545 "sys_getdents",
4546 "sys_select",
4547 "sys_flock",
4548 "sys_msync",
4549 "sys_readv", /* 145 */
4550 "sys_writev",
4551 "sys_getsid",
4552 "sys_fdatasync",
4553 "sys_sysctl",
4554 "sys_mlock", /* 150 */
4555 "sys_munlock",
4556 "sys_mlockall",
4557 "sys_munlockall",
4558 "sys_sched_setparam",
4559 "sys_sched_getparam", /* 155 */
4560 "sys_sched_setscheduler",
4561 "sys_sched_getscheduler",
4562 "sys_sched_yield",
4563 "sys_sched_get_priority_max",
4564 "sys_sched_get_priority_min", /* 160 */
4565 "sys_sched_rr_get_interval",
4566 "sys_nanosleep",
4567 "sys_mremap",
4568 "sys_setresuid16",
4569 "sys_getresuid16", /* 165 */
4570 "sys_vm86",
4571 "sys_ni_syscall", /* Old sys_query_module */
4572 "sys_poll",
4573 "sys_nfsservctl",
4574 "sys_setresgid16", /* 170 */
4575 "sys_getresgid16",
4576 "sys_prctl",
4577 "sys_rt_sigreturn",
4578 "sys_rt_sigaction",
4579 "sys_rt_sigprocmask", /* 175 */
4580 "sys_rt_sigpending",
4581 "sys_rt_sigtimedwait",
4582 "sys_rt_sigqueueinfo",
4583 "sys_rt_sigsuspend",
4584 "sys_pread64", /* 180 */
4585 "sys_pwrite64",
4586 "sys_chown16",
4587 "sys_getcwd",
4588 "sys_capget",
4589 "sys_capset", /* 185 */
4590 "sys_sigaltstack",
4591 "sys_sendfile",
4592 "sys_ni_syscall", /* reserved for streams1 */
4593 "sys_ni_syscall", /* reserved for streams2 */
4594 "sys_vfork", /* 190 */
4595 "sys_getrlimit",
4596 "sys_mmap2",
4597 "sys_truncate64",
4598 "sys_ftruncate64",
4599 "sys_stat64", /* 195 */
4600 "sys_lstat64",
4601 "sys_fstat64",
4602 "sys_lchown",
4603 "sys_getuid",
4604 "sys_getgid", /* 200 */
4605 "sys_geteuid",
4606 "sys_getegid",
4607 "sys_setreuid",
4608 "sys_setregid",
4609 "sys_getgroups", /* 205 */
4610 "sys_setgroups",
4611 "sys_fchown",
4612 "sys_setresuid",
4613 "sys_getresuid",
4614 "sys_setresgid", /* 210 */
4615 "sys_getresgid",
4616 "sys_chown",
4617 "sys_setuid",
4618 "sys_setgid",
4619 "sys_setfsuid", /* 215 */
4620 "sys_setfsgid",
4621 "sys_pivot_root",
4622 "sys_mincore",
4623 "sys_madvise",
4624 "sys_getdents64", /* 220 */
4625 "sys_fcntl64",
4626 "sys_ni_syscall", /* reserved for TUX */
4627 "sys_ni_syscall",
4628 "sys_gettid",
4629 "sys_readahead", /* 225 */
4630 "sys_setxattr",
4631 "sys_lsetxattr",
4632 "sys_fsetxattr",
4633 "sys_getxattr",
4634 "sys_lgetxattr", /* 230 */
4635 "sys_fgetxattr",
4636 "sys_listxattr",
4637 "sys_llistxattr",
4638 "sys_flistxattr",
4639 "sys_removexattr", /* 235 */
4640 "sys_lremovexattr",
4641 "sys_fremovexattr",
4642 "sys_tkill",
4643 "sys_sendfile64",
4644 "sys_futex", /* 240 */
4645 "sys_sched_setaffinity",
4646 "sys_sched_getaffinity",
4647 "sys_set_thread_area",
4648 "sys_get_thread_area",
4649 "sys_io_setup", /* 245 */
4650 "sys_io_destroy",
4651 "sys_io_getevents",
4652 "sys_io_submit",
4653 "sys_io_cancel",
4654 "sys_fadvise64", /* 250 */
4655 "sys_ni_syscall",
4656 "sys_exit_group",
4657 "sys_lookup_dcookie",
4658 "sys_epoll_create",
4659 "sys_epoll_ctl", /* 255 */
4660 "sys_epoll_wait",
4661 "sys_remap_file_pages",
4662 "sys_set_tid_address",
4663 "sys_timer_create",
4664 "sys_timer_settime", /* 260 */
4665 "sys_timer_gettime",
4666 "sys_timer_getoverrun",
4667 "sys_timer_delete",
4668 "sys_clock_settime",
4669 "sys_clock_gettime", /* 265 */
4670 "sys_clock_getres",
4671 "sys_clock_nanosleep",
4672 "sys_statfs64",
4673 "sys_fstatfs64",
4674 "sys_tgkill", /* 270 */
4675 "sys_utimes",
4676 "sys_fadvise64_64",
4677 "sys_ni_syscall" /* sys_vserver */
4678 };
4679
4680 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4681 switch (uEAX)
4682 {
4683 default:
4684 if (uEAX < ELEMENTS(apsz))
4685 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4686 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4687 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4688 else
4689 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4690 break;
4691
4692 }
4693}
4694
4695
4696/**
4697 * Dumps an OpenBSD system call.
4698 * @param pVM VM handle.
4699 */
4700void remR3DumpOBsdSyscall(PVM pVM)
4701{
4702 static const char *apsz[] =
4703 {
4704 "SYS_syscall", //0
4705 "SYS_exit", //1
4706 "SYS_fork", //2
4707 "SYS_read", //3
4708 "SYS_write", //4
4709 "SYS_open", //5
4710 "SYS_close", //6
4711 "SYS_wait4", //7
4712 "SYS_8",
4713 "SYS_link", //9
4714 "SYS_unlink", //10
4715 "SYS_11",
4716 "SYS_chdir", //12
4717 "SYS_fchdir", //13
4718 "SYS_mknod", //14
4719 "SYS_chmod", //15
4720 "SYS_chown", //16
4721 "SYS_break", //17
4722 "SYS_18",
4723 "SYS_19",
4724 "SYS_getpid", //20
4725 "SYS_mount", //21
4726 "SYS_unmount", //22
4727 "SYS_setuid", //23
4728 "SYS_getuid", //24
4729 "SYS_geteuid", //25
4730 "SYS_ptrace", //26
4731 "SYS_recvmsg", //27
4732 "SYS_sendmsg", //28
4733 "SYS_recvfrom", //29
4734 "SYS_accept", //30
4735 "SYS_getpeername", //31
4736 "SYS_getsockname", //32
4737 "SYS_access", //33
4738 "SYS_chflags", //34
4739 "SYS_fchflags", //35
4740 "SYS_sync", //36
4741 "SYS_kill", //37
4742 "SYS_38",
4743 "SYS_getppid", //39
4744 "SYS_40",
4745 "SYS_dup", //41
4746 "SYS_opipe", //42
4747 "SYS_getegid", //43
4748 "SYS_profil", //44
4749 "SYS_ktrace", //45
4750 "SYS_sigaction", //46
4751 "SYS_getgid", //47
4752 "SYS_sigprocmask", //48
4753 "SYS_getlogin", //49
4754 "SYS_setlogin", //50
4755 "SYS_acct", //51
4756 "SYS_sigpending", //52
4757 "SYS_osigaltstack", //53
4758 "SYS_ioctl", //54
4759 "SYS_reboot", //55
4760 "SYS_revoke", //56
4761 "SYS_symlink", //57
4762 "SYS_readlink", //58
4763 "SYS_execve", //59
4764 "SYS_umask", //60
4765 "SYS_chroot", //61
4766 "SYS_62",
4767 "SYS_63",
4768 "SYS_64",
4769 "SYS_65",
4770 "SYS_vfork", //66
4771 "SYS_67",
4772 "SYS_68",
4773 "SYS_sbrk", //69
4774 "SYS_sstk", //70
4775 "SYS_61",
4776 "SYS_vadvise", //72
4777 "SYS_munmap", //73
4778 "SYS_mprotect", //74
4779 "SYS_madvise", //75
4780 "SYS_76",
4781 "SYS_77",
4782 "SYS_mincore", //78
4783 "SYS_getgroups", //79
4784 "SYS_setgroups", //80
4785 "SYS_getpgrp", //81
4786 "SYS_setpgid", //82
4787 "SYS_setitimer", //83
4788 "SYS_84",
4789 "SYS_85",
4790 "SYS_getitimer", //86
4791 "SYS_87",
4792 "SYS_88",
4793 "SYS_89",
4794 "SYS_dup2", //90
4795 "SYS_91",
4796 "SYS_fcntl", //92
4797 "SYS_select", //93
4798 "SYS_94",
4799 "SYS_fsync", //95
4800 "SYS_setpriority", //96
4801 "SYS_socket", //97
4802 "SYS_connect", //98
4803 "SYS_99",
4804 "SYS_getpriority", //100
4805 "SYS_101",
4806 "SYS_102",
4807 "SYS_sigreturn", //103
4808 "SYS_bind", //104
4809 "SYS_setsockopt", //105
4810 "SYS_listen", //106
4811 "SYS_107",
4812 "SYS_108",
4813 "SYS_109",
4814 "SYS_110",
4815 "SYS_sigsuspend", //111
4816 "SYS_112",
4817 "SYS_113",
4818 "SYS_114",
4819 "SYS_115",
4820 "SYS_gettimeofday", //116
4821 "SYS_getrusage", //117
4822 "SYS_getsockopt", //118
4823 "SYS_119",
4824 "SYS_readv", //120
4825 "SYS_writev", //121
4826 "SYS_settimeofday", //122
4827 "SYS_fchown", //123
4828 "SYS_fchmod", //124
4829 "SYS_125",
4830 "SYS_setreuid", //126
4831 "SYS_setregid", //127
4832 "SYS_rename", //128
4833 "SYS_129",
4834 "SYS_130",
4835 "SYS_flock", //131
4836 "SYS_mkfifo", //132
4837 "SYS_sendto", //133
4838 "SYS_shutdown", //134
4839 "SYS_socketpair", //135
4840 "SYS_mkdir", //136
4841 "SYS_rmdir", //137
4842 "SYS_utimes", //138
4843 "SYS_139",
4844 "SYS_adjtime", //140
4845 "SYS_141",
4846 "SYS_142",
4847 "SYS_143",
4848 "SYS_144",
4849 "SYS_145",
4850 "SYS_146",
4851 "SYS_setsid", //147
4852 "SYS_quotactl", //148
4853 "SYS_149",
4854 "SYS_150",
4855 "SYS_151",
4856 "SYS_152",
4857 "SYS_153",
4858 "SYS_154",
4859 "SYS_nfssvc", //155
4860 "SYS_156",
4861 "SYS_157",
4862 "SYS_158",
4863 "SYS_159",
4864 "SYS_160",
4865 "SYS_getfh", //161
4866 "SYS_162",
4867 "SYS_163",
4868 "SYS_164",
4869 "SYS_sysarch", //165
4870 "SYS_166",
4871 "SYS_167",
4872 "SYS_168",
4873 "SYS_169",
4874 "SYS_170",
4875 "SYS_171",
4876 "SYS_172",
4877 "SYS_pread", //173
4878 "SYS_pwrite", //174
4879 "SYS_175",
4880 "SYS_176",
4881 "SYS_177",
4882 "SYS_178",
4883 "SYS_179",
4884 "SYS_180",
4885 "SYS_setgid", //181
4886 "SYS_setegid", //182
4887 "SYS_seteuid", //183
4888 "SYS_lfs_bmapv", //184
4889 "SYS_lfs_markv", //185
4890 "SYS_lfs_segclean", //186
4891 "SYS_lfs_segwait", //187
4892 "SYS_188",
4893 "SYS_189",
4894 "SYS_190",
4895 "SYS_pathconf", //191
4896 "SYS_fpathconf", //192
4897 "SYS_swapctl", //193
4898 "SYS_getrlimit", //194
4899 "SYS_setrlimit", //195
4900 "SYS_getdirentries", //196
4901 "SYS_mmap", //197
4902 "SYS___syscall", //198
4903 "SYS_lseek", //199
4904 "SYS_truncate", //200
4905 "SYS_ftruncate", //201
4906 "SYS___sysctl", //202
4907 "SYS_mlock", //203
4908 "SYS_munlock", //204
4909 "SYS_205",
4910 "SYS_futimes", //206
4911 "SYS_getpgid", //207
4912 "SYS_xfspioctl", //208
4913 "SYS_209",
4914 "SYS_210",
4915 "SYS_211",
4916 "SYS_212",
4917 "SYS_213",
4918 "SYS_214",
4919 "SYS_215",
4920 "SYS_216",
4921 "SYS_217",
4922 "SYS_218",
4923 "SYS_219",
4924 "SYS_220",
4925 "SYS_semget", //221
4926 "SYS_222",
4927 "SYS_223",
4928 "SYS_224",
4929 "SYS_msgget", //225
4930 "SYS_msgsnd", //226
4931 "SYS_msgrcv", //227
4932 "SYS_shmat", //228
4933 "SYS_229",
4934 "SYS_shmdt", //230
4935 "SYS_231",
4936 "SYS_clock_gettime", //232
4937 "SYS_clock_settime", //233
4938 "SYS_clock_getres", //234
4939 "SYS_235",
4940 "SYS_236",
4941 "SYS_237",
4942 "SYS_238",
4943 "SYS_239",
4944 "SYS_nanosleep", //240
4945 "SYS_241",
4946 "SYS_242",
4947 "SYS_243",
4948 "SYS_244",
4949 "SYS_245",
4950 "SYS_246",
4951 "SYS_247",
4952 "SYS_248",
4953 "SYS_249",
4954 "SYS_minherit", //250
4955 "SYS_rfork", //251
4956 "SYS_poll", //252
4957 "SYS_issetugid", //253
4958 "SYS_lchown", //254
4959 "SYS_getsid", //255
4960 "SYS_msync", //256
4961 "SYS_257",
4962 "SYS_258",
4963 "SYS_259",
4964 "SYS_getfsstat", //260
4965 "SYS_statfs", //261
4966 "SYS_fstatfs", //262
4967 "SYS_pipe", //263
4968 "SYS_fhopen", //264
4969 "SYS_265",
4970 "SYS_fhstatfs", //266
4971 "SYS_preadv", //267
4972 "SYS_pwritev", //268
4973 "SYS_kqueue", //269
4974 "SYS_kevent", //270
4975 "SYS_mlockall", //271
4976 "SYS_munlockall", //272
4977 "SYS_getpeereid", //273
4978 "SYS_274",
4979 "SYS_275",
4980 "SYS_276",
4981 "SYS_277",
4982 "SYS_278",
4983 "SYS_279",
4984 "SYS_280",
4985 "SYS_getresuid", //281
4986 "SYS_setresuid", //282
4987 "SYS_getresgid", //283
4988 "SYS_setresgid", //284
4989 "SYS_285",
4990 "SYS_mquery", //286
4991 "SYS_closefrom", //287
4992 "SYS_sigaltstack", //288
4993 "SYS_shmget", //289
4994 "SYS_semop", //290
4995 "SYS_stat", //291
4996 "SYS_fstat", //292
4997 "SYS_lstat", //293
4998 "SYS_fhstat", //294
4999 "SYS___semctl", //295
5000 "SYS_shmctl", //296
5001 "SYS_msgctl", //297
5002 "SYS_MAXSYSCALL", //298
5003 //299
5004 //300
5005 };
5006 uint32_t uEAX;
5007 if (!LogIsEnabled())
5008 return;
5009 uEAX = CPUMGetGuestEAX(pVM);
5010 switch (uEAX)
5011 {
5012 default:
5013 if (uEAX < ELEMENTS(apsz))
5014 {
5015 uint32_t au32Args[8] = {0};
5016 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
5017 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5018 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5019 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5020 }
5021 else
5022 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
5023 break;
5024 }
5025}
5026
5027
5028#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Only built for the no-CRT Windows/x86 configuration; always reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
5036
/**
 * Minimal memcpy replacement for the no-CRT Windows/x86 build.
 *
 * Regions must not overlap (standard memcpy contract).
 *
 * @returns dst.
 * @param dst   Destination buffer.
 * @param src   Source buffer (now accessed through a const pointer; the old
 *              code dropped the const qualifier when initializing pbSrc).
 * @param size  Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5044
5045#endif
5046
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette