VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 17036

Last change on this file since 17036 was 17036, checked in by vboxsync, 16 years ago

nit picking.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 156.7 KB
Line 
1/* $Id: VBoxRecompiler.c 17036 2009-02-23 22:30:51Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Implemented as a structure assignment of the X86FPUMMX type so the compiler
 * can inline the copy; wrapped in do/while(0) for statement-safe expansion.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
83
84
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
/* Saved state callbacks (registered with SSM in REMR3Init) and state sync. */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
static void remR3StateUpdate(PVM pVM);

/* MMIO access callbacks, registered via cpu_register_io_memory (see g_apfnMMIORead/g_apfnMMIOWrite). */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

/* Access-handler callbacks, registered via cpu_register_io_memory (see g_apfnHandlerRead/g_apfnHandlerWrite). */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105
106
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the major recompiler phases; registered under /PROF/REM in REMR3Init. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Counters for the various reasons raw-mode execution gets refused. */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor-table / TR change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Per-selector out-of-sync counters; indexed ES=0..GS=5 (see registrations). */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
144
/*
 * Global stuff.
 */

/** MMIO read callbacks, ordered U8/U16/U32 as cpu_register_io_memory expects. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, ordered U8/U16/U32 as cpu_register_io_memory expects. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks, ordered U8/U16/U32 as cpu_register_io_memory expects. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks, ordered U8/U16/U32 as cpu_register_io_memory expects. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
180
181
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 * (Excluded on win.amd64 -- presumably a build/ABI limitation; confirm.)
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors; registered once in REMR3Init via DBGCRegisterCommands. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
213
214
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
/* NOTE(review): both branches of this #if are now identical; the conditional
   could be dropped, kept as-is here to preserve history. */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/* Prologue code, must be in lower 4G to simplify jumps to/from generated code */
/* Allocated with RTMemExecAlloc in REMR3Init; consumed by the code generator. */
uint8_t* code_gen_prologue;
234
235/**
236 * Initializes the REM.
237 *
238 * @returns VBox status code.
239 * @param pVM The VM to operate on.
240 */
241REMR3DECL(int) REMR3Init(PVM pVM)
242{
243 uint32_t u32Dummy;
244 int rc;
245
246#ifdef VBOX_ENABLE_VBOXREM64
247 LogRel(("Using 64-bit aware REM\n"));
248#endif
249
250 /*
251 * Assert sanity.
252 */
253 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
254 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
255 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
256#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
257 Assert(!testmath());
258#endif
259 /*
260 * Init some internal data members.
261 */
262 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
263 pVM->rem.s.Env.pVM = pVM;
264#ifdef CPU_RAW_MODE_INIT
265 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
266#endif
267
268 /* ctx. */
269 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
270 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
271
272 /* ignore all notifications */
273 pVM->rem.s.fIgnoreAll = true;
274
275 code_gen_prologue = RTMemExecAlloc(_1K);
276
277 cpu_exec_init_all(0);
278
279 /*
280 * Init the recompiler.
281 */
282 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
283 {
284 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
285 return VERR_GENERAL_FAILURE;
286 }
287 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
288 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
289
290 /* allocate code buffer for single instruction emulation. */
291 pVM->rem.s.Env.cbCodeBuffer = 4096;
292 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
293 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
294
295 /* finally, set the cpu_single_env global. */
296 cpu_single_env = &pVM->rem.s.Env;
297
298 /* Nothing is pending by default */
299 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
300
301 /*
302 * Register ram types.
303 */
304 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
305 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
306 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
307 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
308 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
309
310 /* stop ignoring. */
311 pVM->rem.s.fIgnoreAll = false;
312
313 /*
314 * Register the saved state data unit.
315 */
316 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
317 NULL, remR3Save, NULL,
318 NULL, remR3Load, NULL);
319 if (RT_FAILURE(rc))
320 return rc;
321
322#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
323 /*
324 * Debugger commands.
325 */
326 static bool fRegisteredCmds = false;
327 if (!fRegisteredCmds)
328 {
329 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
330 if (RT_SUCCESS(rc))
331 fRegisteredCmds = true;
332 }
333#endif
334
335#ifdef VBOX_WITH_STATISTICS
336 /*
337 * Statistics.
338 */
339 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
340 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
341 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
342 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
343 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
344 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
345 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
348 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
349 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
350 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
351
352 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
353
354 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
355 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
356 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
357 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
358 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
359 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
360 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
361 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
362 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
363 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
364 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
365
366 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
367 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
368 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
369 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
370
371 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
372 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
373 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
377
378 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
379 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
384
385
386#endif
387
388#ifdef DEBUG_ALL_LOGGING
389 loglevel = ~0;
390# ifdef DEBUG_TMP_LOGGING
391 logfile = fopen("/tmp/vbox-qemu.log", "w");
392# endif
393#endif
394
395 return rc;
396}
397
398
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * Currently a no-op: the REM state lives inside the VM structure and the
 * executable allocations made in REMR3Init are not freed here (the process
 * is going away anyway -- NOTE(review): assumption, confirm if REM ever
 * gets re-initialized within one process).
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    return VINF_SUCCESS;
}
412
413
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * fIgnoreAll is raised around cpu_reset so that any notifications it
     * triggers are discarded; the order of these statements matters.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;   /* drop the queued page invalidations */
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
438
439
/**
 * Execute state save operation.
 *
 * Record layout (must stay in sync with remR3Load):
 *   u32 Env.hflags, u32 ~0 separator, u32 raw-ring-0 flag,
 *   uint pending interrupt, u32 ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Intermediate put errors surface here; SSM latches the first failure. */
    return SSMR3PutU32(pSSM, ~0);       /* terminator */
}
465
466
/**
 * Execute state load operation.
 *
 * Understands the current format (see remR3Save) and the older 1.6 format,
 * which additionally carried a redundant CPU state dump and the invalidated
 * page queue; both extras are read and the CPU dump is discarded.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   u32Version      Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         * (The invalidated page queue was saved in the 1.6 format only.)
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            /* Guard against a corrupt count overrunning the fixed array. */
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
588
589
590
591#undef LOG_GROUP
592#define LOG_GROUP LOG_GROUP_REM_RUN
593
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int         rc, interrupt_request;
    RTGCPTR     GCPtrPC;
    bool        fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * (interrupt_request is saved and restored at the bottom.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that has to be disabled before we start stepping,
     * otherwise we'd trap on it instead of stepping past it. Re-armed below if removed.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Successful step: nudge the TSC and virtual clock forward. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was raised inside the recompiler; fetch and clear it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-insert the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
674
675
676/**
677 * Set a breakpoint using the REM facilities.
678 *
679 * @returns VBox status code.
680 * @param pVM The VM handle.
681 * @param Address The breakpoint address.
682 * @thread The emulation thread.
683 */
684REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
685{
686 VM_ASSERT_EMT(pVM);
687 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
688 {
689 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
690 return VINF_SUCCESS;
691 }
692 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
693 return VERR_REM_NO_MORE_BP_SLOTS;
694}
695
696
697/**
698 * Clears a breakpoint set by REMR3BreakpointSet().
699 *
700 * @returns VBox status code.
701 * @param pVM The VM handle.
702 * @param Address The breakpoint address.
703 * @thread The emulation thread.
704 */
705REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
706{
707 VM_ASSERT_EMT(pVM);
708 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
709 {
710 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
711 return VINF_SUCCESS;
712 }
713 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
714 return VERR_REM_BP_NOT_FOUND;
715}
716
717
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * (fFlushTBs is restored right after; REMR3State may have cleared it.)
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the breakpoint table for the current PC. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             * Fetch the stored status code and reset the stash to a poison value.
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         * (Restore the saved interrupt_request before syncing back.)
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
865
866
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code translated from the QEmu exit code (EXCP_*):
 *          VINF_SUCCESS, VINF_EM_HALT, VINF_EM_DBG_STEPPED,
 *          VINF_EM_DBG_BREAKPOINT, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_HWACC, or the status raised via remR3RaiseRC.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Bracket the execution with TM notifications so the time spent in the
       recompiler can be accounted for. */
    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* Breakpoint or single step? Scan the registered breakpoints for
               the current PC; a hit means breakpoint, otherwise it was a step. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         * Hand the stashed status code back and reset the stash.
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         * (Note: unknown codes are mapped to VINF_SUCCESS here, unlike
         * REMR3EmulateInstruction which reschedules.)
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
993
994
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. (Also consulted at the bottom: we never reschedule on
       the very first call.) */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Note: QEmu keeps the descriptor attributes shifted up 8 bits in the
         * 'flags' field, hence the '(flags >> 8) & 0xF0FF' conversions below
         * (the inverse of the '<< 8' applied when loading in REMR3State).
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
#ifdef LOG_ENABLED
        Ctx.rip            = env->eip;
#endif

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    /* Single-instruction emulation, single stepping and breakpoints must all
       stay in the recompiler. */
    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable when the guest CPU profile reports PAE support. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 (supervisor) code. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* PATM patch code is always executed raw, bypassing the later checks. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1231
1232
1233/**
1234 * Fetches a code byte.
1235 *
1236 * @returns Success indicator (bool) for ease of use.
1237 * @param env The CPU environment structure.
1238 * @param GCPtrInstr Where to fetch code.
1239 * @param pu8Byte Where to store the byte on success
1240 */
1241bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1242{
1243 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1244 if (RT_SUCCESS(rc))
1245 return true;
1246 return false;
1247}
1248
1249
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers in the CPUM context before calling
     * PGMInvalidatePage, so PGM sees the current paging setup.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    rc = PGMInvalidatePage(pVM, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Invalidation failed; force a full CR3 resync before reentering the guest. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1294
1295
1296#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Translates a guest physical address into an R3 pointer for the QEmu TLB.
 *
 * The two least significant bits of the return value carry status:
 * value 1 (bit 0) means the translation failed entirely, bit 1 set on an
 * otherwise valid pointer means writes must be caught (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 *
 * @returns R3 pointer, possibly tagged in the low bits as described above.
 * @param   env1        The CPU environment.
 * @param   physAddr    Guest physical address to translate.
 * @param   fWritable   Whether write access is requested.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): fWritable is not forwarded - the call always requests a
       writable mapping (true). Presumably intentional since write monitoring
       is signalled via the CATCH_WRITE tag bit, but worth confirming. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
        || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
        || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
        || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1316
1317target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr)
1318{
1319 RTGCPHYS rv = 0;
1320 int rc;
1321
1322 rc = PGMR3DbgR3Ptr2GCPhys(env1->pVM, (RTR3PTR)addr, &rv);
1323 Assert (RT_SUCCESS(rc));
1324
1325 return (target_ulong)rv;
1326}
1327#endif
1328
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * Hands the page over to CSAM for write monitoring (self-modifying code
 * detection, cf. VBOX_REM_PROTECT_PAGES_FROM_SMC), but only for paged
 * supervisor-mode code outside V86 mode and only when HWACCM is not active.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
         &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)       /* ignore during single instruction execution */
         &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
         &&  !(env->eflags & VM_MASK)                       /* no V86 mode */
         &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1347
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * Mirror image of remR3ProtectCode: removes the CSAM page monitor under the
 * same conditions it was installed. (Note: unlike remR3ProtectCode, the
 * fInREM assertion here is outside the #ifdef.)
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
         &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)       /* ignore during single instruction execution */
         &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
         &&  !(env->eflags & VM_MASK)                       /* no V86 mode */
         &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1366
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so any flush is a global one.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers in the CPUM context before calling PGMFlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    PGMFlushTLB(pVM, env->cr[3], fGlobal);
}
1407
1408
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * A failure to switch paging mode is fatal (cpu_abort).
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    int rc;
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
#else
    /* 32-bit targets have no EFER to pass along. */
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
#endif
}
1447
1448
/**
 * Called from compiled code to run dma.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    /* Suspend the emulated-code profiling while PDM does the DMA work. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1460
1461
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * @param env Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    /* Switch profiling from emulated-code time to timer time while TM
       processes the timer queues. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1476
1477
/**
 * Record trap occurrence.
 *
 * Tracks repeated hardware exceptions: if the same trap keeps firing at the
 * same EIP/CR2 more than 512 times, VERR_REM_TOO_MANY_TRAPS is raised so the
 * caller can break out instead of looping forever.
 *
 * @returns VBox status code (VINF_SUCCESS or VERR_REM_TOO_MANY_TRAPS).
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool        s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    /* Lazily register a STAM counter per trap number on first occurrence. */
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only hardware exceptions (vectors 0..0x1f) raised in protected,
       non-V86 mode take part in the repeat detection. */
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap as last time at the same location? Count it, and bail
           out after 512 repeats. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Different trap or different EIP/CR2: restart the repeat count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Software interrupts and real/V86-mode traps just reset the tracking. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1537
1538
/**
 * Clear current active trap.
 *
 * Resets the pending-exception tracking maintained by remR3NotifyTrap.
 *
 * @param   pVM     VM Handle.
 */
void remR3TrapClear(PVM pVM)
{
    pVM->rem.s.cPendingExceptions = 0;
    pVM->rem.s.uPendingException  = 0;
    pVM->rem.s.uPendingExcptEIP   = 0;
    pVM->rem.s.uPendingExcptCR2   = 0;
}
1551
1552
/**
 * Record previous call instruction addresses.
 *
 * Feeds the current EIP into CSAM's call-address history.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1562
1563
1564/**
1565 * Syncs the internal REM state with the VM.
1566 *
1567 * This must be called before REMR3Run() is invoked whenever when the REM
1568 * state is not up to date. Calling it several times in a row is not
1569 * permitted.
1570 *
1571 * @returns VBox status code.
1572 *
1573 * @param pVM VM Handle.
1574 * @param fFlushTBs Flush all translation blocks before executing code
1575 *
1576 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *          not do this since the majority of the callers don't want any unnecessary events
 *          pending that would immediately interrupt execution.
1579 */
1580REMR3DECL(int) REMR3State(PVM pVM)
1581{
1582 register const CPUMCTX *pCtx;
1583 register unsigned fFlags;
1584 bool fHiddenSelRegsValid;
1585 unsigned i;
1586 TRPMEVENT enmType;
1587 uint8_t u8TrapNo;
1588 int rc;
1589
1590 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1591 Log2(("REMR3State:\n"));
1592
1593 pCtx = pVM->rem.s.pCtx;
1594 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1595
1596 Assert(!pVM->rem.s.fInREM);
1597 pVM->rem.s.fInStateSync = true;
1598
1599 /*
1600 * If we have to flush TBs, do that immediately.
1601 */
1602 if (pVM->rem.s.fFlushTBs)
1603 {
1604 STAM_COUNTER_INC(&gStatFlushTBs);
1605 tb_flush(&pVM->rem.s.Env);
1606 pVM->rem.s.fFlushTBs = false;
1607 }
1608
1609 /*
1610 * Copy the registers which require no special handling.
1611 */
1612#ifdef TARGET_X86_64
1613 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1614 Assert(R_EAX == 0);
1615 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1616 Assert(R_ECX == 1);
1617 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1618 Assert(R_EDX == 2);
1619 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1620 Assert(R_EBX == 3);
1621 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1622 Assert(R_ESP == 4);
1623 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1624 Assert(R_EBP == 5);
1625 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1626 Assert(R_ESI == 6);
1627 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1628 Assert(R_EDI == 7);
1629 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1630 pVM->rem.s.Env.regs[8] = pCtx->r8;
1631 pVM->rem.s.Env.regs[9] = pCtx->r9;
1632 pVM->rem.s.Env.regs[10] = pCtx->r10;
1633 pVM->rem.s.Env.regs[11] = pCtx->r11;
1634 pVM->rem.s.Env.regs[12] = pCtx->r12;
1635 pVM->rem.s.Env.regs[13] = pCtx->r13;
1636 pVM->rem.s.Env.regs[14] = pCtx->r14;
1637 pVM->rem.s.Env.regs[15] = pCtx->r15;
1638
1639 pVM->rem.s.Env.eip = pCtx->rip;
1640
1641 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1642#else
1643 Assert(R_EAX == 0);
1644 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1645 Assert(R_ECX == 1);
1646 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1647 Assert(R_EDX == 2);
1648 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1649 Assert(R_EBX == 3);
1650 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1651 Assert(R_ESP == 4);
1652 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1653 Assert(R_EBP == 5);
1654 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1655 Assert(R_ESI == 6);
1656 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1657 Assert(R_EDI == 7);
1658 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1659 pVM->rem.s.Env.eip = pCtx->eip;
1660
1661 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1662#endif
1663
1664 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1665
1666 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1667 for (i=0;i<8;i++)
1668 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1669
1670 /*
1671 * Clear the halted hidden flag (the interrupt waking up the CPU can
1672 * have been dispatched in raw mode).
1673 */
1674 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1675
1676 /*
1677 * Replay invlpg?
1678 */
1679 if (pVM->rem.s.cInvalidatedPages)
1680 {
1681 RTUINT i;
1682
1683 pVM->rem.s.fIgnoreInvlPg = true;
1684 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1685 {
1686 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1687 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1688 }
1689 pVM->rem.s.fIgnoreInvlPg = false;
1690 pVM->rem.s.cInvalidatedPages = 0;
1691 }
1692
1693 /* Replay notification changes? */
1694 if (pVM->rem.s.cHandlerNotifications)
1695 REMR3ReplayHandlerNotifications(pVM);
1696
1697 /* Update MSRs; before CRx registers! */
1698 pVM->rem.s.Env.efer = pCtx->msrEFER;
1699 pVM->rem.s.Env.star = pCtx->msrSTAR;
1700 pVM->rem.s.Env.pat = pCtx->msrPAT;
1701#ifdef TARGET_X86_64
1702 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1703 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1704 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1705 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1706
1707 /* Update the internal long mode activate flag according to the new EFER value. */
1708 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1709 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1710 else
1711 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1712#endif
1713
1714
1715 /*
1716 * Registers which are rarely changed and require special handling / order when changed.
1717 */
1718 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1719 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1720 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1721 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1722 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1723 {
1724 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1725 {
1726 pVM->rem.s.fIgnoreCR3Load = true;
1727 tlb_flush(&pVM->rem.s.Env, true);
1728 pVM->rem.s.fIgnoreCR3Load = false;
1729 }
1730
1731 /* CR4 before CR0! */
1732 if (fFlags & CPUM_CHANGED_CR4)
1733 {
1734 pVM->rem.s.fIgnoreCR3Load = true;
1735 pVM->rem.s.fIgnoreCpuMode = true;
1736 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1737 pVM->rem.s.fIgnoreCpuMode = false;
1738 pVM->rem.s.fIgnoreCR3Load = false;
1739 }
1740
1741 if (fFlags & CPUM_CHANGED_CR0)
1742 {
1743 pVM->rem.s.fIgnoreCR3Load = true;
1744 pVM->rem.s.fIgnoreCpuMode = true;
1745 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1746 pVM->rem.s.fIgnoreCpuMode = false;
1747 pVM->rem.s.fIgnoreCR3Load = false;
1748 }
1749
1750 if (fFlags & CPUM_CHANGED_CR3)
1751 {
1752 pVM->rem.s.fIgnoreCR3Load = true;
1753 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1754 pVM->rem.s.fIgnoreCR3Load = false;
1755 }
1756
1757 if (fFlags & CPUM_CHANGED_GDTR)
1758 {
1759 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1760 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1761 }
1762
1763 if (fFlags & CPUM_CHANGED_IDTR)
1764 {
1765 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1766 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1767 }
1768
1769 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1770 {
1771 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1772 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1773 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1774 }
1775
1776 if (fFlags & CPUM_CHANGED_LDTR)
1777 {
1778 if (fHiddenSelRegsValid)
1779 {
1780 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1781 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1782 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1783 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1784 }
1785 else
1786 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1787 }
1788
1789 if (fFlags & CPUM_CHANGED_CPUID)
1790 {
1791 uint32_t u32Dummy;
1792
1793 /*
1794 * Get the CPUID features.
1795 */
1796 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1797 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1798 }
1799
1800 /* Sync FPU state after CR4, CPUID and EFER (!). */
1801 if (fFlags & CPUM_CHANGED_FPU_REM)
1802 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1803 }
1804
1805 /*
1806 * Sync TR unconditionally to make life simpler.
1807 */
1808 pVM->rem.s.Env.tr.selector = pCtx->tr;
1809 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1810 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1811 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1812 /* Note! do_interrupt will fault if the busy flag is still set... */
1813 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1814
1815 /*
1816 * Update selector registers.
1817 * This must be done *after* we've synced gdt, ldt and crX registers
1818 * since we're reading the GDT/LDT om sync_seg. This will happen with
1819 * saved state which takes a quick dip into rawmode for instance.
1820 */
1821 /*
1822 * Stack; Note first check this one as the CPL might have changed. The
1823 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1824 */
1825
1826 if (fHiddenSelRegsValid)
1827 {
1828 /* The hidden selector registers are valid in the CPU context. */
1829 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1830
1831 /* Set current CPL */
1832 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1833
1834 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1835 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1836 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1837 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1838 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1839 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1840 }
1841 else
1842 {
1843 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1844 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1845 {
1846 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1847
1848 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1849 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1850#ifdef VBOX_WITH_STATISTICS
1851 if (pVM->rem.s.Env.segs[R_SS].newselector)
1852 {
1853 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1854 }
1855#endif
1856 }
1857 else
1858 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1859
1860 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1861 {
1862 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1863 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1864#ifdef VBOX_WITH_STATISTICS
1865 if (pVM->rem.s.Env.segs[R_ES].newselector)
1866 {
1867 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1868 }
1869#endif
1870 }
1871 else
1872 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1873
1874 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1875 {
1876 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1877 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1878#ifdef VBOX_WITH_STATISTICS
1879 if (pVM->rem.s.Env.segs[R_CS].newselector)
1880 {
1881 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1882 }
1883#endif
1884 }
1885 else
1886 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1887
1888 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1889 {
1890 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1891 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1892#ifdef VBOX_WITH_STATISTICS
1893 if (pVM->rem.s.Env.segs[R_DS].newselector)
1894 {
1895 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1896 }
1897#endif
1898 }
1899 else
1900 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1901
1902 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1903 * be the same but not the base/limit. */
1904 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1905 {
1906 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1907 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1908#ifdef VBOX_WITH_STATISTICS
1909 if (pVM->rem.s.Env.segs[R_FS].newselector)
1910 {
1911 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1912 }
1913#endif
1914 }
1915 else
1916 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1917
1918 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1919 {
1920 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1921 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1922#ifdef VBOX_WITH_STATISTICS
1923 if (pVM->rem.s.Env.segs[R_GS].newselector)
1924 {
1925 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1926 }
1927#endif
1928 }
1929 else
1930 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1931 }
1932
1933 /*
1934 * Check for traps.
1935 */
1936 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
1937 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
1938 if (RT_SUCCESS(rc))
1939 {
1940#ifdef DEBUG
1941 if (u8TrapNo == 0x80)
1942 {
1943 remR3DumpLnxSyscall(pVM);
1944 remR3DumpOBsdSyscall(pVM);
1945 }
1946#endif
1947
1948 pVM->rem.s.Env.exception_index = u8TrapNo;
1949 if (enmType != TRPM_SOFTWARE_INT)
1950 {
1951 pVM->rem.s.Env.exception_is_int = 0;
1952 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
1953 }
1954 else
1955 {
1956 /*
1957 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
1958 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
1959 * for int03 and into.
1960 */
1961 pVM->rem.s.Env.exception_is_int = 1;
1962 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
1963 /* int 3 may be generated by one-byte 0xcc */
1964 if (u8TrapNo == 3)
1965 {
1966 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
1967 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1968 }
1969 /* int 4 may be generated by one-byte 0xce */
1970 else if (u8TrapNo == 4)
1971 {
1972 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
1973 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1974 }
1975 }
1976
1977 /* get error code and cr2 if needed. */
1978 switch (u8TrapNo)
1979 {
1980 case 0x0e:
1981 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
1982 /* fallthru */
1983 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
1984 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
1985 break;
1986
1987 case 0x11: case 0x08:
1988 default:
1989 pVM->rem.s.Env.error_code = 0;
1990 break;
1991 }
1992
1993 /*
1994 * We can now reset the active trap since the recompiler is gonna have a go at it.
1995 */
1996 rc = TRPMResetTrap(pVM);
1997 AssertRC(rc);
1998 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
1999 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2000 }
2001
2002 /*
2003 * Clear old interrupt request flags; Check for pending hardware interrupts.
2004 * (See @remark for why we don't check for other FFs.)
2005 */
2006 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2007 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2008 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2009 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2010
2011 /*
2012 * We're now in REM mode.
2013 */
2014 pVM->rem.s.fInREM = true;
2015 pVM->rem.s.fInStateSync = false;
2016 pVM->rem.s.cCanExecuteRaw = 0;
2017 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2018 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2019 return VINF_SUCCESS;
2020}
2021
2022
2023/**
2024 * Syncs back changes in the REM state to the the VM state.
2025 *
2026 * This must be called after invoking REMR3Run().
2027 * Calling it several times in a row is not permitted.
2028 *
2029 * @returns VBox status code.
2030 *
2031 * @param pVM VM Handle.
2032 */
2033REMR3DECL(int) REMR3StateBack(PVM pVM)
2034{
2035 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2036 unsigned i;
2037
2038 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2039 Log2(("REMR3StateBack:\n"));
2040 Assert(pVM->rem.s.fInREM);
2041
2042 /*
2043 * Copy back the registers.
2044 * This is done in the order they are declared in the CPUMCTX structure.
2045 */
2046
2047 /** @todo FOP */
2048 /** @todo FPUIP */
2049 /** @todo CS */
2050 /** @todo FPUDP */
2051 /** @todo DS */
2052
2053 /** @todo check if FPU/XMM was actually used in the recompiler */
2054 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2055//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2056
2057#ifdef TARGET_X86_64
2058 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2059 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2060 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2061 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2062 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2063 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2064 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2065 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2066 pCtx->r8 = pVM->rem.s.Env.regs[8];
2067 pCtx->r9 = pVM->rem.s.Env.regs[9];
2068 pCtx->r10 = pVM->rem.s.Env.regs[10];
2069 pCtx->r11 = pVM->rem.s.Env.regs[11];
2070 pCtx->r12 = pVM->rem.s.Env.regs[12];
2071 pCtx->r13 = pVM->rem.s.Env.regs[13];
2072 pCtx->r14 = pVM->rem.s.Env.regs[14];
2073 pCtx->r15 = pVM->rem.s.Env.regs[15];
2074
2075 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2076
2077#else
2078 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2079 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2080 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2081 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2082 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2083 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2084 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2085
2086 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2087#endif
2088
2089 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2090
2091#ifdef VBOX_WITH_STATISTICS
2092 if (pVM->rem.s.Env.segs[R_SS].newselector)
2093 {
2094 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2095 }
2096 if (pVM->rem.s.Env.segs[R_GS].newselector)
2097 {
2098 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2099 }
2100 if (pVM->rem.s.Env.segs[R_FS].newselector)
2101 {
2102 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2103 }
2104 if (pVM->rem.s.Env.segs[R_ES].newselector)
2105 {
2106 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2107 }
2108 if (pVM->rem.s.Env.segs[R_DS].newselector)
2109 {
2110 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2111 }
2112 if (pVM->rem.s.Env.segs[R_CS].newselector)
2113 {
2114 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2115 }
2116#endif
2117 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2118 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2119 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2120 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2121 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2122
2123#ifdef TARGET_X86_64
2124 pCtx->rip = pVM->rem.s.Env.eip;
2125 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2126#else
2127 pCtx->eip = pVM->rem.s.Env.eip;
2128 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2129#endif
2130
2131 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2132 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2133 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2134 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2135
2136 for (i=0;i<8;i++)
2137 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2138
2139 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2140 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2141 {
2142 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2143 STAM_COUNTER_INC(&gStatREMGDTChange);
2144 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2145 }
2146
2147 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2148 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2149 {
2150 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2151 STAM_COUNTER_INC(&gStatREMIDTChange);
2152 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2153 }
2154
2155 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2156 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2157 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2158 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2159 {
2160 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2161 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2162 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2163 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2164 STAM_COUNTER_INC(&gStatREMLDTRChange);
2165 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2166 }
2167
2168 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2169 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2170 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2171 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2172 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2173 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2174 : 0) )
2175 {
2176 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2177 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2178 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2179 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2180 pCtx->tr = pVM->rem.s.Env.tr.selector;
2181 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2182 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2183 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2184 if (pCtx->trHid.Attr.u)
2185 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2186 STAM_COUNTER_INC(&gStatREMTRChange);
2187 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2188 }
2189
2190 /** @todo These values could still be out of sync! */
2191 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2192 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2193 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2194 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2195
2196 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2197 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2198 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2199
2200 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2201 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2202 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2203
2204 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2205 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2206 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2207
2208 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2209 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2210 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2211
2212 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2213 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2214 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2215
2216 /* Sysenter MSR */
2217 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2218 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2219 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2220
2221 /* System MSRs. */
2222 pCtx->msrEFER = pVM->rem.s.Env.efer;
2223 pCtx->msrSTAR = pVM->rem.s.Env.star;
2224 pCtx->msrPAT = pVM->rem.s.Env.pat;
2225#ifdef TARGET_X86_64
2226 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2227 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2228 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2229 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2230#endif
2231
2232 remR3TrapClear(pVM);
2233
2234 /*
2235 * Check for traps.
2236 */
2237 if ( pVM->rem.s.Env.exception_index >= 0
2238 && pVM->rem.s.Env.exception_index < 256)
2239 {
2240 int rc;
2241
2242 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2243 rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2244 AssertRC(rc);
2245 switch (pVM->rem.s.Env.exception_index)
2246 {
2247 case 0x0e:
2248 TRPMSetFaultAddress(pVM, pCtx->cr2);
2249 /* fallthru */
2250 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2251 case 0x11: case 0x08: /* 0 */
2252 TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
2253 break;
2254 }
2255
2256 }
2257
2258 /*
2259 * We're not longer in REM mode.
2260 */
2261 pVM->rem.s.fInREM = false;
2262 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2263 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2264 return VINF_SUCCESS;
2265}
2266
2267
2268/**
2269 * This is called by the disassembler when it wants to update the cpu state
2270 * before for instance doing a register dump.
2271 */
2272static void remR3StateUpdate(PVM pVM)
2273{
2274 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2275 unsigned i;
2276
2277 Assert(pVM->rem.s.fInREM);
2278
2279 /*
2280 * Copy back the registers.
2281 * This is done in the order they are declared in the CPUMCTX structure.
2282 */
2283
2284 /** @todo FOP */
2285 /** @todo FPUIP */
2286 /** @todo CS */
2287 /** @todo FPUDP */
2288 /** @todo DS */
2289 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2290 pCtx->fpu.MXCSR = 0;
2291 pCtx->fpu.MXCSR_MASK = 0;
2292
2293 /** @todo check if FPU/XMM was actually used in the recompiler */
2294 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2295//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2296
2297#ifdef TARGET_X86_64
2298 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2299 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2300 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2301 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2302 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2303 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2304 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2305 pCtx->r8 = pVM->rem.s.Env.regs[8];
2306 pCtx->r9 = pVM->rem.s.Env.regs[9];
2307 pCtx->r10 = pVM->rem.s.Env.regs[10];
2308 pCtx->r11 = pVM->rem.s.Env.regs[11];
2309 pCtx->r12 = pVM->rem.s.Env.regs[12];
2310 pCtx->r13 = pVM->rem.s.Env.regs[13];
2311 pCtx->r14 = pVM->rem.s.Env.regs[14];
2312 pCtx->r15 = pVM->rem.s.Env.regs[15];
2313
2314 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2315#else
2316 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2317 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2318 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2319 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2320 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2321 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2322 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2323
2324 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2325#endif
2326
2327 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2328
2329 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2330 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2331 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2332 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2333 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2334
2335#ifdef TARGET_X86_64
2336 pCtx->rip = pVM->rem.s.Env.eip;
2337 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2338#else
2339 pCtx->eip = pVM->rem.s.Env.eip;
2340 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2341#endif
2342
2343 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2344 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2345 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2346 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2347
2348 for (i=0;i<8;i++)
2349 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2350
2351 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2352 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2353 {
2354 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2355 STAM_COUNTER_INC(&gStatREMGDTChange);
2356 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2357 }
2358
2359 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2360 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2361 {
2362 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2363 STAM_COUNTER_INC(&gStatREMIDTChange);
2364 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2365 }
2366
2367 if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
2368 {
2369 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2370 STAM_COUNTER_INC(&gStatREMLDTRChange);
2371 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2372 }
2373 if (pCtx->tr != pVM->rem.s.Env.tr.selector)
2374 {
2375 pCtx->tr = pVM->rem.s.Env.tr.selector;
2376 STAM_COUNTER_INC(&gStatREMTRChange);
2377 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2378 }
2379
2380 /** @todo These values could still be out of sync! */
2381 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2382 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2383 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2384 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2385
2386 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2387 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2388 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2389
2390 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2391 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2392 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2393
2394 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2395 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2396 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2397
2398 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2399 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2400 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2401
2402 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2403 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2404 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2405
2406 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2407 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2408 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2409
2410 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2411 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2412 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;
2413
2414 /* Sysenter MSR */
2415 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2416 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2417 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2418
2419 /* System MSRs. */
2420 pCtx->msrEFER = pVM->rem.s.Env.efer;
2421 pCtx->msrSTAR = pVM->rem.s.Env.star;
2422 pCtx->msrPAT = pVM->rem.s.Env.pat;
2423#ifdef TARGET_X86_64
2424 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2425 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2426 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2427 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2428#endif
2429
2430}
2431
2432
2433/**
2434 * Update the VMM state information if we're currently in REM.
2435 *
2436 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2437 * we're currently executing in REM and the VMM state is invalid. This method will of
2438 * course check that we're executing in REM before syncing any data over to the VMM.
2439 *
2440 * @param pVM The VM handle.
2441 */
2442REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2443{
2444 if (pVM->rem.s.fInREM)
2445 remR3StateUpdate(pVM);
2446}
2447
2448
2449#undef LOG_GROUP
2450#define LOG_GROUP LOG_GROUP_REM
2451
2452
2453/**
2454 * Notify the recompiler about Address Gate 20 state change.
2455 *
2456 * This notification is required since A20 gate changes are
2457 * initialized from a device driver and the VM might just as
2458 * well be in REM mode as in RAW mode.
2459 *
2460 * @param pVM VM handle.
2461 * @param fEnable True if the gate should be enabled.
2462 * False if the gate should be disabled.
2463 */
2464REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2465{
2466 bool fSaved;
2467
2468 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2469 VM_ASSERT_EMT(pVM);
2470
2471 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2472 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2473
2474 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2475
2476 pVM->rem.s.fIgnoreAll = fSaved;
2477}
2478
2479
2480/**
2481 * Replays the invalidated recorded pages.
2482 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2483 *
2484 * @param pVM VM handle.
2485 */
2486REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2487{
2488 RTUINT i;
2489
2490 VM_ASSERT_EMT(pVM);
2491
2492 /*
2493 * Sync the required registers.
2494 */
2495 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2496 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2497 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2498 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2499
2500 /*
2501 * Replay the flushes.
2502 */
2503 pVM->rem.s.fIgnoreInvlPg = true;
2504 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2505 {
2506 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2507 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2508 }
2509 pVM->rem.s.fIgnoreInvlPg = false;
2510 pVM->rem.s.cInvalidatedPages = 0;
2511}
2512
2513
2514/**
2515 * Replays the handler notification changes
2516 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2517 *
2518 * @param pVM VM handle.
2519 */
2520REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2521{
2522 /*
2523 * Replay the flushes.
2524 */
2525 RTUINT i;
2526 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2527
2528 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2529 VM_ASSERT_EMT(pVM);
2530
2531 pVM->rem.s.cHandlerNotifications = 0;
2532 for (i = 0; i < c; i++)
2533 {
2534 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2535 switch (pRec->enmKind)
2536 {
2537 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2538 REMR3NotifyHandlerPhysicalRegister(pVM,
2539 pRec->u.PhysicalRegister.enmType,
2540 pRec->u.PhysicalRegister.GCPhys,
2541 pRec->u.PhysicalRegister.cb,
2542 pRec->u.PhysicalRegister.fHasHCHandler);
2543 break;
2544
2545 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2546 REMR3NotifyHandlerPhysicalDeregister(pVM,
2547 pRec->u.PhysicalDeregister.enmType,
2548 pRec->u.PhysicalDeregister.GCPhys,
2549 pRec->u.PhysicalDeregister.cb,
2550 pRec->u.PhysicalDeregister.fHasHCHandler,
2551 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2552 break;
2553
2554 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2555 REMR3NotifyHandlerPhysicalModify(pVM,
2556 pRec->u.PhysicalModify.enmType,
2557 pRec->u.PhysicalModify.GCPhysOld,
2558 pRec->u.PhysicalModify.GCPhysNew,
2559 pRec->u.PhysicalModify.cb,
2560 pRec->u.PhysicalModify.fHasHCHandler,
2561 pRec->u.PhysicalModify.fRestoreAsRAM);
2562 break;
2563
2564 default:
2565 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2566 break;
2567 }
2568 }
2569 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2570}
2571
2572
2573/**
2574 * Notify REM about changed code page.
2575 *
2576 * @returns VBox status code.
2577 * @param pVM VM handle.
2578 * @param pvCodePage Code page address
2579 */
2580REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2581{
2582#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2583 int rc;
2584 RTGCPHYS PhysGC;
2585 uint64_t flags;
2586
2587 VM_ASSERT_EMT(pVM);
2588
2589 /*
2590 * Get the physical page address.
2591 */
2592 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2593 if (rc == VINF_SUCCESS)
2594 {
2595 /*
2596 * Sync the required registers and flush the whole page.
2597 * (Easier to do the whole page than notifying it about each physical
2598 * byte that was changed.
2599 */
2600 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2601 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2602 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2603 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2604
2605 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2606 }
2607#endif
2608 return VINF_SUCCESS;
2609}
2610
2611
2612/**
2613 * Notification about a successful MMR3PhysRegister() call.
2614 *
2615 * @param pVM VM handle.
2616 * @param GCPhys The physical address the RAM.
2617 * @param cb Size of the memory.
2618 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2619 */
2620REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
2621{
2622 uint32_t cbBitmap;
2623 int rc;
2624 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
2625 VM_ASSERT_EMT(pVM);
2626
2627 /*
2628 * Validate input - we trust the caller.
2629 */
2630 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2631 Assert(cb);
2632 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2633
2634 /*
2635 * Base ram?
2636 */
2637 if (!GCPhys)
2638 {
2639 phys_ram_size = cb;
2640 phys_ram_dirty_size = cb >> PAGE_SHIFT;
2641#ifndef VBOX_STRICT
2642 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
2643 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
2644#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
2645 phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
2646 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
2647 cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
2648 rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
2649 AssertRC(rc);
2650 phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
2651#endif
2652 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
2653 }
2654
2655 /*
2656 * Register the ram.
2657 */
2658 Assert(!pVM->rem.s.fIgnoreAll);
2659 pVM->rem.s.fIgnoreAll = true;
2660
2661#ifdef VBOX_WITH_NEW_PHYS_CODE
2662 if (fFlags & MM_RAM_FLAGS_RESERVED)
2663 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2664 else
2665 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2666#else
2667 if (!GCPhys)
2668 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2669 else
2670 {
2671 if (fFlags & MM_RAM_FLAGS_RESERVED)
2672 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2673 else
2674 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2675 }
2676#endif
2677 Assert(pVM->rem.s.fIgnoreAll);
2678 pVM->rem.s.fIgnoreAll = false;
2679}
2680
2681#ifndef VBOX_WITH_NEW_PHYS_CODE
2682
2683/**
2684 * Notification about a successful PGMR3PhysRegisterChunk() call.
2685 *
2686 * @param pVM VM handle.
2687 * @param GCPhys The physical address the RAM.
2688 * @param cb Size of the memory.
2689 * @param pvRam The HC address of the RAM.
2690 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2691 */
2692REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2693{
2694 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2695 VM_ASSERT_EMT(pVM);
2696
2697 /*
2698 * Validate input - we trust the caller.
2699 */
2700 Assert(pvRam);
2701 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2702 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2703 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2704 Assert(fFlags == 0 /* normal RAM */);
2705 Assert(!pVM->rem.s.fIgnoreAll);
2706 pVM->rem.s.fIgnoreAll = true;
2707 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2708 Assert(pVM->rem.s.fIgnoreAll);
2709 pVM->rem.s.fIgnoreAll = false;
2710}
2711
2712
2713/**
2714 * Grows dynamically allocated guest RAM.
2715 * Will raise a fatal error if the operation fails.
2716 *
2717 * @param physaddr The physical address.
2718 */
2719void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2720{
2721 int rc;
2722 PVM pVM = cpu_single_env->pVM;
2723 const RTGCPHYS GCPhys = physaddr;
2724
2725 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2726 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2727 if (RT_SUCCESS(rc))
2728 return;
2729
2730 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2731 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2732 AssertFatalFailed();
2733}
2734
2735#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2736
2737/**
2738 * Notification about a successful MMR3PhysRomRegister() call.
2739 *
2740 * @param pVM VM handle.
2741 * @param GCPhys The physical address of the ROM.
2742 * @param cb The size of the ROM.
2743 * @param pvCopy Pointer to the ROM copy.
2744 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2745 * This function will be called when ever the protection of the
2746 * shadow ROM changes (at reset and end of POST).
2747 */
2748REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2749{
2750 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
2751 VM_ASSERT_EMT(pVM);
2752
2753 /*
2754 * Validate input - we trust the caller.
2755 */
2756 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2757 Assert(cb);
2758 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2759 Assert(pvCopy);
2760 Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);
2761
2762 /*
2763 * Register the rom.
2764 */
2765 Assert(!pVM->rem.s.fIgnoreAll);
2766 pVM->rem.s.fIgnoreAll = true;
2767
2768 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2769
2770 Log2(("%.64Rhxd\n", (char *)pvCopy + cb - 64));
2771
2772 Assert(pVM->rem.s.fIgnoreAll);
2773 pVM->rem.s.fIgnoreAll = false;
2774}
2775
2776
2777/**
2778 * Notification about a successful memory deregistration or reservation.
2779 *
2780 * @param pVM VM Handle.
2781 * @param GCPhys Start physical address.
2782 * @param cb The size of the range.
2783 * @todo Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't
2784 * reserve any memory soon.
2785 */
2786REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2787{
2788 Log(("REMR3NotifyPhysReserve: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2789 VM_ASSERT_EMT(pVM);
2790
2791 /*
2792 * Validate input - we trust the caller.
2793 */
2794 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2795 Assert(cb);
2796 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2797
2798 /*
2799 * Unassigning the memory.
2800 */
2801 Assert(!pVM->rem.s.fIgnoreAll);
2802 pVM->rem.s.fIgnoreAll = true;
2803
2804 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2805
2806 Assert(pVM->rem.s.fIgnoreAll);
2807 pVM->rem.s.fIgnoreAll = false;
2808}
2809
2810
2811/**
2812 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2813 *
2814 * @param pVM VM Handle.
2815 * @param enmType Handler type.
2816 * @param GCPhys Handler range address.
2817 * @param cb Size of the handler range.
2818 * @param fHasHCHandler Set if the handler has a HC callback function.
2819 *
2820 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2821 * Handler memory type to memory which has no HC handler.
2822 */
2823REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2824{
2825 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2826 enmType, GCPhys, cb, fHasHCHandler));
2827 VM_ASSERT_EMT(pVM);
2828 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2829 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2830
2831 if (pVM->rem.s.cHandlerNotifications)
2832 REMR3ReplayHandlerNotifications(pVM);
2833
2834 Assert(!pVM->rem.s.fIgnoreAll);
2835 pVM->rem.s.fIgnoreAll = true;
2836
2837 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2838 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2839 else if (fHasHCHandler)
2840 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2841
2842 Assert(pVM->rem.s.fIgnoreAll);
2843 pVM->rem.s.fIgnoreAll = false;
2844}
2845
2846
2847/**
2848 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2849 *
2850 * @param pVM VM Handle.
2851 * @param enmType Handler type.
2852 * @param GCPhys Handler range address.
2853 * @param cb Size of the handler range.
2854 * @param fHasHCHandler Set if the handler has a HC callback function.
2855 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2856 */
2857REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2858{
2859 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2860 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2861 VM_ASSERT_EMT(pVM);
2862
2863 if (pVM->rem.s.cHandlerNotifications)
2864 REMR3ReplayHandlerNotifications(pVM);
2865
2866 Assert(!pVM->rem.s.fIgnoreAll);
2867 pVM->rem.s.fIgnoreAll = true;
2868
2869/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2870 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2871 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2872 else if (fHasHCHandler)
2873 {
2874 if (!fRestoreAsRAM)
2875 {
2876 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2877 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2878 }
2879 else
2880 {
2881 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2882 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2883 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2884 }
2885 }
2886
2887 Assert(pVM->rem.s.fIgnoreAll);
2888 pVM->rem.s.fIgnoreAll = false;
2889}
2890
2891
2892/**
2893 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2894 *
2895 * @param pVM VM Handle.
2896 * @param enmType Handler type.
2897 * @param GCPhysOld Old handler range address.
2898 * @param GCPhysNew New handler range address.
2899 * @param cb Size of the handler range.
2900 * @param fHasHCHandler Set if the handler has a HC callback function.
2901 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2902 */
2903REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2904{
2905 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
2906 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
2907 VM_ASSERT_EMT(pVM);
2908 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
2909
2910 if (pVM->rem.s.cHandlerNotifications)
2911 REMR3ReplayHandlerNotifications(pVM);
2912
2913 if (fHasHCHandler)
2914 {
2915 Assert(!pVM->rem.s.fIgnoreAll);
2916 pVM->rem.s.fIgnoreAll = true;
2917
2918 /*
2919 * Reset the old page.
2920 */
2921 if (!fRestoreAsRAM)
2922 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
2923 else
2924 {
2925 /* This is not perfect, but it'll do for PD monitoring... */
2926 Assert(cb == PAGE_SIZE);
2927 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
2928 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
2929 }
2930
2931 /*
2932 * Update the new page.
2933 */
2934 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
2935 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2936 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
2937
2938 Assert(pVM->rem.s.fIgnoreAll);
2939 pVM->rem.s.fIgnoreAll = false;
2940 }
2941}
2942
2943
2944/**
2945 * Checks if we're handling access to this page or not.
2946 *
2947 * @returns true if we're trapping access.
2948 * @returns false if we aren't.
2949 * @param pVM The VM handle.
2950 * @param GCPhys The physical address.
2951 *
2952 * @remark This function will only work correctly in VBOX_STRICT builds!
2953 */
2954REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
2955{
2956#ifdef VBOX_STRICT
2957 unsigned long off;
2958 if (pVM->rem.s.cHandlerNotifications)
2959 REMR3ReplayHandlerNotifications(pVM);
2960
2961 off = get_phys_page_offset(GCPhys);
2962 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
2963 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
2964 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
2965#else
2966 return false;
2967#endif
2968}
2969
2970
2971/**
2972 * Deals with a rare case in get_phys_addr_code where the code
2973 * is being monitored.
2974 *
2975 * It could also be an MMIO page, in which case we will raise a fatal error.
2976 *
2977 * @returns The physical address corresponding to addr.
2978 * @param env The cpu environment.
2979 * @param addr The virtual address.
2980 * @param pTLBEntry The TLB entry.
2981 */
2982target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
2983 target_ulong addr,
2984 CPUTLBEntry* pTLBEntry,
2985 target_phys_addr_t ioTLBEntry)
2986{
2987 PVM pVM = env->pVM;
2988
2989 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
2990 {
2991 /* If code memory is being monitored, appropriate IOTLB entry will have
2992 handler IO type, and addend will provide real physical address, no
2993 matter if we store VA in TLB or not, as handlers are always passed PA */
2994 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
2995 return ret;
2996 }
2997 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
2998 "*** handlers\n",
2999 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3000 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3001 LogRel(("*** mmio\n"));
3002 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3003 LogRel(("*** phys\n"));
3004 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3005 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3006 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3007 AssertFatalFailed();
3008}
3009
3010/**
3011 * Read guest RAM and ROM.
3012 *
3013 * @param SrcGCPhys The source address (guest physical).
3014 * @param pvDst The destination address.
3015 * @param cb Number of bytes
3016 */
3017void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3018{
3019 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3020 VBOX_CHECK_ADDR(SrcGCPhys);
3021 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3022#ifdef VBOX_DEBUG_PHYS
3023 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3024#endif
3025 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3026}
3027
3028
3029/**
3030 * Read guest RAM and ROM, unsigned 8-bit.
3031 *
3032 * @param SrcGCPhys The source address (guest physical).
3033 */
3034RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3035{
3036 uint8_t val;
3037 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3038 VBOX_CHECK_ADDR(SrcGCPhys);
3039 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3040 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3041#ifdef VBOX_DEBUG_PHYS
3042 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3043#endif
3044 return val;
3045}
3046
3047
3048/**
3049 * Read guest RAM and ROM, signed 8-bit.
3050 *
3051 * @param SrcGCPhys The source address (guest physical).
3052 */
3053RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3054{
3055 int8_t val;
3056 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3057 VBOX_CHECK_ADDR(SrcGCPhys);
3058 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3059 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3060#ifdef VBOX_DEBUG_PHYS
3061 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3062#endif
3063 return val;
3064}
3065
3066
3067/**
3068 * Read guest RAM and ROM, unsigned 16-bit.
3069 *
3070 * @param SrcGCPhys The source address (guest physical).
3071 */
3072RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3073{
3074 uint16_t val;
3075 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3076 VBOX_CHECK_ADDR(SrcGCPhys);
3077 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3078 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3079#ifdef VBOX_DEBUG_PHYS
3080 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3081#endif
3082 return val;
3083}
3084
3085
3086/**
3087 * Read guest RAM and ROM, signed 16-bit.
3088 *
3089 * @param SrcGCPhys The source address (guest physical).
3090 */
3091RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3092{
3093 int16_t val;
3094 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3095 VBOX_CHECK_ADDR(SrcGCPhys);
3096 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3097 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3098#ifdef VBOX_DEBUG_PHYS
3099 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3100#endif
3101 return val;
3102}
3103
3104
3105/**
3106 * Read guest RAM and ROM, unsigned 32-bit.
3107 *
3108 * @param SrcGCPhys The source address (guest physical).
3109 */
3110RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3111{
3112 uint32_t val;
3113 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3114 VBOX_CHECK_ADDR(SrcGCPhys);
3115 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3116 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3117#ifdef VBOX_DEBUG_PHYS
3118 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3119#endif
3120 return val;
3121}
3122
3123
3124/**
3125 * Read guest RAM and ROM, signed 32-bit.
3126 *
3127 * @param SrcGCPhys The source address (guest physical).
3128 */
3129RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3130{
3131 int32_t val;
3132 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3133 VBOX_CHECK_ADDR(SrcGCPhys);
3134 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3135 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3136#ifdef VBOX_DEBUG_PHYS
3137 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3138#endif
3139 return val;
3140}
3141
3142
3143/**
3144 * Read guest RAM and ROM, unsigned 64-bit.
3145 *
3146 * @param SrcGCPhys The source address (guest physical).
3147 */
3148uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3149{
3150 uint64_t val;
3151 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3152 VBOX_CHECK_ADDR(SrcGCPhys);
3153 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3154 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3155#ifdef VBOX_DEBUG_PHYS
3156 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3157#endif
3158 return val;
3159}
3160
3161/**
3162 * Read guest RAM and ROM, signed 64-bit.
3163 *
3164 * @param SrcGCPhys The source address (guest physical).
3165 */
3166int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3167{
3168 int64_t val;
3169 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3170 VBOX_CHECK_ADDR(SrcGCPhys);
3171 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3172 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3173#ifdef VBOX_DEBUG_PHYS
3174 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3175#endif
3176 return val;
3177}
3178
3179
3180/**
3181 * Write guest RAM.
3182 *
3183 * @param DstGCPhys The destination address (guest physical).
3184 * @param pvSrc The source address.
3185 * @param cb Number of bytes to write
3186 */
3187void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3188{
3189 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3190 VBOX_CHECK_ADDR(DstGCPhys);
3191 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3192 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3193#ifdef VBOX_DEBUG_PHYS
3194 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3195#endif
3196}
3197
3198
3199/**
3200 * Write guest RAM, unsigned 8-bit.
3201 *
3202 * @param DstGCPhys The destination address (guest physical).
3203 * @param val Value
3204 */
3205void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3206{
3207 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3208 VBOX_CHECK_ADDR(DstGCPhys);
3209 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3210 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3211#ifdef VBOX_DEBUG_PHYS
3212 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3213#endif
3214}
3215
3216
3217/**
3218 * Write guest RAM, unsigned 8-bit.
3219 *
3220 * @param DstGCPhys The destination address (guest physical).
3221 * @param val Value
3222 */
3223void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3224{
3225 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3226 VBOX_CHECK_ADDR(DstGCPhys);
3227 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3228 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3229#ifdef VBOX_DEBUG_PHYS
3230 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3231#endif
3232}
3233
3234
3235/**
3236 * Write guest RAM, unsigned 32-bit.
3237 *
3238 * @param DstGCPhys The destination address (guest physical).
3239 * @param val Value
3240 */
3241void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3242{
3243 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3244 VBOX_CHECK_ADDR(DstGCPhys);
3245 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3246 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3247#ifdef VBOX_DEBUG_PHYS
3248 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3249#endif
3250}
3251
3252
3253/**
3254 * Write guest RAM, unsigned 64-bit.
3255 *
3256 * @param DstGCPhys The destination address (guest physical).
3257 * @param val Value
3258 */
3259void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3260{
3261 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3262 VBOX_CHECK_ADDR(DstGCPhys);
3263 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3264 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3265#ifdef VBOX_DEBUG_PHYS
3266 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3267#endif
3268}
3269
3270#undef LOG_GROUP
3271#define LOG_GROUP LOG_GROUP_REM_MMIO
3272
3273/** Read MMIO memory. */
3274static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3275{
3276 uint32_t u32 = 0;
3277 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3278 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3279 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3280 return u32;
3281}
3282
3283/** Read MMIO memory. */
3284static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3285{
3286 uint32_t u32 = 0;
3287 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3288 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3289 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3290 return u32;
3291}
3292
3293/** Read MMIO memory. */
3294static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3295{
3296 uint32_t u32 = 0;
3297 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3298 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3299 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3300 return u32;
3301}
3302
3303/** Write to MMIO memory. */
3304static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3305{
3306 int rc;
3307 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3308 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3309 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3310}
3311
3312/** Write to MMIO memory. */
3313static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3314{
3315 int rc;
3316 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3317 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3318 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3319}
3320
3321/** Write to MMIO memory. */
3322static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3323{
3324 int rc;
3325 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3326 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3327 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3328}
3329
3330
3331#undef LOG_GROUP
3332#define LOG_GROUP LOG_GROUP_REM_HANDLER
3333
3334/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3335
3336static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3337{
3338 uint8_t u8;
3339 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3340 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3341 return u8;
3342}
3343
3344static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3345{
3346 uint16_t u16;
3347 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3348 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3349 return u16;
3350}
3351
3352static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3353{
3354 uint32_t u32;
3355 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3356 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3357 return u32;
3358}
3359
3360static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3361{
3362 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3363 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3364}
3365
3366static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3367{
3368 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3369 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3370}
3371
3372static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3373{
3374 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3375 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3376}
3377
3378/* -+- disassembly -+- */
3379
3380#undef LOG_GROUP
3381#define LOG_GROUP LOG_GROUP_REM_DISAS
3382
3383
3384/**
3385 * Enables or disables singled stepped disassembly.
3386 *
3387 * @returns VBox status code.
3388 * @param pVM VM handle.
3389 * @param fEnable To enable set this flag, to disable clear it.
3390 */
3391static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3392{
3393 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3394 VM_ASSERT_EMT(pVM);
3395
3396 if (fEnable)
3397 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3398 else
3399 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3400 return VINF_SUCCESS;
3401}
3402
3403
3404/**
3405 * Enables or disables singled stepped disassembly.
3406 *
3407 * @returns VBox status code.
3408 * @param pVM VM handle.
3409 * @param fEnable To enable set this flag, to disable clear it.
3410 */
3411REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3412{
3413 PVMREQ pReq;
3414 int rc;
3415
3416 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3417 if (VM_IS_EMT(pVM))
3418 return remR3DisasEnableStepping(pVM, fEnable);
3419
3420 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3421 AssertRC(rc);
3422 if (RT_SUCCESS(rc))
3423 rc = pReq->iStatus;
3424 VMR3ReqFree(pReq);
3425 return rc;
3426}
3427
3428
3429#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3430/**
3431 * External Debugger Command: .remstep [on|off|1|0]
3432 */
3433static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3434{
3435 bool fEnable;
3436 int rc;
3437
3438 /* print status */
3439 if (cArgs == 0)
3440 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3441 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3442
3443 /* convert the argument and change the mode. */
3444 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3445 if (RT_FAILURE(rc))
3446 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3447 rc = REMR3DisasEnableStepping(pVM, fEnable);
3448 if (RT_FAILURE(rc))
3449 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3450 return rc;
3451}
3452#endif
3453
3454
3455/**
3456 * Disassembles n instructions and prints them to the log.
3457 *
3458 * @returns Success indicator.
3459 * @param env Pointer to the recompiler CPU structure.
3460 * @param f32BitCode Indicates that whether or not the code should
3461 * be disassembled as 16 or 32 bit. If -1 the CS
3462 * selector will be inspected.
3463 * @param nrInstructions Nr of instructions to disassemble
3464 * @param pszPrefix
3465 * @remark not currently used for anything but ad-hoc debugging.
3466 */
3467bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
3468{
3469 int i, rc;
3470 RTGCPTR GCPtrPC;
3471 uint8_t *pvPC;
3472 RTINTPTR off;
3473 DISCPUSTATE Cpu;
3474
3475 /*
3476 * Determin 16/32 bit mode.
3477 */
3478 if (f32BitCode == -1)
3479 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3480
3481 /*
3482 * Convert cs:eip to host context address.
3483 * We don't care to much about cross page correctness presently.
3484 */
3485 GCPtrPC = env->segs[R_CS].base + env->eip;
3486 if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3487 {
3488 Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);
3489
3490 /* convert eip to physical address. */
3491 rc = PGMPhysGCPtr2R3PtrByGstCR3(env->pVM,
3492 GCPtrPC,
3493 env->cr[3],
3494 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
3495 (void**)&pvPC);
3496 if (RT_FAILURE(rc))
3497 {
3498 if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
3499 return false;
3500 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
3501 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
3502 }
3503 }
3504 else
3505 {
3506 /* physical address */
3507 rc = PGMPhysGCPhys2R3Ptr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
3508 (void**)&pvPC);
3509 if (RT_FAILURE(rc))
3510 return false;
3511 }
3512
3513 /*
3514 * Disassemble.
3515 */
3516 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3517 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3518 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3519 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3520 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3521 //Cpu.dwUserData[2] = GCPtrPC;
3522
3523 for (i=0;i<nrInstructions;i++)
3524 {
3525 char szOutput[256];
3526 uint32_t cbOp;
3527 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3528 return false;
3529 if (pszPrefix)
3530 Log(("%s: %s", pszPrefix, szOutput));
3531 else
3532 Log(("%s", szOutput));
3533
3534 pvPC += cbOp;
3535 }
3536 return true;
3537}
3538
3539
3540/** @todo need to test the new code, using the old code in the mean while. */
3541#define USE_OLD_DUMP_AND_DISASSEMBLY
3542
3543/**
3544 * Disassembles one instruction and prints it to the log.
3545 *
3546 * @returns Success indicator.
3547 * @param env Pointer to the recompiler CPU structure.
3548 * @param f32BitCode Indicates that whether or not the code should
3549 * be disassembled as 16 or 32 bit. If -1 the CS
3550 * selector will be inspected.
3551 * @param pszPrefix
3552 */
3553bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3554{
3555#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
3556 PVM pVM = env->pVM;
3557 RTGCPTR GCPtrPC;
3558 uint8_t *pvPC;
3559 char szOutput[256];
3560 uint32_t cbOp;
3561 RTINTPTR off;
3562 DISCPUSTATE Cpu;
3563
3564
3565 /* Doesn't work in long mode. */
3566 if (env->hflags & HF_LMA_MASK)
3567 return false;
3568
3569 /*
3570 * Determin 16/32 bit mode.
3571 */
3572 if (f32BitCode == -1)
3573 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3574
3575 /*
3576 * Log registers
3577 */
3578 if (LogIs2Enabled())
3579 {
3580 remR3StateUpdate(pVM);
3581 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3582 }
3583
3584 /*
3585 * Convert cs:eip to host context address.
3586 * We don't care to much about cross page correctness presently.
3587 */
3588 GCPtrPC = env->segs[R_CS].base + env->eip;
3589 if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3590 {
3591 /* convert eip to physical address. */
3592 int rc = PGMPhysGCPtr2R3PtrByGstCR3(pVM,
3593 GCPtrPC,
3594 env->cr[3],
3595 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
3596 (void**)&pvPC);
3597 if (RT_FAILURE(rc))
3598 {
3599 if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
3600 return false;
3601 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
3602 + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
3603 }
3604 }
3605 else
3606 {
3607
3608 /* physical address */
3609 int rc = PGMPhysGCPhys2R3Ptr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
3610 if (RT_FAILURE(rc))
3611 return false;
3612 }
3613
3614 /*
3615 * Disassemble.
3616 */
3617 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3618 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3619 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3620 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3621 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3622 //Cpu.dwUserData[2] = GCPtrPC;
3623 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3624 return false;
3625
3626 if (!f32BitCode)
3627 {
3628 if (pszPrefix)
3629 Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
3630 else
3631 Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
3632 }
3633 else
3634 {
3635 if (pszPrefix)
3636 Log(("%s: %s", pszPrefix, szOutput));
3637 else
3638 Log(("%s", szOutput));
3639 }
3640 return true;
3641
3642#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
3643 PVM pVM = env->pVM;
3644 const bool fLog = LogIsEnabled();
3645 const bool fLog2 = LogIs2Enabled();
3646 int rc = VINF_SUCCESS;
3647
3648 /*
3649 * Don't bother if there ain't any log output to do.
3650 */
3651 if (!fLog && !fLog2)
3652 return true;
3653
3654 /*
3655 * Update the state so DBGF reads the correct register values.
3656 */
3657 remR3StateUpdate(pVM);
3658
3659 /*
3660 * Log registers if requested.
3661 */
3662 if (!fLog2)
3663 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3664
3665 /*
3666 * Disassemble to log.
3667 */
3668 if (fLog)
3669 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3670
3671 return RT_SUCCESS(rc);
3672#endif
3673}
3674
3675
3676/**
3677 * Disassemble recompiled code.
3678 *
3679 * @param phFileIgnored Ignored, logfile usually.
3680 * @param pvCode Pointer to the code block.
3681 * @param cb Size of the code block.
3682 */
3683void disas(FILE *phFile, void *pvCode, unsigned long cb)
3684{
3685#ifdef DEBUG_TMP_LOGGING
3686# define DISAS_PRINTF(x...) fprintf(phFile, x)
3687#else
3688# define DISAS_PRINTF(x...) RTLogPrintf(x)
3689 if (LogIs2Enabled())
3690#endif
3691 {
3692 unsigned off = 0;
3693 char szOutput[256];
3694 DISCPUSTATE Cpu;
3695
3696 memset(&Cpu, 0, sizeof(Cpu));
3697#ifdef RT_ARCH_X86
3698 Cpu.mode = CPUMODE_32BIT;
3699#else
3700 Cpu.mode = CPUMODE_64BIT;
3701#endif
3702
3703 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3704 while (off < cb)
3705 {
3706 uint32_t cbInstr;
3707 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3708 DISAS_PRINTF("%s", szOutput);
3709 else
3710 {
3711 DISAS_PRINTF("disas error\n");
3712 cbInstr = 1;
3713#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3714 break;
3715#endif
3716 }
3717 off += cbInstr;
3718 }
3719 }
3720
3721#undef DISAS_PRINTF
3722}
3723
3724
3725/**
3726 * Disassemble guest code.
3727 *
3728 * @param phFileIgnored Ignored, logfile usually.
3729 * @param uCode The guest address of the code to disassemble. (flat?)
3730 * @param cb Number of bytes to disassemble.
3731 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3732 */
3733void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3734{
3735#ifdef DEBUG_TMP_LOGGING
3736# define DISAS_PRINTF(x...) fprintf(phFile, x)
3737#else
3738# define DISAS_PRINTF(x...) RTLogPrintf(x)
3739 if (LogIs2Enabled())
3740#endif
3741 {
3742 PVM pVM = cpu_single_env->pVM;
3743 RTSEL cs;
3744 RTGCUINTPTR eip;
3745
3746 /*
3747 * Update the state so DBGF reads the correct register values (flags).
3748 */
3749 remR3StateUpdate(pVM);
3750
3751 /*
3752 * Do the disassembling.
3753 */
3754 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3755 cs = cpu_single_env->segs[R_CS].selector;
3756 eip = uCode - cpu_single_env->segs[R_CS].base;
3757 for (;;)
3758 {
3759 char szBuf[256];
3760 uint32_t cbInstr;
3761 int rc = DBGFR3DisasInstrEx(pVM,
3762 cs,
3763 eip,
3764 0,
3765 szBuf, sizeof(szBuf),
3766 &cbInstr);
3767 if (RT_SUCCESS(rc))
3768 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3769 else
3770 {
3771 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3772 cbInstr = 1;
3773 }
3774
3775 /* next */
3776 if (cb <= cbInstr)
3777 break;
3778 cb -= cbInstr;
3779 uCode += cbInstr;
3780 eip += cbInstr;
3781 }
3782 }
3783#undef DISAS_PRINTF
3784}
3785
3786
3787/**
3788 * Looks up a guest symbol.
3789 *
3790 * @returns Pointer to symbol name. This is a static buffer.
3791 * @param orig_addr The address in question.
3792 */
3793const char *lookup_symbol(target_ulong orig_addr)
3794{
3795 RTGCINTPTR off = 0;
3796 DBGFSYMBOL Sym;
3797 PVM pVM = cpu_single_env->pVM;
3798 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3799 if (RT_SUCCESS(rc))
3800 {
3801 static char szSym[sizeof(Sym.szName) + 48];
3802 if (!off)
3803 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3804 else if (off > 0)
3805 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3806 else
3807 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3808 return szSym;
3809 }
3810 return "<N/A>";
3811}
3812
3813
3814#undef LOG_GROUP
3815#define LOG_GROUP LOG_GROUP_REM
3816
3817
3818/* -+- FF notifications -+- */
3819
3820
3821/**
3822 * Notification about a pending interrupt.
3823 *
3824 * @param pVM VM Handle.
3825 * @param u8Interrupt Interrupt
3826 * @thread The emulation thread.
3827 */
3828REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3829{
3830 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3831 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3832}
3833
3834/**
3835 * Notification about a pending interrupt.
3836 *
3837 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3838 * @param pVM VM Handle.
3839 * @thread The emulation thread.
3840 */
3841REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3842{
3843 return pVM->rem.s.u32PendingInterrupt;
3844}
3845
3846/**
3847 * Notification about the interrupt FF being set.
3848 *
3849 * @param pVM VM Handle.
3850 * @thread The emulation thread.
3851 */
3852REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3853{
3854 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3855 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3856 if (pVM->rem.s.fInREM)
3857 {
3858 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3859 CPU_INTERRUPT_EXTERNAL_HARD);
3860 }
3861}
3862
3863
3864/**
3865 * Notification about the interrupt FF being set.
3866 *
3867 * @param pVM VM Handle.
3868 * @thread Any.
3869 */
3870REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3871{
3872 LogFlow(("REMR3NotifyInterruptClear:\n"));
3873 if (pVM->rem.s.fInREM)
3874 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3875}
3876
3877
3878/**
3879 * Notification about pending timer(s).
3880 *
3881 * @param pVM VM Handle.
3882 * @thread Any.
3883 */
3884REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3885{
3886#ifndef DEBUG_bird
3887 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3888#endif
3889 if (pVM->rem.s.fInREM)
3890 {
3891 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3892 CPU_INTERRUPT_EXTERNAL_TIMER);
3893 }
3894}
3895
3896
3897/**
3898 * Notification about pending DMA transfers.
3899 *
3900 * @param pVM VM Handle.
3901 * @thread Any.
3902 */
3903REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3904{
3905 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3906 if (pVM->rem.s.fInREM)
3907 {
3908 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3909 CPU_INTERRUPT_EXTERNAL_DMA);
3910 }
3911}
3912
3913
3914/**
3915 * Notification about pending timer(s).
3916 *
3917 * @param pVM VM Handle.
3918 * @thread Any.
3919 */
3920REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3921{
3922 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3923 if (pVM->rem.s.fInREM)
3924 {
3925 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3926 CPU_INTERRUPT_EXTERNAL_EXIT);
3927 }
3928}
3929
3930
3931/**
3932 * Notification about pending FF set by an external thread.
3933 *
3934 * @param pVM VM handle.
3935 * @thread Any.
3936 */
3937REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3938{
3939 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3940 if (pVM->rem.s.fInREM)
3941 {
3942 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3943 CPU_INTERRUPT_EXTERNAL_EXIT);
3944 }
3945}
3946
3947
3948#ifdef VBOX_WITH_STATISTICS
3949void remR3ProfileStart(int statcode)
3950{
3951 STAMPROFILEADV *pStat;
3952 switch(statcode)
3953 {
3954 case STATS_EMULATE_SINGLE_INSTR:
3955 pStat = &gStatExecuteSingleInstr;
3956 break;
3957 case STATS_QEMU_COMPILATION:
3958 pStat = &gStatCompilationQEmu;
3959 break;
3960 case STATS_QEMU_RUN_EMULATED_CODE:
3961 pStat = &gStatRunCodeQEmu;
3962 break;
3963 case STATS_QEMU_TOTAL:
3964 pStat = &gStatTotalTimeQEmu;
3965 break;
3966 case STATS_QEMU_RUN_TIMERS:
3967 pStat = &gStatTimers;
3968 break;
3969 case STATS_TLB_LOOKUP:
3970 pStat= &gStatTBLookup;
3971 break;
3972 case STATS_IRQ_HANDLING:
3973 pStat= &gStatIRQ;
3974 break;
3975 case STATS_RAW_CHECK:
3976 pStat = &gStatRawCheck;
3977 break;
3978
3979 default:
3980 AssertMsgFailed(("unknown stat %d\n", statcode));
3981 return;
3982 }
3983 STAM_PROFILE_ADV_START(pStat, a);
3984}
3985
3986
3987void remR3ProfileStop(int statcode)
3988{
3989 STAMPROFILEADV *pStat;
3990 switch(statcode)
3991 {
3992 case STATS_EMULATE_SINGLE_INSTR:
3993 pStat = &gStatExecuteSingleInstr;
3994 break;
3995 case STATS_QEMU_COMPILATION:
3996 pStat = &gStatCompilationQEmu;
3997 break;
3998 case STATS_QEMU_RUN_EMULATED_CODE:
3999 pStat = &gStatRunCodeQEmu;
4000 break;
4001 case STATS_QEMU_TOTAL:
4002 pStat = &gStatTotalTimeQEmu;
4003 break;
4004 case STATS_QEMU_RUN_TIMERS:
4005 pStat = &gStatTimers;
4006 break;
4007 case STATS_TLB_LOOKUP:
4008 pStat= &gStatTBLookup;
4009 break;
4010 case STATS_IRQ_HANDLING:
4011 pStat= &gStatIRQ;
4012 break;
4013 case STATS_RAW_CHECK:
4014 pStat = &gStatRawCheck;
4015 break;
4016 default:
4017 AssertMsgFailed(("unknown stat %d\n", statcode));
4018 return;
4019 }
4020 STAM_PROFILE_ADV_STOP(pStat, a);
4021}
4022#endif
4023
4024/**
4025 * Raise an RC, force rem exit.
4026 *
4027 * @param pVM VM handle.
4028 * @param rc The rc.
4029 */
4030void remR3RaiseRC(PVM pVM, int rc)
4031{
4032 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4033 Assert(pVM->rem.s.fInREM);
4034 VM_ASSERT_EMT(pVM);
4035 pVM->rem.s.rc = rc;
4036 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4037}
4038
4039
4040/* -+- timers -+- */
4041
/** Reads the guest TSC for the recompiler, delegating to TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    /* Count how often recompiled code samples the TSC. */
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
4047
4048
4049/* -+- interrupts -+- */
4050
4051void cpu_set_ferr(CPUX86State *env)
4052{
4053 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4054 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4055}
4056
/**
 * Gets the next pending hardware interrupt vector for the recompiler.
 *
 * @returns The interrupt vector number, or -1 when none could be obtained.
 * @param   env     The recompiler CPU state (only the pVM member is used).
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector stashed by the raw-mode fallback described above. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* If more interrupts are pending, keep CPU_INTERRUPT_HARD set so the
           recompiler comes back and asks again after delivering this one. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4089
4090
4091/* -+- local apic -+- */
4092
4093void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4094{
4095 int rc = PDMApicSetBase(env->pVM, val);
4096 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4097}
4098
4099uint64_t cpu_get_apic_base(CPUX86State *env)
4100{
4101 uint64_t u64;
4102 int rc = PDMApicGetBase(env->pVM, &u64);
4103 if (RT_SUCCESS(rc))
4104 {
4105 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4106 return u64;
4107 }
4108 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4109 return 0;
4110}
4111
4112void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4113{
4114 int rc = PDMApicSetTPR(env->pVM, val);
4115 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4116}
4117
4118uint8_t cpu_get_apic_tpr(CPUX86State *env)
4119{
4120 uint8_t u8;
4121 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4122 if (RT_SUCCESS(rc))
4123 {
4124 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4125 return u8;
4126 }
4127 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4128 return 0;
4129}
4130
4131
4132uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4133{
4134 uint64_t value;
4135 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4136 if (RT_SUCCESS(rc))
4137 {
4138 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4139 return value;
4140 }
4141 /** @todo: exception ? */
4142 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4143 return value;
4144}
4145
4146void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4147{
4148 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4149 /** @todo: exception if error ? */
4150 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4151}
4152
/** Reads a guest MSR, delegating to CPUM. */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4157
/** Writes a guest MSR, delegating to CPUM. */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4162/* -+- I/O Ports -+- */
4163
4164#undef LOG_GROUP
4165#define LOG_GROUP LOG_GROUP_REM_IOPORT
4166
4167void cpu_outb(CPUState *env, int addr, int val)
4168{
4169 int rc;
4170
4171 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4172 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4173
4174 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4175 if (RT_LIKELY(rc == VINF_SUCCESS))
4176 return;
4177 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4178 {
4179 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4180 remR3RaiseRC(env->pVM, rc);
4181 return;
4182 }
4183 remAbort(rc, __FUNCTION__);
4184}
4185
4186void cpu_outw(CPUState *env, int addr, int val)
4187{
4188 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4189 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4190 if (RT_LIKELY(rc == VINF_SUCCESS))
4191 return;
4192 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4193 {
4194 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4195 remR3RaiseRC(env->pVM, rc);
4196 return;
4197 }
4198 remAbort(rc, __FUNCTION__);
4199}
4200
4201void cpu_outl(CPUState *env, int addr, int val)
4202{
4203 int rc;
4204 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4205 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4206 if (RT_LIKELY(rc == VINF_SUCCESS))
4207 return;
4208 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4209 {
4210 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4211 remR3RaiseRC(env->pVM, rc);
4212 return;
4213 }
4214 remAbort(rc, __FUNCTION__);
4215}
4216
4217int cpu_inb(CPUState *env, int addr)
4218{
4219 uint32_t u32 = 0;
4220 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4221 if (RT_LIKELY(rc == VINF_SUCCESS))
4222 {
4223 if (/*addr != 0x61 && */addr != 0x71)
4224 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4225 return (int)u32;
4226 }
4227 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4228 {
4229 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4230 remR3RaiseRC(env->pVM, rc);
4231 return (int)u32;
4232 }
4233 remAbort(rc, __FUNCTION__);
4234 return 0xff;
4235}
4236
4237int cpu_inw(CPUState *env, int addr)
4238{
4239 uint32_t u32 = 0;
4240 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4241 if (RT_LIKELY(rc == VINF_SUCCESS))
4242 {
4243 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4244 return (int)u32;
4245 }
4246 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4247 {
4248 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4249 remR3RaiseRC(env->pVM, rc);
4250 return (int)u32;
4251 }
4252 remAbort(rc, __FUNCTION__);
4253 return 0xffff;
4254}
4255
4256int cpu_inl(CPUState *env, int addr)
4257{
4258 uint32_t u32 = 0;
4259 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4260 if (RT_LIKELY(rc == VINF_SUCCESS))
4261 {
4262//if (addr==0x01f0 && u32 == 0x6b6d)
4263// loglevel = ~0;
4264 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4265 return (int)u32;
4266 }
4267 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4268 {
4269 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4270 remR3RaiseRC(env->pVM, rc);
4271 return (int)u32;
4272 }
4273 remAbort(rc, __FUNCTION__);
4274 return 0xffffffff;
4275}
4276
4277#undef LOG_GROUP
4278#define LOG_GROUP LOG_GROUP_REM
4279
4280
4281/* -+- helpers and misc other interfaces -+- */
4282
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Delegate to CPUM so the guest sees the configured CPUID leaves. */
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4300
4301
4302#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 * (Dead code: this definition is under an enclosing "#if 0".)
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4330#endif
4331
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    /* Restart the va_list - the first pass consumed it. */
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4365
4366
/**
 * Aborts the VM.
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4392
4393
/**
 * Dumps a linux system call.
 * @param pVM VM handle.
 */
void remR3DumpLnxSyscall(PVM pVM)
{
    /* Syscall names indexed by the number in eax.
       NOTE(review): looks like the i386 Linux syscall table of the 2.6 era - confirm before extending. */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",             /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",           /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",            /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",           /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",            /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",            /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",            /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",              /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",           /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",            /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",            /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",          /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",         /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",             /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",         /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",          /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",             /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",          /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",            /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",         /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",            /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",           /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",            /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",            /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",          /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",           /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",            /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",           /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",           /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",         /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",            /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",         /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",           /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            /* Known syscalls get a pretty line with the register arguments; the rest get a generic note. */
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
                     CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
            break;

    }
}
4692
4693
/**
 * Dumps an OpenBSD system call.
 * @param pVM VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* Syscall names indexed by the number in eax; "SYS_<n>" marks unused/unknown slots. */
    static const char *apsz[] =
    {
        "SYS_syscall",          //0
        "SYS_exit",             //1
        "SYS_fork",             //2
        "SYS_read",             //3
        "SYS_write",            //4
        "SYS_open",             //5
        "SYS_close",            //6
        "SYS_wait4",            //7
        "SYS_8",
        "SYS_link",             //9
        "SYS_unlink",           //10
        "SYS_11",
        "SYS_chdir",            //12
        "SYS_fchdir",           //13
        "SYS_mknod",            //14
        "SYS_chmod",            //15
        "SYS_chown",            //16
        "SYS_break",            //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid",           //20
        "SYS_mount",            //21
        "SYS_unmount",          //22
        "SYS_setuid",           //23
        "SYS_getuid",           //24
        "SYS_geteuid",          //25
        "SYS_ptrace",           //26
        "SYS_recvmsg",          //27
        "SYS_sendmsg",          //28
        "SYS_recvfrom",         //29
        "SYS_accept",           //30
        "SYS_getpeername",      //31
        "SYS_getsockname",      //32
        "SYS_access",           //33
        "SYS_chflags",          //34
        "SYS_fchflags",         //35
        "SYS_sync",             //36
        "SYS_kill",             //37
        "SYS_38",
        "SYS_getppid",          //39
        "SYS_40",
        "SYS_dup",              //41
        "SYS_opipe",            //42
        "SYS_getegid",          //43
        "SYS_profil",           //44
        "SYS_ktrace",           //45
        "SYS_sigaction",        //46
        "SYS_getgid",           //47
        "SYS_sigprocmask",      //48
        "SYS_getlogin",         //49
        "SYS_setlogin",         //50
        "SYS_acct",             //51
        "SYS_sigpending",       //52
        "SYS_osigaltstack",     //53
        "SYS_ioctl",            //54
        "SYS_reboot",           //55
        "SYS_revoke",           //56
        "SYS_symlink",          //57
        "SYS_readlink",         //58
        "SYS_execve",           //59
        "SYS_umask",            //60
        "SYS_chroot",           //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork",            //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk",             //69
        "SYS_sstk",             //70
        "SYS_61",
        "SYS_vadvise",          //72
        "SYS_munmap",           //73
        "SYS_mprotect",         //74
        "SYS_madvise",          //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore",          //78
        "SYS_getgroups",        //79
        "SYS_setgroups",        //80
        "SYS_getpgrp",          //81
        "SYS_setpgid",          //82
        "SYS_setitimer",        //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer",        //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2",             //90
        "SYS_91",
        "SYS_fcntl",            //92
        "SYS_select",           //93
        "SYS_94",
        "SYS_fsync",            //95
        "SYS_setpriority",      //96
        "SYS_socket",           //97
        "SYS_connect",          //98
        "SYS_99",
        "SYS_getpriority",      //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn",        //103
        "SYS_bind",             //104
        "SYS_setsockopt",       //105
        "SYS_listen",           //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend",       //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday",     //116
        "SYS_getrusage",        //117
        "SYS_getsockopt",       //118
        "SYS_119",
        "SYS_readv",            //120
        "SYS_writev",           //121
        "SYS_settimeofday",     //122
        "SYS_fchown",           //123
        "SYS_fchmod",           //124
        "SYS_125",
        "SYS_setreuid",         //126
        "SYS_setregid",         //127
        "SYS_rename",           //128
        "SYS_129",
        "SYS_130",
        "SYS_flock",            //131
        "SYS_mkfifo",           //132
        "SYS_sendto",           //133
        "SYS_shutdown",         //134
        "SYS_socketpair",       //135
        "SYS_mkdir",            //136
        "SYS_rmdir",            //137
        "SYS_utimes",           //138
        "SYS_139",
        "SYS_adjtime",          //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid",           //147
        "SYS_quotactl",         //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc",           //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh",            //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch",          //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread",            //173
        "SYS_pwrite",           //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid",           //181
        "SYS_setegid",          //182
        "SYS_seteuid",          //183
        "SYS_lfs_bmapv",        //184
        "SYS_lfs_markv",        //185
        "SYS_lfs_segclean",     //186
        "SYS_lfs_segwait",      //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf",         //191
        "SYS_fpathconf",        //192
        "SYS_swapctl",          //193
        "SYS_getrlimit",        //194
        "SYS_setrlimit",        //195
        "SYS_getdirentries",    //196
        "SYS_mmap",             //197
        "SYS___syscall",        //198
        "SYS_lseek",            //199
        "SYS_truncate",         //200
        "SYS_ftruncate",        //201
        "SYS___sysctl",         //202
        "SYS_mlock",            //203
        "SYS_munlock",          //204
        "SYS_205",
        "SYS_futimes",          //206
        "SYS_getpgid",          //207
        "SYS_xfspioctl",        //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget",           //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget",           //225
        "SYS_msgsnd",           //226
        "SYS_msgrcv",           //227
        "SYS_shmat",            //228
        "SYS_229",
        "SYS_shmdt",            //230
        "SYS_231",
        "SYS_clock_gettime",    //232
        "SYS_clock_settime",    //233
        "SYS_clock_getres",     //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep",        //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit",         //250
        "SYS_rfork",            //251
        "SYS_poll",             //252
        "SYS_issetugid",        //253
        "SYS_lchown",           //254
        "SYS_getsid",           //255
        "SYS_msync",            //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat",        //260
        "SYS_statfs",           //261
        "SYS_fstatfs",          //262
        "SYS_pipe",             //263
        "SYS_fhopen",           //264
        "SYS_265",
        "SYS_fhstatfs",         //266
        "SYS_preadv",           //267
        "SYS_pwritev",          //268
        "SYS_kqueue",           //269
        "SYS_kevent",           //270
        "SYS_mlockall",         //271
        "SYS_munlockall",       //272
        "SYS_getpeereid",       //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid",        //281
        "SYS_setresuid",        //282
        "SYS_getresgid",        //283
        "SYS_setresgid",        //284
        "SYS_285",
        "SYS_mquery",           //286
        "SYS_closefrom",        //287
        "SYS_sigaltstack",      //288
        "SYS_shmget",           //289
        "SYS_semop",            //290
        "SYS_stat",             //291
        "SYS_fstat",            //292
        "SYS_lstat",            //293
        "SYS_fhstat",           //294
        "SYS___semctl",         //295
        "SYS_shmctl",           //296
        "SYS_msgctl",           //297
        "SYS_MAXSYSCALL",       //298
        //299
        //300
    };
    uint32_t uEAX;
    /* This dump reads guest memory, so skip all of it when logging is off. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                /* The arguments live on the guest stack; fetch the first eight dwords. */
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
5024
5025
5026#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    /* Intentionally empty: the no-CRT build has no runtime state to set up. */
    return true;
}
5034
/**
 * Minimal byte-wise memcpy replacement for the no-CRT build.
 *
 * @returns dst.
 * @param   dst     The destination buffer.
 * @param   src     The source buffer (must not overlap dst).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* keep const - don't strip the qualifier from src */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5042
5043#endif
5044
/** QEMU callback on SMM state changes; intentionally a no-op here. */
void cpu_smm_update(CPUState* env)
{
    /* Nothing to do. */
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette