VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 14335

Last change on this file since 14335 was 14277, checked in by vboxsync, 16 years ago

Implemented support for virtual addresses in the TLB, which improves performance
greatly, but it is not yet fully functional as it breaks some sync checks, so it is disabled.
To enable it, comment out the line $(REM_MOD)_DEFS += REM_PHYS_ADDR_IN_TLB
in Makefile.kmk.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 159.4 KB
Line 
1/* $Id: VBoxRecompiler.c 14277 2008-11-18 09:10:18Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33void cpu_exec_init_all(unsigned long tb_size);
34
35#include <VBox/rem.h>
36#include <VBox/vmapi.h>
37#include <VBox/tm.h>
38#include <VBox/ssm.h>
39#include <VBox/em.h>
40#include <VBox/trpm.h>
41#include <VBox/iom.h>
42#include <VBox/mm.h>
43#include <VBox/pgm.h>
44#include <VBox/pdm.h>
45#include <VBox/dbgf.h>
46#include <VBox/dbg.h>
47#include <VBox/hwaccm.h>
48#include <VBox/patm.h>
49#include <VBox/csam.h>
50#include "REMInternal.h"
51#include <VBox/vm.h>
52#include <VBox/param.h>
53#include <VBox/err.h>
54
55#include <VBox/log.h>
56#include <iprt/semaphore.h>
57#include <iprt/asm.h>
58#include <iprt/assert.h>
59#include <iprt/thread.h>
60#include <iprt/string.h>
61
62/* Don't wanna include everything. */
63extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
64extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
65extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
66extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
67extern void tlb_flush(CPUState *env, int flush_global);
68extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
69extern void sync_ldtr(CPUX86State *env1, int selector);
70extern int sync_tr(CPUX86State *env1, int selector);
71
72#ifdef VBOX_STRICT
73unsigned long get_phys_page_offset(target_ulong addr);
74#endif
75
76
77/*******************************************************************************
78* Defined Constants And Macros *
79*******************************************************************************/
80
81/** Copy 80-bit fpu register at pSrc to pDst.
82 * This is probably faster than *calling* memcpy.
83 */
84#define REM_COPY_FPU_REG(pDst, pSrc) \
85 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
86
87
88/*******************************************************************************
89* Internal Functions *
90*******************************************************************************/
91static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
92static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
93static void remR3StateUpdate(PVM pVM);
94
95static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
97static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
98static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
101
102static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
104static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
105static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
108
109
110/*******************************************************************************
111* Global Variables *
112*******************************************************************************/
113
/** @todo Move stats to REM::s some rainy day we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
/* Execution and recompilation profiling (registered in REMR3Init under /PROF/REM/...). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
/* Memory access profiling. */
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
/* Guest-physical <-> host-virtual address conversion profiling. */
static STAMPROFILE gStatGCPhys2HCVirt;
static STAMPROFILE gStatHCVirt2GCPhys;
static STAMCOUNTER gStatCpuGetTSC;
/* Counters for the reasons raw-mode execution was refused (see /REM/Refuse/* registrations). */
static STAMCOUNTER gStatRefuseTFInhibit;
static STAMCOUNTER gStatRefuseVM86;
static STAMCOUNTER gStatRefusePaging;
static STAMCOUNTER gStatRefusePAE;
static STAMCOUNTER gStatRefuseIOPLNot0;
static STAMCOUNTER gStatRefuseIF0;
static STAMCOUNTER gStatRefuseCode16;
static STAMCOUNTER gStatRefuseWP0;
static STAMCOUNTER gStatRefuseRing1or2;
static STAMCOUNTER gStatRefuseCanExecute;
/* Descriptor-table / task-register change counters. */
static STAMCOUNTER gStatREMGDTChange;
static STAMCOUNTER gStatREMIDTChange;
static STAMCOUNTER gStatREMLDTRChange;
static STAMCOUNTER gStatREMTRChange;
/* Per-segment out-of-sync counters, indexed ES,CS,SS,DS,FS,GS (see registrations). */
static STAMCOUNTER gStatSelOutOfSync[6];
static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER gStatFlushTBs;
#endif
147
148/*
149 * Global stuff.
150 */
151
/** MMIO read callbacks, indexed by access size log2 (0=byte, 1=word, 2=dword).
 *  Registered with the recompiler in REMR3Init via cpu_register_io_memory(). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access size log2 (0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access handler read callbacks, indexed by access size log2 (0=byte, 1=word, 2=dword). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access handler write callbacks, indexed by access size log2 (0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
183
184
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
/** Handler for the '.remstep' command; toggles/reports logged-disassembly single stepping. */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. Registered once from REMR3Init via DBGCRegisterCommands(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
216
217
218/*******************************************************************************
219* Internal Functions *
220*******************************************************************************/
/** Forward declaration: fatal REM abort helper (defined later in this file). */
static void remAbort(int rc, const char *pszTip);
/** FPU/math sanity self-test; returns 0 on success (implemented elsewhere — used only in DEBUG builds). */
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/* Prologue code, must be in lower 4G to simplify jumps to/from generated code.
   Allocated with RTMemExecAlloc in REMR3Init. */
uint8_t* code_gen_prologue;
237
238/**
239 * Initializes the REM.
240 *
241 * @returns VBox status code.
242 * @param pVM The VM to operate on.
243 */
244REMR3DECL(int) REMR3Init(PVM pVM)
245{
246 uint32_t u32Dummy;
247 unsigned i;
248 int rc;
249
250 /*
251 * Assert sanity.
252 */
253 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
254 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
255 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
256#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
257 Assert(!testmath());
258#endif
259 /*
260 * Init some internal data members.
261 */
262 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
263 pVM->rem.s.Env.pVM = pVM;
264#ifdef CPU_RAW_MODE_INIT
265 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
266#endif
267
268 /* ctx. */
269 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
270 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
271
272 /* ignore all notifications */
273 pVM->rem.s.fIgnoreAll = true;
274
275 code_gen_prologue = RTMemExecAlloc(_1K);
276
277 cpu_exec_init_all(0);
278
279 /*
280 * Init the recompiler.
281 */
282 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
283 {
284 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
285 return VERR_GENERAL_FAILURE;
286 }
287 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
288 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
289
290 /* allocate code buffer for single instruction emulation. */
291 pVM->rem.s.Env.cbCodeBuffer = 4096;
292 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
293 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
294
295 /* finally, set the cpu_single_env global. */
296 cpu_single_env = &pVM->rem.s.Env;
297
298 /* Nothing is pending by default */
299 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
300
301 /*
302 * Register ram types.
303 */
304 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
305 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
306 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
307 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
308 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
309
310 /* stop ignoring. */
311 pVM->rem.s.fIgnoreAll = false;
312
313 /*
314 * Register the saved state data unit.
315 */
316 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
317 NULL, remR3Save, NULL,
318 NULL, remR3Load, NULL);
319 if (RT_FAILURE(rc))
320 return rc;
321
322#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
323 /*
324 * Debugger commands.
325 */
326 static bool fRegisteredCmds = false;
327 if (!fRegisteredCmds)
328 {
329 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
330 if (RT_SUCCESS(rc))
331 fRegisteredCmds = true;
332 }
333#endif
334
335#ifdef VBOX_WITH_STATISTICS
336 /*
337 * Statistics.
338 */
339 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
340 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
341 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
342 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
343 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
344 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
345 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
348 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
349 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
350 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
351
352 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
353
354 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
355 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
356 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
357 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
358 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
359 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
360 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
361 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
362 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
363 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
364 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
365
366 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
367 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
368 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
369 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
370
371 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
372 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
373 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
377
378 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
379 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
384
385
386#endif
387
388#ifdef DEBUG_ALL_LOGGING
389 loglevel = ~0;
390 logfile = fopen("/tmp/vbox-qemu.log", "w");
391#endif
392
393 return rc;
394}
395
396
397/**
398 * Terminates the REM.
399 *
400 * Termination means cleaning up and freeing all resources,
401 * the VM it self is at this point powered off or suspended.
402 *
403 * @returns VBox status code.
404 * @param pVM The VM to operate on.
405 */
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * Currently intentionally a no-op. NOTE(review): the executable buffers
 * allocated in REMR3Init (code_gen_prologue, Env.pvCodeBuffer) are not
 * released here — presumably they live for the process lifetime; confirm.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    return VINF_SUCCESS;
}
410
411
412/**
413 * The VM is being reset.
414 *
415 * For the REM component this means to call the cpu_reset() and
416 * reinitialize some state variables.
417 *
418 * @param pVM VM handle.
419 */
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * fIgnoreAll is raised around cpu_reset() so notifications triggered by
     * the reset itself are ignored — keep this bracketing order intact.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
436
437
438/**
439 * Execute state save operation.
440 *
441 * @returns VBox status code.
442 * @param pVM VM Handle.
443 * @param pSSM SSM operation handle.
444 */
/**
 * Execute state save operation.
 *
 * Saved layout: hflags, ~0 separator, CPU_RAW_RING0 flag, pending interrupt,
 * ~0 terminator. Must stay in sync with remR3Load.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    /* NOTE(review): intermediate SSMR3Put* statuses are not checked; presumably
       errors are latched in the SSM handle and reported by the final call — confirm. */
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0); /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
463
464
465/**
466 * Execute state load operation.
467 *
468 * @returns VBox status code.
469 * @param pVM VM Handle.
470 * @param pSSM SSM operation handle.
471 * @param u32Version Data layout version.
472 */
/**
 * Execute state load operation.
 *
 * Accepts the current REM_SAVED_STATE_VERSION layout as well as the 1.6
 * layout (which carried a redundant CPU state blob and the invalidated-page
 * list). Read order must mirror remR3Save exactly.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   u32Version      Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         * (Only the 1.6 layout saved the invalidated-page list.)
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Bounds-check before filling the fixed-size array below. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
586
587
588
589#undef LOG_GROUP
590#define LOG_GROUP LOG_GROUP_REM_RUN
591
592/**
593 * Single steps an instruction in recompiled mode.
594 *
595 * Before calling this function the REM state needs to be in sync with
596 * the VM. Call REMR3State() to perform the sync. It's only necessary
597 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
598 * and after calling REMR3StateBack().
599 *
600 * @returns VBox status code.
601 *
602 * @param pVM VM Handle.
603 */
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike. (interrupt_request is saved and
     * restored at the bottom of the function.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that has to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 on success, so fBp records whether one was removed
     * and must be re-inserted afterwards.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Expected outcome of single stepping: pulse the tick/virtual clocks
           so time advances minimally, then report the step. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Anything but EXCP_DEBUG is unexpected while single stepping. */
        AssertMsgFailed(("Damn, this shouldn't happen! cpu_exec returned %d while singlestepping\n", rc));
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM status was raised inside the recompiler; fetch and consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
673
674
675/**
676 * Set a breakpoint using the REM facilities.
677 *
678 * @returns VBox status code.
679 * @param pVM The VM handle.
680 * @param Address The breakpoint address.
681 * @thread The emulation thread.
682 */
683REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
684{
685 VM_ASSERT_EMT(pVM);
686 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
687 {
688 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
689 return VINF_SUCCESS;
690 }
691 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
692 return VERR_REM_NO_MORE_BP_SLOTS;
693}
694
695
696/**
697 * Clears a breakpoint set by REMR3BreakpointSet().
698 *
699 * @returns VBox status code.
700 * @param pVM The VM handle.
701 * @param Address The breakpoint address.
702 * @thread The emulation thread.
703 */
704REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
705{
706 VM_ASSERT_EMT(pVM);
707 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
708 {
709 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
710 return VINF_SUCCESS;
711 }
712 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
713 return VERR_REM_BP_NOT_FOUND;
714}
715
716
717/**
718 * Emulate an instruction.
719 *
720 * This function executes one instruction without letting anyone
721 * interrupt it. This is intended for being called while being in
722 * raw mode and thus will take care of all the state syncing between
723 * REM and the rest.
724 *
725 * @returns VBox status code.
726 * @param pVM VM handle.
727 */
728REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
729{
730 bool fFlushTBs;
731
732 int rc, rc2;
733 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
734
735 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
736 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
737 */
738 if (HWACCMIsEnabled(pVM))
739 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
740
741 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
742 fFlushTBs = pVM->rem.s.fFlushTBs;
743 pVM->rem.s.fFlushTBs = false;
744
745 /*
746 * Sync the state and enable single instruction / single stepping.
747 */
748 rc = REMR3State(pVM);
749 pVM->rem.s.fFlushTBs = fFlushTBs;
750 if (RT_SUCCESS(rc))
751 {
752 int interrupt_request = pVM->rem.s.Env.interrupt_request;
753 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
754 Assert(!pVM->rem.s.Env.singlestep_enabled);
755#if 1
756
757 /*
758 * Now we set the execute single instruction flag and enter the cpu_exec loop.
759 */
760 TMNotifyStartOfExecution(pVM);
761 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
762 rc = cpu_exec(&pVM->rem.s.Env);
763 TMNotifyEndOfExecution(pVM);
764 switch (rc)
765 {
766 /*
767 * Executed without anything out of the way happening.
768 */
769 case EXCP_SINGLE_INSTR:
770 rc = VINF_EM_RESCHEDULE;
771 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
772 break;
773
774 /*
775 * If we take a trap or start servicing a pending interrupt, we might end up here.
776 * (Timer thread or some other thread wishing EMT's attention.)
777 */
778 case EXCP_INTERRUPT:
779 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
780 rc = VINF_EM_RESCHEDULE;
781 break;
782
783 /*
784 * Single step, we assume!
785 * If there was a breakpoint there we're fucked now.
786 */
787 case EXCP_DEBUG:
788 {
789 /* breakpoint or single step? */
790 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
791 int iBP;
792 rc = VINF_EM_DBG_STEPPED;
793 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
794 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
795 {
796 rc = VINF_EM_DBG_BREAKPOINT;
797 break;
798 }
799 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
800 break;
801 }
802
803 /*
804 * hlt instruction.
805 */
806 case EXCP_HLT:
807 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
808 rc = VINF_EM_HALT;
809 break;
810
811 /*
812 * The VM has halted.
813 */
814 case EXCP_HALTED:
815 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
816 rc = VINF_EM_HALT;
817 break;
818
819 /*
820 * Switch to RAW-mode.
821 */
822 case EXCP_EXECUTE_RAW:
823 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
824 rc = VINF_EM_RESCHEDULE_RAW;
825 break;
826
827 /*
828 * Switch to hardware accelerated RAW-mode.
829 */
830 case EXCP_EXECUTE_HWACC:
831 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
832 rc = VINF_EM_RESCHEDULE_HWACC;
833 break;
834
835 /*
836 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
837 */
838 case EXCP_RC:
839 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
840 rc = pVM->rem.s.rc;
841 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
842 break;
843
844 /*
845 * Figure out the rest when they arrive....
846 */
847 default:
848 AssertMsgFailed(("rc=%d\n", rc));
849 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
850 rc = VINF_EM_RESCHEDULE;
851 break;
852 }
853
854 /*
855 * Switch back the state.
856 */
857#else
858 pVM->rem.s.Env.interrupt_request = 0;
859 cpu_single_step(&pVM->rem.s.Env, 1);
860
861 /*
862 * Execute and handle the return code.
863 * We execute without enabling the cpu tick, so on success we'll
864 * just flip it on and off to make sure it moves.
865 *
866 * (We do not use emulate_single_instr() because that doesn't enter the
867 * right way in will cause serious trouble if a longjmp was attempted.)
868 */
869# ifdef DEBUG_bird
870 remR3DisasInstr(&pVM->rem.s.Env, 1, "REMR3EmulateInstruction");
871# endif
872 TMNotifyStartOfExecution(pVM);
873 int cTimesMax = 16384;
874 uint32_t eip = pVM->rem.s.Env.eip;
875 do
876 {
877 rc = cpu_exec(&pVM->rem.s.Env);
878
879 } while ( eip == pVM->rem.s.Env.eip
880 && (rc == EXCP_DEBUG || rc == EXCP_EXECUTE_RAW)
881 && --cTimesMax > 0);
882 TMNotifyEndOfExecution(pVM);
883 switch (rc)
884 {
885 /*
886 * Single step, we assume!
887 * If there was a breakpoint there we're fucked now.
888 */
889 case EXCP_DEBUG:
890 {
891 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG\n"));
892 rc = VINF_EM_RESCHEDULE;
893 break;
894 }
895
896 /*
897 * We cannot be interrupted!
898 */
899 case EXCP_INTERRUPT:
900 AssertMsgFailed(("Shouldn't happen! Everything was locked!\n"));
901 rc = VERR_INTERNAL_ERROR;
902 break;
903
904 /*
905 * hlt instruction.
906 */
907 case EXCP_HLT:
908 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
909 rc = VINF_EM_HALT;
910 break;
911
912 /*
913 * The VM has halted.
914 */
915 case EXCP_HALTED:
916 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
917 rc = VINF_EM_HALT;
918 break;
919
920 /*
921 * Switch to RAW-mode.
922 */
923 case EXCP_EXECUTE_RAW:
924 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
925 rc = VINF_EM_RESCHEDULE_RAW;
926 break;
927
928 /*
929 * Switch to hardware accelerated RAW-mode.
930 */
931 case EXCP_EXECUTE_HWACC:
932 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
933 rc = VINF_EM_RESCHEDULE_HWACC;
934 break;
935
936 /*
937 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
938 */
939 case EXCP_RC:
940 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
941 rc = pVM->rem.s.rc;
942 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
943 break;
944
945 /*
946 * Figure out the rest when they arrive....
947 */
948 default:
949 AssertMsgFailed(("rc=%d\n", rc));
950 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
951 rc = VINF_SUCCESS;
952 break;
953 }
954
955 /*
956 * Switch back the state.
957 */
958 cpu_single_step(&pVM->rem.s.Env, 0);
959#endif
960 pVM->rem.s.Env.interrupt_request = interrupt_request;
961 rc2 = REMR3StateBack(pVM);
962 AssertRC(rc2);
963 }
964
965 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
966 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
967 return rc;
968}
969
970
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * The switch below translates the QEMU EXCP_* exit reasons of cpu_exec()
 * into VBox status codes: continue (VINF_SUCCESS), halt (VINF_EM_HALT),
 * reschedule to raw/hwacc mode, debug events, or a pending EM rc.
 *
 * @returns VBox status code.
 *
 * @param pVM VM Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Bracket the recompiled execution with TM notifications so the time
       spent executing guest code is accounted for. */
    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? */
            /* Assume single stepping unless the current flat PC matches one
               of the breakpoints registered with the recompiler. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* consume the pending rc so it can't be returned twice. */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1097
1098
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param env The CPU env struct.
 * @param eip The EIP to check this for (might differ from env->eip).
 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         * (only the fields it inspects are filled in).
         *
         * Note: QEmu keeps the descriptor attribute bits shifted up 8 bits,
         * so `(flags >> 8) & 0xF0FF` converts back to the CPUM layout
         * (the inverse of the `<< 8` conversion done in REMR3State).
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    /* V86 mode is refused unless the experimental VBOX_RAW_V86 is enabled. */
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    /* Single-instruction emulation and debugger single stepping / breakpoints
       must be serviced by the recompiler, not raw mode. */
    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Both paging and protected mode must be enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE guests are only allowed when the (virtual) CPU advertises PAE. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code: requires raw ring-3 execution to be enabled and
           interrupts not to be masked. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 (supervisor) code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* PATM patch code is always executed in raw mode, overriding the
           IF check below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1327
1328
1329/**
1330 * Fetches a code byte.
1331 *
1332 * @returns Success indicator (bool) for ease of use.
1333 * @param env The CPU environment structure.
1334 * @param GCPtrInstr Where to fetch code.
1335 * @param pu8Byte Where to store the byte on success
1336 */
1337bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1338{
1339 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1340 if (RT_SUCCESS(rc))
1341 return true;
1342 return false;
1343}
1344
1345
1346/**
1347 * Flush (or invalidate if you like) page table/dir entry.
1348 *
1349 * (invlpg instruction; tlb_flush_page)
1350 *
1351 * @param env Pointer to cpu environment.
1352 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1353 */
1354void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1355{
1356 PVM pVM = env->pVM;
1357 PCPUMCTX pCtx;
1358 int rc;
1359
1360 /*
1361 * When we're replaying invlpg instructions or restoring a saved
1362 * state we disable this path.
1363 */
1364 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1365 return;
1366 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1367 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1368
1369 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1370
1371 /*
1372 * Update the control registers before calling PGMFlushPage.
1373 */
1374 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1375 pCtx->cr0 = env->cr[0];
1376 pCtx->cr3 = env->cr[3];
1377 pCtx->cr4 = env->cr[4];
1378
1379 /*
1380 * Let PGM do the rest.
1381 */
1382 rc = PGMInvalidatePage(pVM, GCPtr);
1383 if (RT_FAILURE(rc))
1384 {
1385 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1386 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1387 }
1388 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1389}
1390
1391
1392#ifndef REM_PHYS_ADDR_IN_TLB
1393void* remR3GCPhys2HCVirt(CPUState *env1, target_ulong physAddr)
1394{
1395 void* rv = NULL;
1396 int rc;
1397
1398 rc = PGMPhysGCPhys2HCPtr(env1->pVM, (RTGCPHYS)physAddr, 1, &rv);
1399 Assert (RT_SUCCESS(rc));
1400
1401 return rv;
1402}
1403
1404target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr)
1405{
1406 RTGCPHYS rv = 0;
1407 int rc;
1408
1409 rc = PGMR3DbgR3Ptr2GCPhys(env1->pVM, (RTR3PTR)addr, &rv);
1410 Assert (RT_SUCCESS(rc));
1411
1412 return (target_ulong)rv;
1413}
1414#endif
1415
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * @param env Pointer to the CPU environment.
 * @param GCPtr Code page to monitor
 *
 * @remark Compiles to a no-op unless VBOX_REM_PROTECT_PAGES_FROM_SMC is defined.
 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    /* Only hand the page to CSAM for monitoring in paged supervisor mode. */
    if (    (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1434
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * @param env Pointer to the CPU environment.
 * @param GCPtr Code page to monitor
 *
 * @remark The CSAM call is compiled out unless VBOX_REM_PROTECT_PAGES_FROM_SMC
 *         is defined; the conditions mirror remR3ProtectCode.
 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (    (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1453
1454
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param env     Pointer to the CPU environment.
 * @param fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so a non-global flush
     * has to flush everything anyway.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%RGr CR3=%RGr CR4=%RGr %s\n", env->cr[0], env->cr[3], env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    PGMFlushTLB(pVM, env->cr[3], fGlobal);
}
1495
1496
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * Synchronizes the control registers into the CPUM context and lets PGM
 * switch its paging mode accordingly. A failed mode change aborts the CPU
 * via cpu_abort().
 *
 * @param env Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    int rc;
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
#else
    /* 32-bit only target: EFER is irrelevant, pass 0. */
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
#endif
}
1535
1536
/**
 * Called from compiled code to run dma.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    /* Suspend the emulated-code profiling sample while PDM services DMA. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1548
1549
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * @param env Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    /* Account the timer work under STATS_QEMU_RUN_TIMERS instead of the
       emulated-code sample while the TM queues are processed. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1563
1564
/**
 * Record trap occurance
 *
 * Updates per-trap statistics and tracks repeated occurrences of the same
 * CPU exception (same vector/EIP/CR2) so a stuck guest can be detected.
 *
 * @returns VBox status code: VINF_SUCCESS, or VERR_REM_TOO_MANY_TRAPS when
 *          the same exception has repeated more than 512 times.
 * @param env Pointer to the CPU environment.
 * @param uTrap Trap nr
 * @param uErrorCode Error code
 * @param pvNextEIP Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, uint32_t pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    /* Lazily register one STAM counter per trap number on first occurrence. */
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, (RTGCPTR)pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (vector < 0x20) in protected non-V86 mode take part
       in the repeated-trap detection below. */
    if(    uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same exception repeating more than 512 times: assume we're stuck
           and bail out with VERR_REM_TOO_MANY_TRAPS. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, (RTGCPTR)pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Restart the repeat counter when vector, EIP or CR2 changed. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    else
    {
        /* Not a tracked exception; just record it and reset the counter. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    return VINF_SUCCESS;
}
1624
1625
/**
 * Clear current active trap
 *
 * Resets the pending-exception tracking state used by remR3NotifyTrap.
 *
 * @param pVM VM Handle.
 */
void remR3TrapClear(PVM pVM)
{
    pVM->rem.s.cPendingExceptions = 0;
    pVM->rem.s.uPendingException = 0;
    pVM->rem.s.uPendingExcptEIP = 0;
    pVM->rem.s.uPendingExcptCR2 = 0;
}
1638
1639
/**
 * Record previous call instruction addresses
 *
 * Forwards the current EIP to CSAM's call-address recorder.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1649
1650
1651/**
1652 * Syncs the internal REM state with the VM.
1653 *
1654 * This must be called before REMR3Run() is invoked whenever when the REM
1655 * state is not up to date. Calling it several times in a row is not
1656 * permitted.
1657 *
1658 * @returns VBox status code.
1659 *
1660 * @param pVM VM Handle.
1661 * @param fFlushTBs Flush all translation blocks before executing code
1662 *
1663 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1664 * no do this since the majority of the callers don't want any unnecessary of events
1665 * pending that would immediatly interrupt execution.
1666 */
1667REMR3DECL(int) REMR3State(PVM pVM)
1668{
1669 register const CPUMCTX *pCtx;
1670 register unsigned fFlags;
1671 bool fHiddenSelRegsValid;
1672 unsigned i;
1673 TRPMEVENT enmType;
1674 uint8_t u8TrapNo;
1675 int rc;
1676
1677 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1678 Log2(("REMR3State:\n"));
1679
1680 pCtx = pVM->rem.s.pCtx;
1681 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1682
1683 Assert(!pVM->rem.s.fInREM);
1684 pVM->rem.s.fInStateSync = true;
1685
1686 /*
1687 * If we have to flush TBs, do that immediately.
1688 */
1689 if (pVM->rem.s.fFlushTBs)
1690 {
1691 STAM_COUNTER_INC(&gStatFlushTBs);
1692 tb_flush(&pVM->rem.s.Env);
1693 pVM->rem.s.fFlushTBs = false;
1694 }
1695
1696 /*
1697 * Copy the registers which require no special handling.
1698 */
1699#ifdef TARGET_X86_64
1700 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1701 Assert(R_EAX == 0);
1702 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1703 Assert(R_ECX == 1);
1704 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1705 Assert(R_EDX == 2);
1706 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1707 Assert(R_EBX == 3);
1708 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1709 Assert(R_ESP == 4);
1710 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1711 Assert(R_EBP == 5);
1712 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1713 Assert(R_ESI == 6);
1714 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1715 Assert(R_EDI == 7);
1716 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1717 pVM->rem.s.Env.regs[8] = pCtx->r8;
1718 pVM->rem.s.Env.regs[9] = pCtx->r9;
1719 pVM->rem.s.Env.regs[10] = pCtx->r10;
1720 pVM->rem.s.Env.regs[11] = pCtx->r11;
1721 pVM->rem.s.Env.regs[12] = pCtx->r12;
1722 pVM->rem.s.Env.regs[13] = pCtx->r13;
1723 pVM->rem.s.Env.regs[14] = pCtx->r14;
1724 pVM->rem.s.Env.regs[15] = pCtx->r15;
1725
1726 pVM->rem.s.Env.eip = pCtx->rip;
1727
1728 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1729#else
1730 Assert(R_EAX == 0);
1731 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1732 Assert(R_ECX == 1);
1733 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1734 Assert(R_EDX == 2);
1735 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1736 Assert(R_EBX == 3);
1737 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1738 Assert(R_ESP == 4);
1739 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1740 Assert(R_EBP == 5);
1741 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1742 Assert(R_ESI == 6);
1743 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1744 Assert(R_EDI == 7);
1745 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1746 pVM->rem.s.Env.eip = pCtx->eip;
1747
1748 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1749#endif
1750
1751 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1752
1753 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1754 for (i=0;i<8;i++)
1755 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1756
1757 /*
1758 * Clear the halted hidden flag (the interrupt waking up the CPU can
1759 * have been dispatched in raw mode).
1760 */
1761 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1762
1763 /*
1764 * Replay invlpg?
1765 */
1766 if (pVM->rem.s.cInvalidatedPages)
1767 {
1768 RTUINT i;
1769
1770 pVM->rem.s.fIgnoreInvlPg = true;
1771 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1772 {
1773 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1774 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1775 }
1776 pVM->rem.s.fIgnoreInvlPg = false;
1777 pVM->rem.s.cInvalidatedPages = 0;
1778 }
1779
1780 /* Replay notification changes? */
1781 if (pVM->rem.s.cHandlerNotifications)
1782 REMR3ReplayHandlerNotifications(pVM);
1783
1784 /* Update MSRs; before CRx registers! */
1785 pVM->rem.s.Env.efer = pCtx->msrEFER;
1786 pVM->rem.s.Env.star = pCtx->msrSTAR;
1787 pVM->rem.s.Env.pat = pCtx->msrPAT;
1788#ifdef TARGET_X86_64
1789 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1790 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1791 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1792 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1793
1794 /* Update the internal long mode activate flag according to the new EFER value. */
1795 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1796 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1797 else
1798 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1799#endif
1800
1801
1802 /*
1803 * Registers which are rarely changed and require special handling / order when changed.
1804 */
1805 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1806 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1807 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1808 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR | CPUM_CHANGED_TR
1809 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1810 {
1811 if (fFlags & CPUM_CHANGED_FPU_REM)
1812 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1813
1814 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1815 {
1816 pVM->rem.s.fIgnoreCR3Load = true;
1817 tlb_flush(&pVM->rem.s.Env, true);
1818 pVM->rem.s.fIgnoreCR3Load = false;
1819 }
1820
1821 /* CR4 before CR0! */
1822 if (fFlags & CPUM_CHANGED_CR4)
1823 {
1824 pVM->rem.s.fIgnoreCR3Load = true;
1825 pVM->rem.s.fIgnoreCpuMode = true;
1826 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1827 pVM->rem.s.fIgnoreCpuMode = false;
1828 pVM->rem.s.fIgnoreCR3Load = false;
1829 }
1830
1831 if (fFlags & CPUM_CHANGED_CR0)
1832 {
1833 pVM->rem.s.fIgnoreCR3Load = true;
1834 pVM->rem.s.fIgnoreCpuMode = true;
1835 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1836 pVM->rem.s.fIgnoreCpuMode = false;
1837 pVM->rem.s.fIgnoreCR3Load = false;
1838 }
1839
1840 if (fFlags & CPUM_CHANGED_CR3)
1841 {
1842 pVM->rem.s.fIgnoreCR3Load = true;
1843 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1844 pVM->rem.s.fIgnoreCR3Load = false;
1845 }
1846
1847 if (fFlags & CPUM_CHANGED_GDTR)
1848 {
1849 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1850 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1851 }
1852
1853 if (fFlags & CPUM_CHANGED_IDTR)
1854 {
1855 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1856 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1857 }
1858
1859 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1860 {
1861 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1862 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1863 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1864 }
1865
1866 if (fFlags & CPUM_CHANGED_LDTR)
1867 {
1868 if (fHiddenSelRegsValid)
1869 {
1870 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1871 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1872 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1873 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;;
1874 }
1875 else
1876 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1877 }
1878
1879 if (fFlags & CPUM_CHANGED_TR)
1880 {
1881 if (fHiddenSelRegsValid)
1882 {
1883 pVM->rem.s.Env.tr.selector = pCtx->tr;
1884 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1885 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1886 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;;
1887 }
1888 else
1889 sync_tr(&pVM->rem.s.Env, pCtx->tr);
1890
1891 /** @note do_interrupt will fault if the busy flag is still set.... */
1892 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1893 }
1894
1895 if (fFlags & CPUM_CHANGED_CPUID)
1896 {
1897 uint32_t u32Dummy;
1898
1899 /*
1900 * Get the CPUID features.
1901 */
1902 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1903 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1904 }
1905 }
1906
1907 /*
1908 * Update selector registers.
1909 * This must be done *after* we've synced gdt, ldt and crX registers
1910 * since we're reading the GDT/LDT om sync_seg. This will happen with
1911 * saved state which takes a quick dip into rawmode for instance.
1912 */
1913 /*
1914 * Stack; Note first check this one as the CPL might have changed. The
1915 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1916 */
1917
1918 if (fHiddenSelRegsValid)
1919 {
1920 /* The hidden selector registers are valid in the CPU context. */
1921 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1922
1923 /* Set current CPL */
1924 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1925
1926 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1927 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1928 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1929 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1930 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1931 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1932 }
1933 else
1934 {
1935 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1936 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1937 {
1938 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1939
1940 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1941 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1942#ifdef VBOX_WITH_STATISTICS
1943 if (pVM->rem.s.Env.segs[R_SS].newselector)
1944 {
1945 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1946 }
1947#endif
1948 }
1949 else
1950 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1951
1952 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1953 {
1954 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1955 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1956#ifdef VBOX_WITH_STATISTICS
1957 if (pVM->rem.s.Env.segs[R_ES].newselector)
1958 {
1959 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1960 }
1961#endif
1962 }
1963 else
1964 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1965
1966 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1967 {
1968 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1969 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1970#ifdef VBOX_WITH_STATISTICS
1971 if (pVM->rem.s.Env.segs[R_CS].newselector)
1972 {
1973 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1974 }
1975#endif
1976 }
1977 else
1978 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1979
1980 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1981 {
1982 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1983 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1984#ifdef VBOX_WITH_STATISTICS
1985 if (pVM->rem.s.Env.segs[R_DS].newselector)
1986 {
1987 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1988 }
1989#endif
1990 }
1991 else
1992 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1993
1994 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1995 * be the same but not the base/limit. */
1996 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1997 {
1998 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1999 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2000#ifdef VBOX_WITH_STATISTICS
2001 if (pVM->rem.s.Env.segs[R_FS].newselector)
2002 {
2003 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2004 }
2005#endif
2006 }
2007 else
2008 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2009
2010 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2011 {
2012 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2013 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2014#ifdef VBOX_WITH_STATISTICS
2015 if (pVM->rem.s.Env.segs[R_GS].newselector)
2016 {
2017 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2018 }
2019#endif
2020 }
2021 else
2022 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2023 }
2024
2025 /*
2026 * Check for traps.
2027 */
2028 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2029 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2030 if (RT_SUCCESS(rc))
2031 {
2032#ifdef DEBUG
2033 if (u8TrapNo == 0x80)
2034 {
2035 remR3DumpLnxSyscall(pVM);
2036 remR3DumpOBsdSyscall(pVM);
2037 }
2038#endif
2039
2040 pVM->rem.s.Env.exception_index = u8TrapNo;
2041 if (enmType != TRPM_SOFTWARE_INT)
2042 {
2043 pVM->rem.s.Env.exception_is_int = 0;
2044 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2045 }
2046 else
2047 {
2048 /*
2049 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2050 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2051 * for int03 and into.
2052 */
2053 pVM->rem.s.Env.exception_is_int = 1;
2054 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2055 /* int 3 may be generated by one-byte 0xcc */
2056 if (u8TrapNo == 3)
2057 {
2058 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2059 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2060 }
2061 /* int 4 may be generated by one-byte 0xce */
2062 else if (u8TrapNo == 4)
2063 {
2064 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2065 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2066 }
2067 }
2068
2069 /* get error code and cr2 if needed. */
2070 switch (u8TrapNo)
2071 {
2072 case 0x0e:
2073 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2074 /* fallthru */
2075 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2076 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2077 break;
2078
2079 case 0x11: case 0x08:
2080 default:
2081 pVM->rem.s.Env.error_code = 0;
2082 break;
2083 }
2084
2085 /*
2086 * We can now reset the active trap since the recompiler is gonna have a go at it.
2087 */
2088 rc = TRPMResetTrap(pVM);
2089 AssertRC(rc);
2090 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2091 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2092 }
2093
2094 /*
2095 * Clear old interrupt request flags; Check for pending hardware interrupts.
2096 * (See @remark for why we don't check for other FFs.)
2097 */
2098 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2099 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2100 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2101 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2102
2103 /*
2104 * We're now in REM mode.
2105 */
2106 pVM->rem.s.fInREM = true;
2107 pVM->rem.s.fInStateSync = false;
2108 pVM->rem.s.cCanExecuteRaw = 0;
2109 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2110 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2111 return VINF_SUCCESS;
2112}
2113
2114
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR = 0;
    pCtx->fpu.MXCSR_MASK = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that QEmu hasn't fully loaded yet (lazy/out-of-sync). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDT/IDT/LDT/TR: raise the matching sync FF whenever REM changed them,
       so SELM/TRPM resync their shadow copies before raw-mode execution. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr      = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr        = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base     = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit    = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    /* 0xF0FF keeps just the attribute bits after the >>8 shift (drops the limit 19:16 nibble). */
    pCtx->csHid.Attr.u      = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base     = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit    = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u      = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base     = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit    = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u      = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base     = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit    = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u      = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base     = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit    = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u      = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base     = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit    = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u      = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;

    pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs       = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip      = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp      = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER           = pVM->rem.s.Env.efer;
    pCtx->msrSTAR           = pVM->rem.s.Env.star;
    pCtx->msrPAT            = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR          = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR          = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK         = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE   = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * Hand any exception REM left pending (0..255) back to TRPM so the VMM
     * delivers it; exception_index < 0 means nothing pending.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2347
2348
2349/**
2350 * This is called by the disassembler when it wants to update the cpu state
2351 * before for instance doing a register dump.
2352 */
2353static void remR3StateUpdate(PVM pVM)
2354{
2355 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2356 unsigned i;
2357
2358 Assert(pVM->rem.s.fInREM);
2359
2360 /*
2361 * Copy back the registers.
2362 * This is done in the order they are declared in the CPUMCTX structure.
2363 */
2364
2365 /** @todo FOP */
2366 /** @todo FPUIP */
2367 /** @todo CS */
2368 /** @todo FPUDP */
2369 /** @todo DS */
2370 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2371 pCtx->fpu.MXCSR = 0;
2372 pCtx->fpu.MXCSR_MASK = 0;
2373
2374 /** @todo check if FPU/XMM was actually used in the recompiler */
2375 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2376//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2377
2378#ifdef TARGET_X86_64
2379 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2380 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2381 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2382 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2383 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2384 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2385 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2386 pCtx->r8 = pVM->rem.s.Env.regs[8];
2387 pCtx->r9 = pVM->rem.s.Env.regs[9];
2388 pCtx->r10 = pVM->rem.s.Env.regs[10];
2389 pCtx->r11 = pVM->rem.s.Env.regs[11];
2390 pCtx->r12 = pVM->rem.s.Env.regs[12];
2391 pCtx->r13 = pVM->rem.s.Env.regs[13];
2392 pCtx->r14 = pVM->rem.s.Env.regs[14];
2393 pCtx->r15 = pVM->rem.s.Env.regs[15];
2394
2395 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2396#else
2397 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2398 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2399 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2400 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2401 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2402 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2403 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2404
2405 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2406#endif
2407
2408 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2409
2410 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2411 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2412 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2413 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2414 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2415
2416#ifdef TARGET_X86_64
2417 pCtx->rip = pVM->rem.s.Env.eip;
2418 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2419#else
2420 pCtx->eip = pVM->rem.s.Env.eip;
2421 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2422#endif
2423
2424 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2425 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2426 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2427 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2428
2429 for (i=0;i<8;i++)
2430 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2431
2432 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2433 if (pCtx->gdtr.pGdt != (uint32_t)pVM->rem.s.Env.gdt.base)
2434 {
2435 pCtx->gdtr.pGdt = (uint32_t)pVM->rem.s.Env.gdt.base;
2436 STAM_COUNTER_INC(&gStatREMGDTChange);
2437 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2438 }
2439
2440 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2441 if (pCtx->idtr.pIdt != (uint32_t)pVM->rem.s.Env.idt.base)
2442 {
2443 pCtx->idtr.pIdt = (uint32_t)pVM->rem.s.Env.idt.base;
2444 STAM_COUNTER_INC(&gStatREMIDTChange);
2445 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2446 }
2447
2448 if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
2449 {
2450 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2451 STAM_COUNTER_INC(&gStatREMLDTRChange);
2452 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2453 }
2454 if (pCtx->tr != pVM->rem.s.Env.tr.selector)
2455 {
2456 pCtx->tr = pVM->rem.s.Env.tr.selector;
2457 STAM_COUNTER_INC(&gStatREMTRChange);
2458 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2459 }
2460
2461 /** @todo These values could still be out of sync! */
2462 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2463 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2464 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2465 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2466
2467 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2468 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2469 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2470
2471 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2472 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2473 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2474
2475 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2476 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2477 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2478
2479 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2480 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2481 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2482
2483 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2484 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2485 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2486
2487 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2488 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2489 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2490
2491 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2492 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2493 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;
2494
2495 /* Sysenter MSR */
2496 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2497 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2498 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2499
2500 /* System MSRs. */
2501 pCtx->msrEFER = pVM->rem.s.Env.efer;
2502 pCtx->msrSTAR = pVM->rem.s.Env.star;
2503 pCtx->msrPAT = pVM->rem.s.Env.pat;
2504#ifdef TARGET_X86_64
2505 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2506 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2507 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2508 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2509#endif
2510
2511}
2512
2513
2514/**
2515 * Update the VMM state information if we're currently in REM.
2516 *
2517 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2518 * we're currently executing in REM and the VMM state is invalid. This method will of
2519 * course check that we're executing in REM before syncing any data over to the VMM.
2520 *
2521 * @param pVM The VM handle.
2522 */
2523REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2524{
2525 if (pVM->rem.s.fInREM)
2526 remR3StateUpdate(pVM);
2527}
2528
2529
2530#undef LOG_GROUP
2531#define LOG_GROUP LOG_GROUP_REM
2532
2533
2534/**
2535 * Notify the recompiler about Address Gate 20 state change.
2536 *
2537 * This notification is required since A20 gate changes are
2538 * initialized from a device driver and the VM might just as
2539 * well be in REM mode as in RAW mode.
2540 *
2541 * @param pVM VM handle.
2542 * @param fEnable True if the gate should be enabled.
2543 * False if the gate should be disabled.
2544 */
2545REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2546{
2547 bool fSaved;
2548
2549 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2550 VM_ASSERT_EMT(pVM);
2551
2552 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2553 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2554
2555 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2556
2557 pVM->rem.s.fIgnoreAll = fSaved;
2558}
2559
2560
2561/**
2562 * Replays the invalidated recorded pages.
2563 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2564 *
2565 * @param pVM VM handle.
2566 */
2567REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2568{
2569 RTUINT i;
2570
2571 VM_ASSERT_EMT(pVM);
2572
2573 /*
2574 * Sync the required registers.
2575 */
2576 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2577 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2578 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2579 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2580
2581 /*
2582 * Replay the flushes.
2583 */
2584 pVM->rem.s.fIgnoreInvlPg = true;
2585 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2586 {
2587 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2588 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2589 }
2590 pVM->rem.s.fIgnoreInvlPg = false;
2591 pVM->rem.s.cInvalidatedPages = 0;
2592}
2593
2594
2595/**
2596 * Replays the handler notification changes
2597 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2598 *
2599 * @param pVM VM handle.
2600 */
2601REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2602{
2603 /*
2604 * Replay the flushes.
2605 */
2606 RTUINT i;
2607 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2608
2609 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2610 VM_ASSERT_EMT(pVM);
2611
2612 pVM->rem.s.cHandlerNotifications = 0;
2613 for (i = 0; i < c; i++)
2614 {
2615 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2616 switch (pRec->enmKind)
2617 {
2618 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2619 REMR3NotifyHandlerPhysicalRegister(pVM,
2620 pRec->u.PhysicalRegister.enmType,
2621 pRec->u.PhysicalRegister.GCPhys,
2622 pRec->u.PhysicalRegister.cb,
2623 pRec->u.PhysicalRegister.fHasHCHandler);
2624 break;
2625
2626 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2627 REMR3NotifyHandlerPhysicalDeregister(pVM,
2628 pRec->u.PhysicalDeregister.enmType,
2629 pRec->u.PhysicalDeregister.GCPhys,
2630 pRec->u.PhysicalDeregister.cb,
2631 pRec->u.PhysicalDeregister.fHasHCHandler,
2632 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2633 break;
2634
2635 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2636 REMR3NotifyHandlerPhysicalModify(pVM,
2637 pRec->u.PhysicalModify.enmType,
2638 pRec->u.PhysicalModify.GCPhysOld,
2639 pRec->u.PhysicalModify.GCPhysNew,
2640 pRec->u.PhysicalModify.cb,
2641 pRec->u.PhysicalModify.fHasHCHandler,
2642 pRec->u.PhysicalModify.fRestoreAsRAM);
2643 break;
2644
2645 default:
2646 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2647 break;
2648 }
2649 }
2650 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2651}
2652
2653
2654/**
2655 * Notify REM about changed code page.
2656 *
2657 * @returns VBox status code.
2658 * @param pVM VM handle.
2659 * @param pvCodePage Code page address
2660 */
2661REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2662{
2663#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2664 int rc;
2665 RTGCPHYS PhysGC;
2666 uint64_t flags;
2667
2668 VM_ASSERT_EMT(pVM);
2669
2670 /*
2671 * Get the physical page address.
2672 */
2673 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2674 if (rc == VINF_SUCCESS)
2675 {
2676 /*
2677 * Sync the required registers and flush the whole page.
2678 * (Easier to do the whole page than notifying it about each physical
2679 * byte that was changed.
2680 */
2681 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2682 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2683 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2684 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2685
2686 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2687 }
2688#endif
2689 return VINF_SUCCESS;
2690}
2691
2692
2693/**
2694 * Notification about a successful MMR3PhysRegister() call.
2695 *
2696 * @param pVM VM handle.
2697 * @param GCPhys The physical address the RAM.
2698 * @param cb Size of the memory.
2699 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2700 */
2701REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
2702{
2703 uint32_t cbBitmap;
2704 int rc;
2705 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
2706 VM_ASSERT_EMT(pVM);
2707
2708 /*
2709 * Validate input - we trust the caller.
2710 */
2711 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2712 Assert(cb);
2713 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2714
2715 /*
2716 * Base ram?
2717 */
2718 if (!GCPhys)
2719 {
2720 phys_ram_size = cb;
2721 phys_ram_dirty_size = cb >> PAGE_SHIFT;
2722#ifndef VBOX_STRICT
2723 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
2724 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
2725#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
2726 phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
2727 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
2728 cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
2729 rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
2730 AssertRC(rc);
2731 phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
2732#endif
2733 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
2734 }
2735
2736 /*
2737 * Register the ram.
2738 */
2739 Assert(!pVM->rem.s.fIgnoreAll);
2740 pVM->rem.s.fIgnoreAll = true;
2741
2742#ifdef VBOX_WITH_NEW_PHYS_CODE
2743 if (fFlags & MM_RAM_FLAGS_RESERVED)
2744 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2745 else
2746 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2747#else
2748 if (!GCPhys)
2749 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2750 else
2751 {
2752 if (fFlags & MM_RAM_FLAGS_RESERVED)
2753 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2754 else
2755 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2756 }
2757#endif
2758 Assert(pVM->rem.s.fIgnoreAll);
2759 pVM->rem.s.fIgnoreAll = false;
2760}
2761
2762#ifndef VBOX_WITH_NEW_PHYS_CODE
2763
2764/**
2765 * Notification about a successful PGMR3PhysRegisterChunk() call.
2766 *
2767 * @param pVM VM handle.
2768 * @param GCPhys The physical address the RAM.
2769 * @param cb Size of the memory.
2770 * @param pvRam The HC address of the RAM.
2771 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2772 */
2773REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2774{
2775 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2776 VM_ASSERT_EMT(pVM);
2777
2778 /*
2779 * Validate input - we trust the caller.
2780 */
2781 Assert(pvRam);
2782 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2783 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2784 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2785 Assert(fFlags == 0 /* normal RAM */);
2786 Assert(!pVM->rem.s.fIgnoreAll);
2787 pVM->rem.s.fIgnoreAll = true;
2788 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2789 Assert(pVM->rem.s.fIgnoreAll);
2790 pVM->rem.s.fIgnoreAll = false;
2791}
2792
2793
2794/**
2795 * Grows dynamically allocated guest RAM.
2796 * Will raise a fatal error if the operation fails.
2797 *
2798 * @param physaddr The physical address.
2799 */
2800void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2801{
2802 int rc;
2803 PVM pVM = cpu_single_env->pVM;
2804 const RTGCPHYS GCPhys = physaddr;
2805
2806 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2807 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2808 if (RT_SUCCESS(rc))
2809 return;
2810
2811 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2812 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2813 AssertFatalFailed();
2814}
2815
2816#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2817
2818/**
2819 * Notification about a successful MMR3PhysRomRegister() call.
2820 *
2821 * @param pVM VM handle.
2822 * @param GCPhys The physical address of the ROM.
2823 * @param cb The size of the ROM.
2824 * @param pvCopy Pointer to the ROM copy.
2825 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2826 * This function will be called when ever the protection of the
2827 * shadow ROM changes (at reset and end of POST).
2828 */
2829REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2830{
2831 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
2832 VM_ASSERT_EMT(pVM);
2833
2834 /*
2835 * Validate input - we trust the caller.
2836 */
2837 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2838 Assert(cb);
2839 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2840 Assert(pvCopy);
2841 Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);
2842
2843 /*
2844 * Register the rom.
2845 */
2846 Assert(!pVM->rem.s.fIgnoreAll);
2847 pVM->rem.s.fIgnoreAll = true;
2848
2849 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2850
2851 Log2(("%.64Rhxd\n", (char *)pvCopy + cb - 64));
2852
2853 Assert(pVM->rem.s.fIgnoreAll);
2854 pVM->rem.s.fIgnoreAll = false;
2855}
2856
2857
2858/**
2859 * Notification about a successful memory deregistration or reservation.
2860 *
2861 * @param pVM VM Handle.
2862 * @param GCPhys Start physical address.
2863 * @param cb The size of the range.
2864 * @todo Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't
2865 * reserve any memory soon.
2866 */
2867REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2868{
2869 Log(("REMR3NotifyPhysReserve: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2870 VM_ASSERT_EMT(pVM);
2871
2872 /*
2873 * Validate input - we trust the caller.
2874 */
2875 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2876 Assert(cb);
2877 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2878
2879 /*
2880 * Unassigning the memory.
2881 */
2882 Assert(!pVM->rem.s.fIgnoreAll);
2883 pVM->rem.s.fIgnoreAll = true;
2884
2885 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2886
2887 Assert(pVM->rem.s.fIgnoreAll);
2888 pVM->rem.s.fIgnoreAll = false;
2889}
2890
2891
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * Mirrors the new access handler range into the recompiler's physical memory
 * map: MMIO ranges get the MMIO memory type, other ranges get the handler
 * memory type (but only when a host-context callback exists).
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhys Handler range address. Must be page aligned.
 * @param cb Size of the handler range. Must be a page multiple.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 *
 * @remark MMR3PhysRomRegister assumes that this function will not apply the
 * Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));
    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);

    /* Flush queued handler notifications first so we act on current state. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    /* Bracket the qemu call with fIgnoreAll (asserts check for nesting). */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2926
2927
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * Restores the range in the recompiler's physical memory map: either back to
 * plain RAM (identity mapped, fRestoreAsRAM) or to unassigned memory.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhys Handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);

    /* Flush queued handler notifications first so we act on current state. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    /* Bracket the qemu calls with fIgnoreAll (asserts check for nesting). */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Range lies above the RAM area, so unassigned is the only option. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            /* Identity map the range back as RAM (page aligned). */
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2971
2972
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * Moves a handler range in the recompiler's physical memory map: the old
 * range is restored (as RAM or unassigned) and the new range gets the
 * handler memory type. MMIO handlers are not supported here (assert).
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhysOld Old handler range address.
 * @param GCPhysNew New handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    /* Flush queued handler notifications first so we act on current state. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    if (fHasHCHandler)
    {
        /* Bracket the qemu calls with fIgnoreAll (asserts check for nesting). */
        Assert(!pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = true;

        /*
         * Reset the old page.
         */
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);

        Assert(pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = false;
    }
}
3023
3024
/**
 * Checks if we're handling access to this page or not.
 *
 * @returns true if we're trapping access.
 * @returns false if we aren't.
 * @param pVM The VM handle.
 * @param GCPhys The physical address.
 *
 * @remark This function will only work correctly in VBOX_STRICT builds!
 */
REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
{
#ifdef VBOX_STRICT
    unsigned long off;
    /* Flush queued handler notifications so the qemu map is up to date. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    /* A page is "handled" when its qemu memory type (kept in the low bits of
       the phys page offset) is our handler type, our MMIO type, or ROM. */
    off = get_phys_page_offset(GCPhys);
    return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
        || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
        || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
#else
    /* Non-strict builds lack the bookkeeping; always report unhandled. */
    return false;
#endif
}
3050
3051
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param env The cpu environment.
 * @param addr The virtual address.
 * @param pTLBEntry The TLB entry.
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry)
{
    PVM pVM = env->pVM;
    /* Monitored (handler) pages are fine to execute from: resolve the physical
       address via the TLB addend and return it. */
    if ((pTLBEntry->addr_code & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        target_ulong ret = pTLBEntry->addend + addr;
        AssertMsg2("remR3PhysGetPhysicalAddressCode: addr=%RGv addr_code=%RGv addend=%RGp ret=%RGp\n",
                   (RTGCPTR)addr, (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, ret);
        return ret;
    }
    /* Anything else (e.g. MMIO) is fatal: dump the handler/mmio/phys state to
       the release log and abort. This function does not return in this case. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3085
/**
 * Read guest RAM and ROM.
 *
 * Bulk read through PGM; profiled via gStatMemRead.
 *
 * @param SrcGCPhys The source address (guest physical).
 * @param pvDst The destination address.
 * @param cb Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3103
3104
3105/**
3106 * Read guest RAM and ROM, unsigned 8-bit.
3107 *
3108 * @param SrcGCPhys The source address (guest physical).
3109 */
3110uint8_t remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3111{
3112 uint8_t val;
3113 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3114 VBOX_CHECK_ADDR(SrcGCPhys);
3115 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3116 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3117#ifdef VBOX_DEBUG_PHYS
3118 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3119#endif
3120 return val;
3121}
3122
3123
3124/**
3125 * Read guest RAM and ROM, signed 8-bit.
3126 *
3127 * @param SrcGCPhys The source address (guest physical).
3128 */
3129int8_t remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3130{
3131 int8_t val;
3132 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3133 VBOX_CHECK_ADDR(SrcGCPhys);
3134 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3135 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3136#ifdef VBOX_DEBUG_PHYS
3137 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3138#endif
3139 return val;
3140}
3141
3142
3143/**
3144 * Read guest RAM and ROM, unsigned 16-bit.
3145 *
3146 * @param SrcGCPhys The source address (guest physical).
3147 */
3148uint16_t remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3149{
3150 uint16_t val;
3151 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3152 VBOX_CHECK_ADDR(SrcGCPhys);
3153 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3154 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3155#ifdef VBOX_DEBUG_PHYS
3156 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3157#endif
3158 return val;
3159}
3160
3161
3162/**
3163 * Read guest RAM and ROM, signed 16-bit.
3164 *
3165 * @param SrcGCPhys The source address (guest physical).
3166 */
3167int16_t remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3168{
3169 uint16_t val;
3170 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3171 VBOX_CHECK_ADDR(SrcGCPhys);
3172 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3173 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3174#ifdef VBOX_DEBUG_PHYS
3175 LogRel(("reads16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3176#endif
3177 return val;
3178}
3179
3180
3181/**
3182 * Read guest RAM and ROM, unsigned 32-bit.
3183 *
3184 * @param SrcGCPhys The source address (guest physical).
3185 */
3186uint32_t remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3187{
3188 uint32_t val;
3189 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3190 VBOX_CHECK_ADDR(SrcGCPhys);
3191 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3192 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3193#ifdef VBOX_DEBUG_PHYS
3194 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3195#endif
3196 return val;
3197}
3198
3199
3200/**
3201 * Read guest RAM and ROM, signed 32-bit.
3202 *
3203 * @param SrcGCPhys The source address (guest physical).
3204 */
3205int32_t remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3206{
3207 int32_t val;
3208 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3209 VBOX_CHECK_ADDR(SrcGCPhys);
3210 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3211 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3212#ifdef VBOX_DEBUG_PHYS
3213 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3214#endif
3215 return val;
3216}
3217
3218
3219/**
3220 * Read guest RAM and ROM, unsigned 64-bit.
3221 *
3222 * @param SrcGCPhys The source address (guest physical).
3223 */
3224uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3225{
3226 uint64_t val;
3227 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3228 VBOX_CHECK_ADDR(SrcGCPhys);
3229 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3230 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3231#ifdef VBOX_DEBUG_PHYS
3232 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3233#endif
3234 return val;
3235}
3236
3237/**
3238 * Read guest RAM and ROM, signed 64-bit.
3239 *
3240 * @param SrcGCPhys The source address (guest physical).
3241 */
3242int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3243{
3244 int64_t val;
3245 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3246 VBOX_CHECK_ADDR(SrcGCPhys);
3247 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3248 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3249#ifdef VBOX_DEBUG_PHYS
3250 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3251#endif
3252 return val;
3253}
3254
3255
/**
 * Write guest RAM.
 *
 * Bulk write through PGM; profiled via gStatMemWrite.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param pvSrc The source address.
 * @param cb Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3273
3274
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3291
3292
/**
 * Write guest RAM, unsigned 16-bit.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3309
3310
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3327
3328
3329/**
3330 * Write guest RAM, unsigned 64-bit.
3331 *
3332 * @param DstGCPhys The destination address (guest physical).
3333 * @param val Value
3334 */
3335void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3336{
3337 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3338 VBOX_CHECK_ADDR(DstGCPhys);
3339 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3340 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3341#ifdef VBOX_DEBUG_PHYS
3342 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3343#endif
3344}
3345
3346#undef LOG_GROUP
3347#define LOG_GROUP LOG_GROUP_REM_MMIO
3348
3349/** Read MMIO memory. */
3350static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3351{
3352 uint32_t u32 = 0;
3353 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3354 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3355 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3356 return u32;
3357}
3358
3359/** Read MMIO memory. */
3360static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3361{
3362 uint32_t u32 = 0;
3363 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3364 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3365 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3366 return u32;
3367}
3368
3369/** Read MMIO memory. */
3370static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3371{
3372 uint32_t u32 = 0;
3373 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3374 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3375 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3376 return u32;
3377}
3378
3379/** Write to MMIO memory. */
3380static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3381{
3382 int rc;
3383 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3384 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3385 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3386}
3387
3388/** Write to MMIO memory. */
3389static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3390{
3391 int rc;
3392 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3393 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3394 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3395}
3396
3397/** Write to MMIO memory. */
3398static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3399{
3400 int rc;
3401 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3402 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3403 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3404}
3405
3406
3407#undef LOG_GROUP
3408#define LOG_GROUP LOG_GROUP_REM_HANDLER
3409
3410/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3411
3412static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3413{
3414 uint8_t u8;
3415 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3416 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3417 return u8;
3418}
3419
3420static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3421{
3422 uint16_t u16;
3423 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3424 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3425 return u16;
3426}
3427
3428static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3429{
3430 uint32_t u32;
3431 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3432 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3433 return u32;
3434}
3435
3436static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3437{
3438 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3439 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3440}
3441
3442static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3443{
3444 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3445 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3446}
3447
/** Handler-memory 32-bit write, routed through PGM. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3453
3454/* -+- disassembly -+- */
3455
3456#undef LOG_GROUP
3457#define LOG_GROUP LOG_GROUP_REM_DISAS
3458
3459
/**
 * Enables or disables singled stepped disassembly.
 *
 * Worker: toggles the CPU_EMULATE_SINGLE_STEP bit in the recompiler CPU
 * state. Must be called on the EMT (see REMR3DisasEnableStepping).
 *
 * @returns VBox status code.
 * @param pVM VM handle.
 * @param fEnable To enable set this flag, to disable clear it.
 */
static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    if (fEnable)
        pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
    else
        pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
    return VINF_SUCCESS;
}
3478
3479
/**
 * Enables or disables singled stepped disassembly.
 *
 * Public wrapper: runs the worker directly when already on the EMT,
 * otherwise forwards the call to an EMT via the VM request mechanism.
 *
 * @returns VBox status code.
 * @param pVM VM handle.
 * @param fEnable To enable set this flag, to disable clear it.
 */
REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    PVMREQ pReq;
    int rc;

    LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
    if (VM_IS_EMT(pVM))
        return remR3DisasEnableStepping(pVM, fEnable);

    /* Dispatch to an EMT and wait for the result. */
    rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
}
3503
3504
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument: prints the current single-step-disassembly state.
 * With one argument: converts it to a bool and toggles the mode.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3529
3530
/**
 * Disassembles n instructions and prints them to the log.
 *
 * @returns Success indicator.
 * @param env Pointer to the recompiler CPU structure.
 * @param f32BitCode Indicates that whether or not the code should
 * be disassembled as 16 or 32 bit. If -1 the CS
 * selector will be inspected.
 * @param nrInstructions Nr of instructions to disassemble
 * @param pszPrefix Optional log-line prefix; may be NULL.
 * @remark not currently used for anything but ad-hoc debugging.
 */
bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
{
    int i, rc;
    RTGCPTR GCPtrPC;
    uint8_t *pvPC;
    RTINTPTR off;
    DISCPUSTATE Cpu;

    /*
     * Determin 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Convert cs:eip to host context address.
     * We don't care to much about cross page correctness presently.
     */
    GCPtrPC = env->segs[R_CS].base + env->eip;
    if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);

        /* convert eip to physical address. */
        rc = PGMPhysGCPtr2HCPtrByGstCR3(env->pVM,
                                        GCPtrPC,
                                        env->cr[3],
                                        env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
                                        (void**)&pvPC);
        if (RT_FAILURE(rc))
        {
            /* Translation failed: only patch-memory addresses can still be resolved. */
            if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
                return false;
            pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
                 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
        }
    }
    else
    {
        /* physical address */
        rc = PGMPhysGCPhys2HCPtr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
                                 (void**)&pvPC);
        if (RT_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    off = env->eip - (RTGCUINTPTR)pvPC;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL;          /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;

    /* Walk the buffer one decoded instruction at a time. */
    for (i=0;i<nrInstructions;i++)
    {
        char szOutput[256];
        uint32_t cbOp;
        if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
            return false;
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));

        pvPC += cbOp;
    }
    return true;
}
3614
3615
3616/** @todo need to test the new code, using the old code in the mean while. */
3617#define USE_OLD_DUMP_AND_DISASSEMBLY
3618
/**
 * Disassembles one instruction and prints it to the log.
 *
 * @returns Success indicator.
 * @param env Pointer to the recompiler CPU structure.
 * @param f32BitCode Indicates that whether or not the code should
 * be disassembled as 16 or 32 bit. If -1 the CS
 * selector will be inspected.
 * @param pszPrefix Optional log-line prefix; may be NULL.
 */
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
{
#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
    PVM pVM = env->pVM;
    RTGCPTR GCPtrPC;
    uint8_t *pvPC;
    char szOutput[256];
    uint32_t cbOp;
    RTINTPTR off;
    DISCPUSTATE Cpu;


    /* Doesn't work in long mode. */
    if (env->hflags & HF_LMA_MASK)
        return false;

    /*
     * Determin 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Log registers
     */
    if (LogIs2Enabled())
    {
        /* Sync the recompiler state into CPUM first so DBGF shows current values. */
        remR3StateUpdate(pVM);
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
    }

    /*
     * Convert cs:eip to host context address.
     * We don't care to much about cross page correctness presently.
     */
    GCPtrPC = env->segs[R_CS].base + env->eip;
    if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        /* convert eip to physical address. */
        int rc = PGMPhysGCPtr2HCPtrByGstCR3(pVM,
                                            GCPtrPC,
                                            env->cr[3],
                                            env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
                                            (void**)&pvPC);
        if (RT_FAILURE(rc))
        {
            /* Translation failed: only patch-memory addresses can still be resolved. */
            if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
                return false;
            pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
                 + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
        }
    }
    else
    {

        /* physical address */
        int rc = PGMPhysGCPhys2HCPtr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
        if (RT_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    off = env->eip - (RTGCUINTPTR)pvPC;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL;          /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;
    if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
        return false;

    /* 16-bit output includes the CS selector for context. */
    if (!f32BitCode)
    {
        if (pszPrefix)
            Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
        else
            Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
    }
    else
    {
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));
    }
    return true;

#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
    PVM pVM = env->pVM;
    const bool fLog = LogIsEnabled();
    const bool fLog2 = LogIs2Enabled();
    int rc = VINF_SUCCESS;

    /*
     * Don't bother if there ain't any log output to do.
     */
    if (!fLog && !fLog2)
        return true;

    /*
     * Update the state so DBGF reads the correct register values.
     */
    remR3StateUpdate(pVM);

    /*
     * Log registers if requested.
     */
    if (!fLog2)
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);

    /*
     * Disassemble to log.
     */
    if (fLog)
        rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);

    return RT_SUCCESS(rc);
#endif
}
3750
3751
/**
 * Disassemble recompiled code.
 *
 * Dumps the host-side translation block to the log (level 2 only).
 *
 * @param phFileIgnored Ignored, logfile usually.
 * @param pvCode Pointer to the code block.
 * @param cb Size of the code block.
 */
void disas(FILE *phFileIgnored, void *pvCode, unsigned long cb)
{
    if (LogIs2Enabled())
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
        /* Host code: pick the disassembler mode matching the host architecture. */
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                RTLogPrintf("%s", szOutput);
            else
            {
                /* Decode failure: skip one byte and keep going (except on AMD64). */
                RTLogPrintf("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }
    NOREF(phFileIgnored);
}
3793
3794
/**
 * Disassemble guest code.
 *
 * Dumps the guest instructions via DBGF to the log (level 2 only).
 *
 * @param phFileIgnored Ignored, logfile usually.
 * @param uCode The guest address of the code to disassemble. (flat?)
 * @param cb Number of bytes to disassemble.
 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
 */
void target_disas(FILE *phFileIgnored, target_ulong uCode, target_ulong cb, int fFlags)
{
    if (LogIs2Enabled())
    {
        PVM pVM = cpu_single_env->pVM;
        RTSEL cs;
        RTGCUINTPTR eip;

        /*
         * Update the state so DBGF reads the correct register values (flags).
         */
        remR3StateUpdate(pVM);

        /*
         * Do the disassembling.
         */
        RTLogPrintf("Guest Code: PC=%RGp %RGp bytes fFlags=%d\n", uCode, cb, fFlags);
        cs = cpu_single_env->segs[R_CS].selector;
        /* uCode is flat; derive the CS-relative offset for DBGF. */
        eip = uCode - cpu_single_env->segs[R_CS].base;
        for (;;)
        {
            char szBuf[256];
            uint32_t cbInstr;
            int rc = DBGFR3DisasInstrEx(pVM,
                                        cs,
                                        eip,
                                        0,
                                        szBuf, sizeof(szBuf),
                                        &cbInstr);
            if (RT_SUCCESS(rc))
                RTLogPrintf("%RGp %s\n", uCode, szBuf);
            else
            {
                /* Decode failure: log the error text and advance one byte. */
                RTLogPrintf("%RGp %04x:%RGp: %s\n", uCode, cs, eip, szBuf);
                cbInstr = 1;
            }

            /* next */
            if (cb <= cbInstr)
                break;
            cb -= cbInstr;
            uCode += cbInstr;
            eip += cbInstr;
        }
    }
    NOREF(phFileIgnored);
}
3850
3851
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer, so the result is
 * overwritten by the next call.
 * @param orig_addr The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    RTGCINTPTR off = 0;
    DBGFSYMBOL Sym;
    PVM pVM = cpu_single_env->pVM;
    int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
    if (RT_SUCCESS(rc))
    {
        /* Format "name", "name+off" or "name-off" depending on the offset sign. */
        static char szSym[sizeof(Sym.szName) + 48];
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
3877
3878
3879#undef LOG_GROUP
3880#define LOG_GROUP LOG_GROUP_REM
3881
3882
3883/* -+- FF notifications -+- */
3884
3885
/**
 * Notification about a pending interrupt.
 *
 * Records the interrupt number; asserts that no other interrupt is already
 * pending (REM_NO_PENDING_IRQ).
 *
 * @param pVM VM Handle.
 * @param u8Interrupt Interrupt
 * @thread The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
{
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3898
/**
 * Notification about a pending interrupt.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param pVM VM Handle.
 * @thread The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
{
    return pVM->rem.s.u32PendingInterrupt;
}
3910
/**
 * Notification about the interrupt FF being set.
 *
 * When inside REM: raises CPU_INTERRUPT_HARD directly on the EMT, or sets
 * the external-hard request bit atomically from other threads.
 *
 * @param pVM VM Handle.
 * @thread The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        if (VM_IS_EMT(pVM))
            cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
        else
            ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_HARD);
    }
}
3930
3931
/**
 * Notification about the interrupt FF being cleared.
 *
 * Resets the CPU_INTERRUPT_HARD request when inside REM.
 *
 * @param pVM VM Handle.
 * @thread Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
3944
3945
3946/**
3947 * Notification about pending timer(s).
3948 *
3949 * @param pVM VM Handle.
3950 * @thread Any.
3951 */
3952REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3953{
3954#ifndef DEBUG_bird
3955 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3956#endif
3957 if (pVM->rem.s.fInREM)
3958 {
3959 if (VM_IS_EMT(pVM))
3960 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3961 else
3962 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3963 CPU_INTERRUPT_EXTERNAL_TIMER);
3964 }
3965}
3966
3967
3968/**
3969 * Notification about pending DMA transfers.
3970 *
3971 * @param pVM VM Handle.
3972 * @thread Any.
3973 */
3974REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3975{
3976 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3977 if (pVM->rem.s.fInREM)
3978 {
3979 if (VM_IS_EMT(pVM))
3980 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3981 else
3982 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3983 CPU_INTERRUPT_EXTERNAL_DMA);
3984 }
3985}
3986
3987
3988/**
3989 * Notification about pending timer(s).
3990 *
3991 * @param pVM VM Handle.
3992 * @thread Any.
3993 */
3994REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3995{
3996 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3997 if (pVM->rem.s.fInREM)
3998 {
3999 if (VM_IS_EMT(pVM))
4000 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
4001 else
4002 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4003 CPU_INTERRUPT_EXTERNAL_EXIT);
4004 }
4005}
4006
4007
4008/**
4009 * Notification about pending FF set by an external thread.
4010 *
4011 * @param pVM VM handle.
4012 * @thread Any.
4013 */
4014REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4015{
4016 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4017 if (pVM->rem.s.fInREM)
4018 {
4019 if (VM_IS_EMT(pVM))
4020 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
4021 else
4022 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4023 CPU_INTERRUPT_EXTERNAL_EXIT);
4024 }
4025}
4026
4027
4028#ifdef VBOX_WITH_STATISTICS
4029void remR3ProfileStart(int statcode)
4030{
4031 STAMPROFILEADV *pStat;
4032 switch(statcode)
4033 {
4034 case STATS_EMULATE_SINGLE_INSTR:
4035 pStat = &gStatExecuteSingleInstr;
4036 break;
4037 case STATS_QEMU_COMPILATION:
4038 pStat = &gStatCompilationQEmu;
4039 break;
4040 case STATS_QEMU_RUN_EMULATED_CODE:
4041 pStat = &gStatRunCodeQEmu;
4042 break;
4043 case STATS_QEMU_TOTAL:
4044 pStat = &gStatTotalTimeQEmu;
4045 break;
4046 case STATS_QEMU_RUN_TIMERS:
4047 pStat = &gStatTimers;
4048 break;
4049 case STATS_TLB_LOOKUP:
4050 pStat= &gStatTBLookup;
4051 break;
4052 case STATS_IRQ_HANDLING:
4053 pStat= &gStatIRQ;
4054 break;
4055 case STATS_RAW_CHECK:
4056 pStat = &gStatRawCheck;
4057 break;
4058
4059 default:
4060 AssertMsgFailed(("unknown stat %d\n", statcode));
4061 return;
4062 }
4063 STAM_PROFILE_ADV_START(pStat, a);
4064}
4065
4066
4067void remR3ProfileStop(int statcode)
4068{
4069 STAMPROFILEADV *pStat;
4070 switch(statcode)
4071 {
4072 case STATS_EMULATE_SINGLE_INSTR:
4073 pStat = &gStatExecuteSingleInstr;
4074 break;
4075 case STATS_QEMU_COMPILATION:
4076 pStat = &gStatCompilationQEmu;
4077 break;
4078 case STATS_QEMU_RUN_EMULATED_CODE:
4079 pStat = &gStatRunCodeQEmu;
4080 break;
4081 case STATS_QEMU_TOTAL:
4082 pStat = &gStatTotalTimeQEmu;
4083 break;
4084 case STATS_QEMU_RUN_TIMERS:
4085 pStat = &gStatTimers;
4086 break;
4087 case STATS_TLB_LOOKUP:
4088 pStat= &gStatTBLookup;
4089 break;
4090 case STATS_IRQ_HANDLING:
4091 pStat= &gStatIRQ;
4092 break;
4093 case STATS_RAW_CHECK:
4094 pStat = &gStatRawCheck;
4095 break;
4096 default:
4097 AssertMsgFailed(("unknown stat %d\n", statcode));
4098 return;
4099 }
4100 STAM_PROFILE_ADV_STOP(pStat, a);
4101}
4102#endif
4103
4104/**
4105 * Raise an RC, force rem exit.
4106 *
4107 * @param pVM VM handle.
4108 * @param rc The rc.
4109 */
4110void remR3RaiseRC(PVM pVM, int rc)
4111{
4112 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4113 Assert(pVM->rem.s.fInREM);
4114 VM_ASSERT_EMT(pVM);
4115 pVM->rem.s.rc = rc;
4116 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4117}
4118
4119
4120/* -+- timers -+- */
4121
4122uint64_t cpu_get_tsc(CPUX86State *env)
4123{
4124 STAM_COUNTER_INC(&gStatCpuGetTSC);
4125 return TMCpuTickGet(env->pVM);
4126}
4127
4128
4129/* -+- interrupts -+- */
4130
4131void cpu_set_ferr(CPUX86State *env)
4132{
4133 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4134 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4135}
4136
/**
 * Qemu callback: fetches the next pending hardware interrupt vector.
 *
 * @returns The interrupt vector to inject, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state (pVM must be valid).
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector queued by REMR3NotifyPendingInterrupt and clear the slot. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts pending? Keep the hard-interrupt request raised so we're called again. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4169
4170
4171/* -+- local apic -+- */
4172
4173void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4174{
4175 int rc = PDMApicSetBase(env->pVM, val);
4176 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4177}
4178
4179uint64_t cpu_get_apic_base(CPUX86State *env)
4180{
4181 uint64_t u64;
4182 int rc = PDMApicGetBase(env->pVM, &u64);
4183 if (RT_SUCCESS(rc))
4184 {
4185 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4186 return u64;
4187 }
4188 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4189 return 0;
4190}
4191
4192void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4193{
4194 int rc = PDMApicSetTPR(env->pVM, val);
4195 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4196}
4197
4198uint8_t cpu_get_apic_tpr(CPUX86State *env)
4199{
4200 uint8_t u8;
4201 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4202 if (RT_SUCCESS(rc))
4203 {
4204 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4205 return u8;
4206 }
4207 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4208 return 0;
4209}
4210
4211
4212uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4213{
4214 uint64_t value;
4215 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4216 if (RT_SUCCESS(rc))
4217 {
4218 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4219 return value;
4220 }
4221 /** @todo: exception ? */
4222 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4223 return value;
4224}
4225
4226void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4227{
4228 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4229 /** @todo: exception if error ? */
4230 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4231}
4232/* -+- I/O Ports -+- */
4233
4234#undef LOG_GROUP
4235#define LOG_GROUP LOG_GROUP_REM_IOPORT
4236
4237void cpu_outb(CPUState *env, int addr, int val)
4238{
4239 int rc;
4240
4241 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4242 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4243
4244 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4245 if (RT_LIKELY(rc == VINF_SUCCESS))
4246 return;
4247 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4248 {
4249 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4250 remR3RaiseRC(env->pVM, rc);
4251 return;
4252 }
4253 remAbort(rc, __FUNCTION__);
4254}
4255
4256void cpu_outw(CPUState *env, int addr, int val)
4257{
4258 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4259 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4260 if (RT_LIKELY(rc == VINF_SUCCESS))
4261 return;
4262 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4263 {
4264 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4265 remR3RaiseRC(env->pVM, rc);
4266 return;
4267 }
4268 remAbort(rc, __FUNCTION__);
4269}
4270
4271void cpu_outl(CPUState *env, int addr, int val)
4272{
4273 int rc;
4274 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4275 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4276 if (RT_LIKELY(rc == VINF_SUCCESS))
4277 return;
4278 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4279 {
4280 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4281 remR3RaiseRC(env->pVM, rc);
4282 return;
4283 }
4284 remAbort(rc, __FUNCTION__);
4285}
4286
4287int cpu_inb(CPUState *env, int addr)
4288{
4289 uint32_t u32 = 0;
4290 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4291 if (RT_LIKELY(rc == VINF_SUCCESS))
4292 {
4293 if (/*addr != 0x61 && */addr != 0x71)
4294 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4295 return (int)u32;
4296 }
4297 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4298 {
4299 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4300 remR3RaiseRC(env->pVM, rc);
4301 return (int)u32;
4302 }
4303 remAbort(rc, __FUNCTION__);
4304 return 0xff;
4305}
4306
4307int cpu_inw(CPUState *env, int addr)
4308{
4309 uint32_t u32 = 0;
4310 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4311 if (RT_LIKELY(rc == VINF_SUCCESS))
4312 {
4313 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4314 return (int)u32;
4315 }
4316 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4317 {
4318 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4319 remR3RaiseRC(env->pVM, rc);
4320 return (int)u32;
4321 }
4322 remAbort(rc, __FUNCTION__);
4323 return 0xffff;
4324}
4325
4326int cpu_inl(CPUState *env, int addr)
4327{
4328 uint32_t u32 = 0;
4329 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4330 if (RT_LIKELY(rc == VINF_SUCCESS))
4331 {
4332//if (addr==0x01f0 && u32 == 0x6b6d)
4333// loglevel = ~0;
4334 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4335 return (int)u32;
4336 }
4337 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4338 {
4339 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4340 remR3RaiseRC(env->pVM, rc);
4341 return (int)u32;
4342 }
4343 remAbort(rc, __FUNCTION__);
4344 return 0xffffffff;
4345}
4346
4347#undef LOG_GROUP
4348#define LOG_GROUP LOG_GROUP_REM
4349
4350
4351/* -+- helpers and misc other interfaces -+- */
4352
4353/**
4354 * Perform the CPUID instruction.
4355 *
4356 * ASMCpuId cannot be invoked from some source files where this is used because of global
4357 * register allocations.
4358 *
4359 * @param env Pointer to the recompiler CPU structure.
4360 * @param uOperator CPUID operation (eax).
4361 * @param pvEAX Where to store eax.
4362 * @param pvEBX Where to store ebx.
4363 * @param pvECX Where to store ecx.
4364 * @param pvEDX Where to store edx.
4365 */
4366void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4367{
4368 CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4369}
4370
4371
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): dead code kept under "#if 0"; logs the error, syncs the REM
 * state back if inside REM, then escalates through EMR3FatalError (no return).
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4401
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Does not return: escalates through EMR3FatalError after syncing the REM
 * state back when inside REM.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    /* The %N format consumes the va_list, hence the second va_start below. */
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4435
4436
4437/**
4438 * Aborts the VM.
4439 *
4440 * @param rc VBox error code.
4441 * @param pszTip Hint about why/when this happend.
4442 */
4443static void remAbort(int rc, const char *pszTip)
4444{
4445 PVM pVM;
4446
4447 /*
4448 * Bitch about it.
4449 */
4450 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4451 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4452
4453 /*
4454 * Jump back to where we entered the recompiler.
4455 */
4456 pVM = cpu_single_env->pVM;
4457 if (pVM->rem.s.fInREM)
4458 REMR3StateBack(pVM);
4459 EMR3FatalError(pVM, rc);
4460 AssertMsgFailed(("EMR3FatalError returned!\n"));
4461}
4462
4463
/**
 * Dumps a linux system call.
 *
 * Looks up the i386 Linux syscall number in EAX and logs its name together
 * with EIP and the register-passed arguments.
 *
 * @param   pVM     VM handle.
 */
void remR3DumpLnxSyscall(PVM pVM)
{
    /* i386 Linux syscall table, indexed by syscall number (EAX). */
    static const char *apsz[] =
    {
        "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open", /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink", /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod", /* 15 */
        "sys_lchown16",
        "sys_ni_syscall", /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid", /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime", /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime", /* 30 */
        "sys_ni_syscall", /* old stty syscall holder */
        "sys_ni_syscall", /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall", /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir", /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall", /* old prof syscall holder */
        "sys_brk", /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16", /* 50 */
        "sys_acct",
        "sys_umount", /* recycled never used phys() */
        "sys_ni_syscall", /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl", /* 55 */
        "sys_ni_syscall", /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall", /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask", /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp", /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16", /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit", /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16", /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink", /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap", /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16", /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall", /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs", /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer", /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl", /* 110 */
        "sys_vhangup",
        "sys_ni_syscall", /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff", /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone", /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect", /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall", /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall", /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs", /* 135 */
        "sys_personality",
        "sys_ni_syscall", /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek", /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv", /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock", /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam", /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min", /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16", /* 165 */
        "sys_vm86",
        "sys_ni_syscall", /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16", /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask", /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64", /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset", /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall", /* reserved for streams1 */
        "sys_ni_syscall", /* reserved for streams2 */
        "sys_vfork", /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64", /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid", /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups", /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid", /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid", /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64", /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall", /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead", /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr", /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr", /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex", /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup", /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64", /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl", /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime", /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime", /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill", /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall" /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVM);
    /* The switch only has a default case; presumably kept so specific
       syscalls can be special-cased easily - TODO confirm. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
                     CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
            break;

    }
}
4762
4763
/**
 * Dumps an OpenBSD system call.
 *
 * Looks up the syscall number in EAX and logs its name, EIP and up to eight
 * stack-passed arguments read from the guest ESP.
 *
 * @param   pVM     VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* OpenBSD/i386 syscall table, indexed by syscall number (EAX).
       Unnamed slots are placeholders "SYS_<n>". */
    static const char *apsz[] =
    {
        "SYS_syscall", //0
        "SYS_exit", //1
        "SYS_fork", //2
        "SYS_read", //3
        "SYS_write", //4
        "SYS_open", //5
        "SYS_close", //6
        "SYS_wait4", //7
        "SYS_8",
        "SYS_link", //9
        "SYS_unlink", //10
        "SYS_11",
        "SYS_chdir", //12
        "SYS_fchdir", //13
        "SYS_mknod", //14
        "SYS_chmod", //15
        "SYS_chown", //16
        "SYS_break", //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid", //20
        "SYS_mount", //21
        "SYS_unmount", //22
        "SYS_setuid", //23
        "SYS_getuid", //24
        "SYS_geteuid", //25
        "SYS_ptrace", //26
        "SYS_recvmsg", //27
        "SYS_sendmsg", //28
        "SYS_recvfrom", //29
        "SYS_accept", //30
        "SYS_getpeername", //31
        "SYS_getsockname", //32
        "SYS_access", //33
        "SYS_chflags", //34
        "SYS_fchflags", //35
        "SYS_sync", //36
        "SYS_kill", //37
        "SYS_38",
        "SYS_getppid", //39
        "SYS_40",
        "SYS_dup", //41
        "SYS_opipe", //42
        "SYS_getegid", //43
        "SYS_profil", //44
        "SYS_ktrace", //45
        "SYS_sigaction", //46
        "SYS_getgid", //47
        "SYS_sigprocmask", //48
        "SYS_getlogin", //49
        "SYS_setlogin", //50
        "SYS_acct", //51
        "SYS_sigpending", //52
        "SYS_osigaltstack", //53
        "SYS_ioctl", //54
        "SYS_reboot", //55
        "SYS_revoke", //56
        "SYS_symlink", //57
        "SYS_readlink", //58
        "SYS_execve", //59
        "SYS_umask", //60
        "SYS_chroot", //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork", //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk", //69
        "SYS_sstk", //70
        "SYS_61",
        "SYS_vadvise", //72
        "SYS_munmap", //73
        "SYS_mprotect", //74
        "SYS_madvise", //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore", //78
        "SYS_getgroups", //79
        "SYS_setgroups", //80
        "SYS_getpgrp", //81
        "SYS_setpgid", //82
        "SYS_setitimer", //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer", //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2", //90
        "SYS_91",
        "SYS_fcntl", //92
        "SYS_select", //93
        "SYS_94",
        "SYS_fsync", //95
        "SYS_setpriority", //96
        "SYS_socket", //97
        "SYS_connect", //98
        "SYS_99",
        "SYS_getpriority", //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn", //103
        "SYS_bind", //104
        "SYS_setsockopt", //105
        "SYS_listen", //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend", //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday", //116
        "SYS_getrusage", //117
        "SYS_getsockopt", //118
        "SYS_119",
        "SYS_readv", //120
        "SYS_writev", //121
        "SYS_settimeofday", //122
        "SYS_fchown", //123
        "SYS_fchmod", //124
        "SYS_125",
        "SYS_setreuid", //126
        "SYS_setregid", //127
        "SYS_rename", //128
        "SYS_129",
        "SYS_130",
        "SYS_flock", //131
        "SYS_mkfifo", //132
        "SYS_sendto", //133
        "SYS_shutdown", //134
        "SYS_socketpair", //135
        "SYS_mkdir", //136
        "SYS_rmdir", //137
        "SYS_utimes", //138
        "SYS_139",
        "SYS_adjtime", //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid", //147
        "SYS_quotactl", //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc", //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh", //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch", //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread", //173
        "SYS_pwrite", //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid", //181
        "SYS_setegid", //182
        "SYS_seteuid", //183
        "SYS_lfs_bmapv", //184
        "SYS_lfs_markv", //185
        "SYS_lfs_segclean", //186
        "SYS_lfs_segwait", //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf", //191
        "SYS_fpathconf", //192
        "SYS_swapctl", //193
        "SYS_getrlimit", //194
        "SYS_setrlimit", //195
        "SYS_getdirentries", //196
        "SYS_mmap", //197
        "SYS___syscall", //198
        "SYS_lseek", //199
        "SYS_truncate", //200
        "SYS_ftruncate", //201
        "SYS___sysctl", //202
        "SYS_mlock", //203
        "SYS_munlock", //204
        "SYS_205",
        "SYS_futimes", //206
        "SYS_getpgid", //207
        "SYS_xfspioctl", //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget", //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget", //225
        "SYS_msgsnd", //226
        "SYS_msgrcv", //227
        "SYS_shmat", //228
        "SYS_229",
        "SYS_shmdt", //230
        "SYS_231",
        "SYS_clock_gettime", //232
        "SYS_clock_settime", //233
        "SYS_clock_getres", //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep", //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit", //250
        "SYS_rfork", //251
        "SYS_poll", //252
        "SYS_issetugid", //253
        "SYS_lchown", //254
        "SYS_getsid", //255
        "SYS_msync", //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat", //260
        "SYS_statfs", //261
        "SYS_fstatfs", //262
        "SYS_pipe", //263
        "SYS_fhopen", //264
        "SYS_265",
        "SYS_fhstatfs", //266
        "SYS_preadv", //267
        "SYS_pwritev", //268
        "SYS_kqueue", //269
        "SYS_kevent", //270
        "SYS_mlockall", //271
        "SYS_munlockall", //272
        "SYS_getpeereid", //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid", //281
        "SYS_setresuid", //282
        "SYS_getresgid", //283
        "SYS_setresgid", //284
        "SYS_285",
        "SYS_mquery", //286
        "SYS_closefrom", //287
        "SYS_sigaltstack", //288
        "SYS_shmget", //289
        "SYS_semop", //290
        "SYS_stat", //291
        "SYS_fstat", //292
        "SYS_lstat", //293
        "SYS_fhstat", //294
        "SYS___semctl", //295
        "SYS_shmctl", //296
        "SYS_msgctl", //297
        "SYS_MAXSYSCALL", //298
        //299
        //300
    };
    uint32_t uEAX;
    /* RTLogPrintf is unconditional, so bail out early when logging is off. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                /* OpenBSD passes syscall arguments on the stack; read them from guest ESP. */
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
5094
5095
5096#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5097/**
5098 * The Dll main entry point (stub).
5099 */
5100bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5101{
5102 return true;
5103}
5104
/**
 * Minimal memcpy replacement for the no-CRT build.
 *
 * @returns dst.
 * @param   dst     Destination buffer (must not overlap src).
 * @param   src     Source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* keep the const qualifier; the old code dropped it */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5112
5113#endif
5114
5115void cpu_smm_update(CPUState* env)
5116{
5117}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette