VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 17344

Last change on this file since 17344 was 17300, checked in by vboxsync, 16 years ago

Always sync rip when checking for rescheduling

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 158.3 KB
Line 
1/* $Id: VBoxRecompiler.c 17300 2009-03-03 16:21:15Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Note: each macro argument is evaluated exactly once, so side effects are safe.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the major emulation paths (registered under /PROF/REM in REMR3Init). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Counters for the individual reasons raw-mode execution was refused (see /REM/Refuse/*). */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor table / task register change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Per-selector out-of-sync counters; index order is ES,CS,SS,DS,FS,GS (see REMR3Init). */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
145
146/*
147 * Global stuff.
148 */
149
/** MMIO read callbacks; entries ordered by access size: [0]=U8, [1]=U16, [2]=U32. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks; same size ordering as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks; same size ordering as g_apfnMMIORead. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks; same size ordering as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
181
182
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 * A single optional numeric/boolean argument toggling the stepping state. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags, pszName,    pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,      "on/off",   "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once with DBGCRegisterCommands() from REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
214
215
216/*******************************************************************************
217* Internal Functions *
218*******************************************************************************/
219void remAbort(int rc, const char *pszTip);
220extern int testmath(void);
221
222/* Put them here to avoid unused variable warning. */
223AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
224#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
225//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
226/* Why did this have to be identical?? */
227AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
228#else
229AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
230#endif
231
232
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc() in REMR3Init(). */
uint8_t *code_gen_prologue;
235
236/**
237 * Initializes the REM.
238 *
239 * @returns VBox status code.
240 * @param pVM The VM to operate on.
241 */
242REMR3DECL(int) REMR3Init(PVM pVM)
243{
244 uint32_t u32Dummy;
245 int rc;
246
247#ifdef VBOX_ENABLE_VBOXREM64
248 LogRel(("Using 64-bit aware REM\n"));
249#endif
250
251 /*
252 * Assert sanity.
253 */
254 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
255 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
256 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
257#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
258 Assert(!testmath());
259#endif
260 /*
261 * Init some internal data members.
262 */
263 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
264 pVM->rem.s.Env.pVM = pVM;
265#ifdef CPU_RAW_MODE_INIT
266 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
267#endif
268
269 /* ctx. */
270 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
271 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
272
273 /* ignore all notifications */
274 pVM->rem.s.fIgnoreAll = true;
275
276 code_gen_prologue = RTMemExecAlloc(_1K);
277 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
278
279 cpu_exec_init_all(0);
280
281 /*
282 * Init the recompiler.
283 */
284 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
285 {
286 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
287 return VERR_GENERAL_FAILURE;
288 }
289 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
290 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
291
292 /* allocate code buffer for single instruction emulation. */
293 pVM->rem.s.Env.cbCodeBuffer = 4096;
294 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
295 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
296
297 /* finally, set the cpu_single_env global. */
298 cpu_single_env = &pVM->rem.s.Env;
299
300 /* Nothing is pending by default */
301 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
302
303 /*
304 * Register ram types.
305 */
306 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
307 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
308 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
309 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
310 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
311
312 /* stop ignoring. */
313 pVM->rem.s.fIgnoreAll = false;
314
315 /*
316 * Register the saved state data unit.
317 */
318 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
319 NULL, remR3Save, NULL,
320 NULL, remR3Load, NULL);
321 if (RT_FAILURE(rc))
322 return rc;
323
324#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
325 /*
326 * Debugger commands.
327 */
328 static bool fRegisteredCmds = false;
329 if (!fRegisteredCmds)
330 {
331 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
332 if (RT_SUCCESS(rc))
333 fRegisteredCmds = true;
334 }
335#endif
336
337#ifdef VBOX_WITH_STATISTICS
338 /*
339 * Statistics.
340 */
341 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
342 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
343 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
344 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
345 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
348 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
349 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
350 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
351 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
352 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
353
354 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
355
356 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
357 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
358 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
359 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
360 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
361 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
362 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
363 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
364 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
365 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
366 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
367
368 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
369 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
370 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
371 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
372
373 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
378 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
379
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
385 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
386
387 /** @todo missing /REM/Tb*Count stats */
388
389#endif
390
391#ifdef DEBUG_ALL_LOGGING
392 loglevel = ~0;
393# ifdef DEBUG_TMP_LOGGING
394 logfile = fopen("/tmp/vbox-qemu.log", "w");
395# endif
396#endif
397
398 return rc;
399}
400
401
402/**
403 * Terminates the REM.
404 *
405 * Termination means cleaning up and freeing all resources,
406 * the VM it self is at this point powered off or suspended.
407 *
408 * @returns VBox status code.
409 * @param pVM The VM to operate on.
410 */
411REMR3DECL(int) REMR3Term(PVM pVM)
412{
413 return VINF_SUCCESS;
414}
415
416
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * fIgnoreAll is raised around cpu_reset() so notifications triggered
     * by the reset are suppressed.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
441
442
/**
 * Execute state save operation.
 *
 * Layout: hflags, ~0 separator, raw-ring-0 flag, pending interrupt,
 * ~0 terminator. remR3Load() must read the same sequence.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);          /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);   /* terminator */
}
468
469
/**
 * Execute state load operation.
 *
 * Counterpart of remR3Save(); additionally understands the older
 * REM_SAVED_STATE_VERSION_VER1_6 layout which carried a redundant CPU
 * state blob and an invalidated-page list.
 *
 * @returns VBox status code.
 * @param   pVM          VM Handle.
 * @param   pSSM         SSM operation handle.
 * @param   u32Version   Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Reject counts larger than the fixed-size array to avoid overrunning it. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the whole TLB so the freshly loaded state takes effect.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
591
592
593
594#undef LOG_GROUP
595#define LOG_GROUP LOG_GROUP_REM_RUN
596
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * The flat PC is computed as CS.base + EIP.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);  /* 0 return == a bp was present and removed */

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Step completed: pulse the TM clocks so they advance, then report. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM status code was raised; fetch it and reset the slot. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
677
678
679/**
680 * Set a breakpoint using the REM facilities.
681 *
682 * @returns VBox status code.
683 * @param pVM The VM handle.
684 * @param Address The breakpoint address.
685 * @thread The emulation thread.
686 */
687REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
688{
689 VM_ASSERT_EMT(pVM);
690 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
691 {
692 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
693 return VINF_SUCCESS;
694 }
695 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
696 return VERR_REM_NO_MORE_BP_SLOTS;
697}
698
699
700/**
701 * Clears a breakpoint set by REMR3BreakpointSet().
702 *
703 * @returns VBox status code.
704 * @param pVM The VM handle.
705 * @param Address The breakpoint address.
706 * @thread The emulation thread.
707 */
708REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
709{
710 VM_ASSERT_EMT(pVM);
711 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
712 {
713 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
714 return VINF_SUCCESS;
715 }
716 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
717 return VERR_REM_BP_NOT_FOUND;
718}
719
720
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs;   /* restore the saved flush request */
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint at the PC we cannot tell the two cases
             * apart directly, so scan the breakpoint table to decide.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
868
869
870/**
871 * Runs code in recompiled mode.
872 *
873 * Before calling this function the REM state needs to be in sync with
874 * the VM. Call REMR3State() to perform the sync. It's only necessary
875 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
876 * and after calling REMR3StateBack().
877 *
878 * @returns VBox status code.
879 *
880 * @param pVM VM Handle.
881 */
882REMR3DECL(int) REMR3Run(PVM pVM)
883{
884 int rc;
885 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
886 Assert(pVM->rem.s.fInREM);
887
888 TMNotifyStartOfExecution(pVM);
889 rc = cpu_exec(&pVM->rem.s.Env);
890 TMNotifyEndOfExecution(pVM);
891 switch (rc)
892 {
893 /*
894 * This happens when the execution was interrupted
895 * by an external event, like pending timers.
896 */
897 case EXCP_INTERRUPT:
898 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
899 rc = VINF_SUCCESS;
900 break;
901
902 /*
903 * hlt instruction.
904 */
905 case EXCP_HLT:
906 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
907 rc = VINF_EM_HALT;
908 break;
909
910 /*
911 * The VM has halted.
912 */
913 case EXCP_HALTED:
914 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
915 rc = VINF_EM_HALT;
916 break;
917
918 /*
919 * Breakpoint/single step.
920 */
921 case EXCP_DEBUG:
922 {
923#if 0//def DEBUG_bird
924 static int iBP = 0;
925 printf("howdy, breakpoint! iBP=%d\n", iBP);
926 switch (iBP)
927 {
928 case 0:
929 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
930 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
931 //pVM->rem.s.Env.interrupt_request = 0;
932 //pVM->rem.s.Env.exception_index = -1;
933 //g_fInterruptDisabled = 1;
934 rc = VINF_SUCCESS;
935 asm("int3");
936 break;
937 default:
938 asm("int3");
939 break;
940 }
941 iBP++;
942#else
943 /* breakpoint or single step? */
944 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
945 int iBP;
946 rc = VINF_EM_DBG_STEPPED;
947 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
948 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
949 {
950 rc = VINF_EM_DBG_BREAKPOINT;
951 break;
952 }
953 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
954#endif
955 break;
956 }
957
958 /*
959 * Switch to RAW-mode.
960 */
961 case EXCP_EXECUTE_RAW:
962 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
963 rc = VINF_EM_RESCHEDULE_RAW;
964 break;
965
966 /*
967 * Switch to hardware accelerated RAW-mode.
968 */
969 case EXCP_EXECUTE_HWACC:
970 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
971 rc = VINF_EM_RESCHEDULE_HWACC;
972 break;
973
974 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
975 /*
976 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
977 */
978 case EXCP_RC:
979 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
980 rc = pVM->rem.s.rc;
981 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
982 break;
983
984 /*
985 * Figure out the rest when they arrive....
986 */
987 default:
988 AssertMsgFailed(("rc=%d\n", rc));
989 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
990 rc = VINF_SUCCESS;
991 break;
992 }
993
994 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
995 return rc;
996}
997
998
999/**
1000 * Check if the cpu state is suitable for Raw execution.
1001 *
1002 * @returns boolean
1003 * @param env The CPU env struct.
1004 * @param eip The EIP to check this for (might differ from env->eip).
1005 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1006 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1007 *
1008 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1009 */
1010bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1011{
1012 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1013 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1014 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1015 uint32_t u32CR0;
1016
1017 /* Update counter. */
1018 env->pVM->rem.s.cCanExecuteRaw++;
1019
1020 if (HWACCMIsEnabled(env->pVM))
1021 {
1022 CPUMCTX Ctx;
1023
1024 env->state |= CPU_RAW_HWACC;
1025
1026 /*
1027 * Create partial context for HWACCMR3CanExecuteGuest
1028 */
1029 Ctx.cr0 = env->cr[0];
1030 Ctx.cr3 = env->cr[3];
1031 Ctx.cr4 = env->cr[4];
1032
1033 Ctx.tr = env->tr.selector;
1034 Ctx.trHid.u64Base = env->tr.base;
1035 Ctx.trHid.u32Limit = env->tr.limit;
1036 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1037
1038 Ctx.idtr.cbIdt = env->idt.limit;
1039 Ctx.idtr.pIdt = env->idt.base;
1040
1041 Ctx.gdtr.cbGdt = env->gdt.limit;
1042 Ctx.gdtr.pGdt = env->gdt.base;
1043
1044 Ctx.rsp = env->regs[R_ESP];
1045 Ctx.rip = env->eip;
1046
1047 Ctx.eflags.u32 = env->eflags;
1048
1049 Ctx.cs = env->segs[R_CS].selector;
1050 Ctx.csHid.u64Base = env->segs[R_CS].base;
1051 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1052 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1053
1054 Ctx.ds = env->segs[R_DS].selector;
1055 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1056 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1057 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1058
1059 Ctx.es = env->segs[R_ES].selector;
1060 Ctx.esHid.u64Base = env->segs[R_ES].base;
1061 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1062 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1063
1064 Ctx.fs = env->segs[R_FS].selector;
1065 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1066 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1067 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1068
1069 Ctx.gs = env->segs[R_GS].selector;
1070 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1071 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1072 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1073
1074 Ctx.ss = env->segs[R_SS].selector;
1075 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1076 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1077 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1078
1079 Ctx.msrEFER = env->efer;
1080
1081 /* Hardware accelerated raw-mode:
1082 *
1083 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1084 */
1085 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1086 {
1087 *piException = EXCP_EXECUTE_HWACC;
1088 return true;
1089 }
1090 return false;
1091 }
1092
1093 /*
1094 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1095 * or 32 bits protected mode ring 0 code
1096 *
1097 * The tests are ordered by the likelyhood of being true during normal execution.
1098 */
1099 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1100 {
1101 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1102 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1103 return false;
1104 }
1105
1106#ifndef VBOX_RAW_V86
1107 if (fFlags & VM_MASK) {
1108 STAM_COUNTER_INC(&gStatRefuseVM86);
1109 Log2(("raw mode refused: VM_MASK\n"));
1110 return false;
1111 }
1112#endif
1113
1114 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1115 {
1116#ifndef DEBUG_bird
1117 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1118#endif
1119 return false;
1120 }
1121
1122 if (env->singlestep_enabled)
1123 {
1124 //Log2(("raw mode refused: Single step\n"));
1125 return false;
1126 }
1127
1128 if (env->nb_breakpoints > 0)
1129 {
1130 //Log2(("raw mode refused: Breakpoints\n"));
1131 return false;
1132 }
1133
1134 u32CR0 = env->cr[0];
1135 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1136 {
1137 STAM_COUNTER_INC(&gStatRefusePaging);
1138 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1139 return false;
1140 }
1141
1142 if (env->cr[4] & CR4_PAE_MASK)
1143 {
1144 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1145 {
1146 STAM_COUNTER_INC(&gStatRefusePAE);
1147 return false;
1148 }
1149 }
1150
1151 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1152 {
1153 if (!EMIsRawRing3Enabled(env->pVM))
1154 return false;
1155
1156 if (!(env->eflags & IF_MASK))
1157 {
1158 STAM_COUNTER_INC(&gStatRefuseIF0);
1159 Log2(("raw mode refused: IF (RawR3)\n"));
1160 return false;
1161 }
1162
1163 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1164 {
1165 STAM_COUNTER_INC(&gStatRefuseWP0);
1166 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1167 return false;
1168 }
1169 }
1170 else
1171 {
1172 if (!EMIsRawRing0Enabled(env->pVM))
1173 return false;
1174
1175 // Let's start with pure 32 bits ring 0 code first
1176 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1177 {
1178 STAM_COUNTER_INC(&gStatRefuseCode16);
1179 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1180 return false;
1181 }
1182
1183 // Only R0
1184 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1185 {
1186 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1187 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1188 return false;
1189 }
1190
1191 if (!(u32CR0 & CR0_WP_MASK))
1192 {
1193 STAM_COUNTER_INC(&gStatRefuseWP0);
1194 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1195 return false;
1196 }
1197
1198 if (PATMIsPatchGCAddr(env->pVM, eip))
1199 {
1200 Log2(("raw r0 mode forced: patch code\n"));
1201 *piException = EXCP_EXECUTE_RAW;
1202 return true;
1203 }
1204
1205#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1206 if (!(env->eflags & IF_MASK))
1207 {
1208 STAM_COUNTER_INC(&gStatRefuseIF0);
1209 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1210 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1211 return false;
1212 }
1213#endif
1214
1215 env->state |= CPU_RAW_RING0;
1216 }
1217
1218 /*
1219 * Don't reschedule the first time we're called, because there might be
1220 * special reasons why we're here that is not covered by the above checks.
1221 */
1222 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1223 {
1224 Log2(("raw mode refused: first scheduling\n"));
1225 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1226 return false;
1227 }
1228
1229 Assert(PGMPhysIsA20Enabled(env->pVM));
1230 *piException = EXCP_EXECUTE_RAW;
1231 return true;
1232}
1233
1234
1235/**
1236 * Fetches a code byte.
1237 *
1238 * @returns Success indicator (bool) for ease of use.
1239 * @param env The CPU environment structure.
1240 * @param GCPtrInstr Where to fetch code.
1241 * @param pu8Byte Where to store the byte on success
1242 */
1243bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1244{
1245 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1246 if (RT_SUCCESS(rc))
1247 return true;
1248 return false;
1249}
1250
1251
1252/**
1253 * Flush (or invalidate if you like) page table/dir entry.
1254 *
1255 * (invlpg instruction; tlb_flush_page)
1256 *
1257 * @param env Pointer to cpu environment.
1258 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1259 */
1260void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1261{
1262 PVM pVM = env->pVM;
1263 PCPUMCTX pCtx;
1264 int rc;
1265
1266 /*
1267 * When we're replaying invlpg instructions or restoring a saved
1268 * state we disable this path.
1269 */
1270 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1271 return;
1272 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1273 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1274
1275 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1276
1277 /*
1278 * Update the control registers before calling PGMFlushPage.
1279 */
1280 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1281 pCtx->cr0 = env->cr[0];
1282 pCtx->cr3 = env->cr[3];
1283 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1284 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1285 pCtx->cr4 = env->cr[4];
1286
1287 /*
1288 * Let PGM do the rest.
1289 */
1290 rc = PGMInvalidatePage(pVM, GCPtr);
1291 if (RT_FAILURE(rc))
1292 {
1293 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1294 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1295 }
1296 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1297}
1298
1299
1300#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Translates a guest physical address into an R3 pointer for the recompiler TLB.
 *
 * The two lowest bits of the returned pointer encode the access status:
 *  - bit 0 set (returns (void *)1): the page cannot be mapped at all
 *    (TLB catch-all or unassigned memory).
 *  - bit 1 set: writes to the page must be caught (write monitored).
 *
 * @returns R3 pointer with status bits as described above.
 * @param   env1        The CPU environment.
 * @param   physAddr    Guest physical address; must be at least 4 byte aligned
 *                      so the low bits are free for the status encoding.
 * @param   fWritable   Whether write access is intended.
 *                      NOTE(review): currently unused -- the PGM call below
 *                      hard-codes fWritable=true; confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1320
1321target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr)
1322{
1323 RTGCPHYS rv = 0;
1324 int rc;
1325
1326 rc = PGMR3DbgR3Ptr2GCPhys(env1->pVM, (RTR3PTR)addr, &rv);
1327 Assert (RT_SUCCESS(rc));
1328
1329 return (target_ulong)rv;
1330}
1331#endif
1332
1333/**
1334 * Called from tlb_protect_code in order to write monitor a code page.
1335 *
1336 * @param env Pointer to the CPU environment.
1337 * @param GCPtr Code page to monitor
1338 */
1339void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1340{
1341#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1342 Assert(env->pVM->rem.s.fInREM);
1343 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1344 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1345 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1346 && !(env->eflags & VM_MASK) /* no V86 mode */
1347 && !HWACCMIsEnabled(env->pVM))
1348 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1349#endif
1350}
1351
1352/**
1353 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1354 *
1355 * @param env Pointer to the CPU environment.
1356 * @param GCPtr Code page to monitor
1357 */
1358void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1359{
1360 Assert(env->pVM->rem.s.fInREM);
1361#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1362 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1363 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1364 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1365 && !(env->eflags & VM_MASK) /* no V86 mode */
1366 && !HWACCMIsEnabled(env->pVM))
1367 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1368#endif
1369}
1370
1371
1372/**
1373 * Called when the CPU is initialized, any of the CRx registers are changed or
1374 * when the A20 line is modified.
1375 *
1376 * @param env Pointer to the CPU environment.
1377 * @param fGlobal Set if the flush is global.
1378 */
1379void remR3FlushTLB(CPUState *env, bool fGlobal)
1380{
1381 PVM pVM = env->pVM;
1382 PCPUMCTX pCtx;
1383
1384 /*
1385 * When we're replaying invlpg instructions or restoring a saved
1386 * state we disable this path.
1387 */
1388 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1389 return;
1390 Assert(pVM->rem.s.fInREM);
1391
1392 /*
1393 * The caller doesn't check cr4, so we have to do that for ourselves.
1394 */
1395 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1396 fGlobal = true;
1397 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1398
1399 /*
1400 * Update the control registers before calling PGMR3FlushTLB.
1401 */
1402 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1403 pCtx->cr0 = env->cr[0];
1404 pCtx->cr3 = env->cr[3];
1405 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1406 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1407 pCtx->cr4 = env->cr[4];
1408
1409 /*
1410 * Let PGM do the rest.
1411 */
1412 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1413}
1414
1415
1416/**
1417 * Called when any of the cr0, cr4 or efer registers is updated.
1418 *
1419 * @param env Pointer to the CPU environment.
1420 */
1421void remR3ChangeCpuMode(CPUState *env)
1422{
1423 int rc;
1424 PVM pVM = env->pVM;
1425 PCPUMCTX pCtx;
1426
1427 /*
1428 * When we're replaying loads or restoring a saved
1429 * state this path is disabled.
1430 */
1431 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
1432 return;
1433 Assert(pVM->rem.s.fInREM);
1434
1435 /*
1436 * Update the control registers before calling PGMChangeMode()
1437 * as it may need to map whatever cr3 is pointing to.
1438 */
1439 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1440 pCtx->cr0 = env->cr[0];
1441 pCtx->cr3 = env->cr[3];
1442 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1443 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1444 pCtx->cr4 = env->cr[4];
1445
1446#ifdef TARGET_X86_64
1447 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
1448 if (rc != VINF_SUCCESS)
1449 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
1450#else
1451 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
1452 if (rc != VINF_SUCCESS)
1453 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
1454#endif
1455}
1456
1457
1458/**
1459 * Called from compiled code to run dma.
1460 *
1461 * @param env Pointer to the CPU environment.
1462 */
1463void remR3DmaRun(CPUState *env)
1464{
1465 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1466 PDMR3DmaRun(env->pVM);
1467 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1468}
1469
1470
1471/**
1472 * Called from compiled code to schedule pending timers in VMM
1473 *
1474 * @param env Pointer to the CPU environment.
1475 */
1476void remR3TimersRun(CPUState *env)
1477{
1478 LogFlow(("remR3TimersRun:\n"));
1479 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1480 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1481 TMR3TimerQueuesDo(env->pVM);
1482 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1483 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1484}
1485
1486
1487/**
1488 * Record trap occurance
1489 *
1490 * @returns VBox status code
1491 * @param env Pointer to the CPU environment.
1492 * @param uTrap Trap nr
1493 * @param uErrorCode Error code
1494 * @param pvNextEIP Next EIP
1495 */
1496int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1497{
1498 PVM pVM = env->pVM;
1499#ifdef VBOX_WITH_STATISTICS
1500 static STAMCOUNTER s_aStatTrap[255];
1501 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1502#endif
1503
1504#ifdef VBOX_WITH_STATISTICS
1505 if (uTrap < 255)
1506 {
1507 if (!s_aRegisters[uTrap])
1508 {
1509 char szStatName[64];
1510 s_aRegisters[uTrap] = true;
1511 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1512 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1513 }
1514 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1515 }
1516#endif
1517 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1518 if( uTrap < 0x20
1519 && (env->cr[0] & X86_CR0_PE)
1520 && !(env->eflags & X86_EFL_VM))
1521 {
1522#ifdef DEBUG
1523 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1524#endif
1525 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1526 {
1527 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1528 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1529 return VERR_REM_TOO_MANY_TRAPS;
1530 }
1531 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1532 pVM->rem.s.cPendingExceptions = 1;
1533 pVM->rem.s.uPendingException = uTrap;
1534 pVM->rem.s.uPendingExcptEIP = env->eip;
1535 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1536 }
1537 else
1538 {
1539 pVM->rem.s.cPendingExceptions = 0;
1540 pVM->rem.s.uPendingException = uTrap;
1541 pVM->rem.s.uPendingExcptEIP = env->eip;
1542 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1543 }
1544 return VINF_SUCCESS;
1545}
1546
1547
1548/*
1549 * Clear current active trap
1550 *
1551 * @param pVM VM Handle.
1552 */
1553void remR3TrapClear(PVM pVM)
1554{
1555 pVM->rem.s.cPendingExceptions = 0;
1556 pVM->rem.s.uPendingException = 0;
1557 pVM->rem.s.uPendingExcptEIP = 0;
1558 pVM->rem.s.uPendingExcptCR2 = 0;
1559}
1560
1561
1562/*
1563 * Record previous call instruction addresses
1564 *
1565 * @param env Pointer to the CPU environment.
1566 */
1567void remR3RecordCall(CPUState *env)
1568{
1569 CSAMR3RecordCallAddress(env->pVM, env->eip);
1570}
1571
1572
1573/**
1574 * Syncs the internal REM state with the VM.
1575 *
1576 * This must be called before REMR3Run() is invoked whenever when the REM
1577 * state is not up to date. Calling it several times in a row is not
1578 * permitted.
1579 *
1580 * @returns VBox status code.
1581 *
1582 * @param pVM VM Handle.
1583 * @param fFlushTBs Flush all translation blocks before executing code
1584 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
1588 */
1589REMR3DECL(int) REMR3State(PVM pVM)
1590{
1591 register const CPUMCTX *pCtx;
1592 register unsigned fFlags;
1593 bool fHiddenSelRegsValid;
1594 unsigned i;
1595 TRPMEVENT enmType;
1596 uint8_t u8TrapNo;
1597 int rc;
1598
1599 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1600 Log2(("REMR3State:\n"));
1601
1602 pCtx = pVM->rem.s.pCtx;
1603 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1604
1605 Assert(!pVM->rem.s.fInREM);
1606 pVM->rem.s.fInStateSync = true;
1607
1608 /*
1609 * If we have to flush TBs, do that immediately.
1610 */
1611 if (pVM->rem.s.fFlushTBs)
1612 {
1613 STAM_COUNTER_INC(&gStatFlushTBs);
1614 tb_flush(&pVM->rem.s.Env);
1615 pVM->rem.s.fFlushTBs = false;
1616 }
1617
1618 /*
1619 * Copy the registers which require no special handling.
1620 */
1621#ifdef TARGET_X86_64
1622 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1623 Assert(R_EAX == 0);
1624 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1625 Assert(R_ECX == 1);
1626 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1627 Assert(R_EDX == 2);
1628 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1629 Assert(R_EBX == 3);
1630 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1631 Assert(R_ESP == 4);
1632 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1633 Assert(R_EBP == 5);
1634 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1635 Assert(R_ESI == 6);
1636 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1637 Assert(R_EDI == 7);
1638 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1639 pVM->rem.s.Env.regs[8] = pCtx->r8;
1640 pVM->rem.s.Env.regs[9] = pCtx->r9;
1641 pVM->rem.s.Env.regs[10] = pCtx->r10;
1642 pVM->rem.s.Env.regs[11] = pCtx->r11;
1643 pVM->rem.s.Env.regs[12] = pCtx->r12;
1644 pVM->rem.s.Env.regs[13] = pCtx->r13;
1645 pVM->rem.s.Env.regs[14] = pCtx->r14;
1646 pVM->rem.s.Env.regs[15] = pCtx->r15;
1647
1648 pVM->rem.s.Env.eip = pCtx->rip;
1649
1650 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1651#else
1652 Assert(R_EAX == 0);
1653 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1654 Assert(R_ECX == 1);
1655 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1656 Assert(R_EDX == 2);
1657 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1658 Assert(R_EBX == 3);
1659 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1660 Assert(R_ESP == 4);
1661 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1662 Assert(R_EBP == 5);
1663 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1664 Assert(R_ESI == 6);
1665 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1666 Assert(R_EDI == 7);
1667 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1668 pVM->rem.s.Env.eip = pCtx->eip;
1669
1670 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1671#endif
1672
1673 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1674
1675 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1676 for (i=0;i<8;i++)
1677 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1678
1679 /*
1680 * Clear the halted hidden flag (the interrupt waking up the CPU can
1681 * have been dispatched in raw mode).
1682 */
1683 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1684
1685 /*
1686 * Replay invlpg?
1687 */
1688 if (pVM->rem.s.cInvalidatedPages)
1689 {
1690 RTUINT i;
1691
1692 pVM->rem.s.fIgnoreInvlPg = true;
1693 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1694 {
1695 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1696 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1697 }
1698 pVM->rem.s.fIgnoreInvlPg = false;
1699 pVM->rem.s.cInvalidatedPages = 0;
1700 }
1701
1702 /* Replay notification changes? */
1703 if (pVM->rem.s.cHandlerNotifications)
1704 REMR3ReplayHandlerNotifications(pVM);
1705
1706 /* Update MSRs; before CRx registers! */
1707 pVM->rem.s.Env.efer = pCtx->msrEFER;
1708 pVM->rem.s.Env.star = pCtx->msrSTAR;
1709 pVM->rem.s.Env.pat = pCtx->msrPAT;
1710#ifdef TARGET_X86_64
1711 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1712 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1713 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1714 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1715
1716 /* Update the internal long mode activate flag according to the new EFER value. */
1717 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1718 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1719 else
1720 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1721#endif
1722
1723
1724 /*
1725 * Registers which are rarely changed and require special handling / order when changed.
1726 */
1727 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1728 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1729 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1730 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1731 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1732 {
1733 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1734 {
1735 pVM->rem.s.fIgnoreCR3Load = true;
1736 tlb_flush(&pVM->rem.s.Env, true);
1737 pVM->rem.s.fIgnoreCR3Load = false;
1738 }
1739
1740 /* CR4 before CR0! */
1741 if (fFlags & CPUM_CHANGED_CR4)
1742 {
1743 pVM->rem.s.fIgnoreCR3Load = true;
1744 pVM->rem.s.fIgnoreCpuMode = true;
1745 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1746 pVM->rem.s.fIgnoreCpuMode = false;
1747 pVM->rem.s.fIgnoreCR3Load = false;
1748 }
1749
1750 if (fFlags & CPUM_CHANGED_CR0)
1751 {
1752 pVM->rem.s.fIgnoreCR3Load = true;
1753 pVM->rem.s.fIgnoreCpuMode = true;
1754 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1755 pVM->rem.s.fIgnoreCpuMode = false;
1756 pVM->rem.s.fIgnoreCR3Load = false;
1757 }
1758
1759 if (fFlags & CPUM_CHANGED_CR3)
1760 {
1761 pVM->rem.s.fIgnoreCR3Load = true;
1762 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1763 pVM->rem.s.fIgnoreCR3Load = false;
1764 }
1765
1766 if (fFlags & CPUM_CHANGED_GDTR)
1767 {
1768 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1769 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1770 }
1771
1772 if (fFlags & CPUM_CHANGED_IDTR)
1773 {
1774 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1775 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1776 }
1777
1778 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1779 {
1780 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1781 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1782 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1783 }
1784
1785 if (fFlags & CPUM_CHANGED_LDTR)
1786 {
1787 if (fHiddenSelRegsValid)
1788 {
1789 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1790 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1791 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1792 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1793 }
1794 else
1795 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1796 }
1797
1798 if (fFlags & CPUM_CHANGED_CPUID)
1799 {
1800 uint32_t u32Dummy;
1801
1802 /*
1803 * Get the CPUID features.
1804 */
1805 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1806 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1807 }
1808
1809 /* Sync FPU state after CR4, CPUID and EFER (!). */
1810 if (fFlags & CPUM_CHANGED_FPU_REM)
1811 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1812 }
1813
1814 /*
1815 * Sync TR unconditionally to make life simpler.
1816 */
1817 pVM->rem.s.Env.tr.selector = pCtx->tr;
1818 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1819 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1820 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1821 /* Note! do_interrupt will fault if the busy flag is still set... */
1822 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1823
1824 /*
1825 * Update selector registers.
1826 * This must be done *after* we've synced gdt, ldt and crX registers
1827 * since we're reading the GDT/LDT om sync_seg. This will happen with
1828 * saved state which takes a quick dip into rawmode for instance.
1829 */
1830 /*
1831 * Stack; Note first check this one as the CPL might have changed. The
1832 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1833 */
1834
1835 if (fHiddenSelRegsValid)
1836 {
1837 /* The hidden selector registers are valid in the CPU context. */
1838 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1839
1840 /* Set current CPL */
1841 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1842
1843 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1844 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1845 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1846 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1847 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1848 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1849 }
1850 else
1851 {
1852 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1853 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1854 {
1855 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1856
1857 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1858 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1859#ifdef VBOX_WITH_STATISTICS
1860 if (pVM->rem.s.Env.segs[R_SS].newselector)
1861 {
1862 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1863 }
1864#endif
1865 }
1866 else
1867 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1868
1869 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1870 {
1871 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1872 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1873#ifdef VBOX_WITH_STATISTICS
1874 if (pVM->rem.s.Env.segs[R_ES].newselector)
1875 {
1876 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1877 }
1878#endif
1879 }
1880 else
1881 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1882
1883 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1884 {
1885 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1886 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1887#ifdef VBOX_WITH_STATISTICS
1888 if (pVM->rem.s.Env.segs[R_CS].newselector)
1889 {
1890 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1891 }
1892#endif
1893 }
1894 else
1895 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1896
1897 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1898 {
1899 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1900 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1901#ifdef VBOX_WITH_STATISTICS
1902 if (pVM->rem.s.Env.segs[R_DS].newselector)
1903 {
1904 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1905 }
1906#endif
1907 }
1908 else
1909 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1910
1911 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1912 * be the same but not the base/limit. */
1913 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1914 {
1915 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1916 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1917#ifdef VBOX_WITH_STATISTICS
1918 if (pVM->rem.s.Env.segs[R_FS].newselector)
1919 {
1920 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1921 }
1922#endif
1923 }
1924 else
1925 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1926
1927 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1928 {
1929 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1930 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1931#ifdef VBOX_WITH_STATISTICS
1932 if (pVM->rem.s.Env.segs[R_GS].newselector)
1933 {
1934 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1935 }
1936#endif
1937 }
1938 else
1939 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1940 }
1941
1942 /*
1943 * Check for traps.
1944 */
1945 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
1946 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
1947 if (RT_SUCCESS(rc))
1948 {
1949#ifdef DEBUG
1950 if (u8TrapNo == 0x80)
1951 {
1952 remR3DumpLnxSyscall(pVM);
1953 remR3DumpOBsdSyscall(pVM);
1954 }
1955#endif
1956
1957 pVM->rem.s.Env.exception_index = u8TrapNo;
1958 if (enmType != TRPM_SOFTWARE_INT)
1959 {
1960 pVM->rem.s.Env.exception_is_int = 0;
1961 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
1962 }
1963 else
1964 {
1965 /*
1966 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
1967 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
1968 * for int03 and into.
1969 */
1970 pVM->rem.s.Env.exception_is_int = 1;
1971 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
1972 /* int 3 may be generated by one-byte 0xcc */
1973 if (u8TrapNo == 3)
1974 {
1975 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
1976 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1977 }
1978 /* int 4 may be generated by one-byte 0xce */
1979 else if (u8TrapNo == 4)
1980 {
1981 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
1982 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1983 }
1984 }
1985
1986 /* get error code and cr2 if needed. */
1987 switch (u8TrapNo)
1988 {
1989 case 0x0e:
1990 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
1991 /* fallthru */
1992 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
1993 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
1994 break;
1995
1996 case 0x11: case 0x08:
1997 default:
1998 pVM->rem.s.Env.error_code = 0;
1999 break;
2000 }
2001
2002 /*
2003 * We can now reset the active trap since the recompiler is gonna have a go at it.
2004 */
2005 rc = TRPMResetTrap(pVM);
2006 AssertRC(rc);
2007 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2008 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2009 }
2010
2011 /*
2012 * Clear old interrupt request flags; Check for pending hardware interrupts.
2013 * (See @remark for why we don't check for other FFs.)
2014 */
2015 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2016 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2017 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2018 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2019
2020 /*
2021 * We're now in REM mode.
2022 */
2023 pVM->rem.s.fInREM = true;
2024 pVM->rem.s.fInStateSync = false;
2025 pVM->rem.s.cCanExecuteRaw = 0;
2026 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2027 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2028 return VINF_SUCCESS;
2029}
2030
2031
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that are still flagged stale, i.e. the lazy selector
       syncing never got around to loading them while in REM. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME change affects the interrupt redirection bitmap in the TSS. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* Descriptor tables: raise the matching sync force-flag when a base moved. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    /* LDTR: the 0xF0FF mask drops QEmu's internal bits (limit bits 16..19 area
       of the 2nd descriptor dword) so only the attribute word is compared/stored. */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-apply the busy bit QEmu keeps cleared (see the comparison above). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * If REM left with a pending exception, hand it back to TRPM so the
     * VMM can dispatch it (error code / CR2 where the vector requires it).
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2277
2278
2279/**
2280 * This is called by the disassembler when it wants to update the cpu state
2281 * before for instance doing a register dump.
2282 */
2283static void remR3StateUpdate(PVM pVM)
2284{
2285 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2286 unsigned i;
2287
2288 Assert(pVM->rem.s.fInREM);
2289
2290 /*
2291 * Copy back the registers.
2292 * This is done in the order they are declared in the CPUMCTX structure.
2293 */
2294
2295 /** @todo FOP */
2296 /** @todo FPUIP */
2297 /** @todo CS */
2298 /** @todo FPUDP */
2299 /** @todo DS */
2300 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2301 pCtx->fpu.MXCSR = 0;
2302 pCtx->fpu.MXCSR_MASK = 0;
2303
2304 /** @todo check if FPU/XMM was actually used in the recompiler */
2305 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2306//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2307
2308#ifdef TARGET_X86_64
2309 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2310 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2311 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2312 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2313 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2314 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2315 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2316 pCtx->r8 = pVM->rem.s.Env.regs[8];
2317 pCtx->r9 = pVM->rem.s.Env.regs[9];
2318 pCtx->r10 = pVM->rem.s.Env.regs[10];
2319 pCtx->r11 = pVM->rem.s.Env.regs[11];
2320 pCtx->r12 = pVM->rem.s.Env.regs[12];
2321 pCtx->r13 = pVM->rem.s.Env.regs[13];
2322 pCtx->r14 = pVM->rem.s.Env.regs[14];
2323 pCtx->r15 = pVM->rem.s.Env.regs[15];
2324
2325 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2326#else
2327 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2328 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2329 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2330 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2331 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2332 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2333 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2334
2335 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2336#endif
2337
2338 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2339
2340 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2341 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2342 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2343 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2344 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2345
2346#ifdef TARGET_X86_64
2347 pCtx->rip = pVM->rem.s.Env.eip;
2348 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2349#else
2350 pCtx->eip = pVM->rem.s.Env.eip;
2351 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2352#endif
2353
2354 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2355 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2356 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2357 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2358 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2359 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2360
2361 for (i = 0; i < 8; i++)
2362 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2363
2364 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2365 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2366 {
2367 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2368 STAM_COUNTER_INC(&gStatREMGDTChange);
2369 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2370 }
2371
2372 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2373 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2374 {
2375 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2376 STAM_COUNTER_INC(&gStatREMIDTChange);
2377 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2378 }
2379
2380 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2381 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2382 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2383 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2384 {
2385 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2386 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2387 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2388 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2389 STAM_COUNTER_INC(&gStatREMLDTRChange);
2390 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2391 }
2392
2393 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2394 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2395 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2396 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2397 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2398 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2399 : 0) )
2400 {
2401 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2402 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2403 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2404 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2405 pCtx->tr = pVM->rem.s.Env.tr.selector;
2406 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2407 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2408 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2409 if (pCtx->trHid.Attr.u)
2410 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2411 STAM_COUNTER_INC(&gStatREMTRChange);
2412 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2413 }
2414
2415 /** @todo These values could still be out of sync! */
2416 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2417 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2418 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2419 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2420
2421 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2422 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2423 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2424
2425 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2426 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2427 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2428
2429 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2430 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2431 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2432
2433 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2434 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2435 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2436
2437 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2438 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2439 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2440
2441 /* Sysenter MSR */
2442 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2443 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2444 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2445
2446 /* System MSRs. */
2447 pCtx->msrEFER = pVM->rem.s.Env.efer;
2448 pCtx->msrSTAR = pVM->rem.s.Env.star;
2449 pCtx->msrPAT = pVM->rem.s.Env.pat;
2450#ifdef TARGET_X86_64
2451 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2452 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2453 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2454 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2455#endif
2456
2457}
2458
2459
2460/**
2461 * Update the VMM state information if we're currently in REM.
2462 *
2463 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2464 * we're currently executing in REM and the VMM state is invalid. This method will of
2465 * course check that we're executing in REM before syncing any data over to the VMM.
2466 *
2467 * @param pVM The VM handle.
2468 */
2469REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2470{
2471 if (pVM->rem.s.fInREM)
2472 remR3StateUpdate(pVM);
2473}
2474
2475
2476#undef LOG_GROUP
2477#define LOG_GROUP LOG_GROUP_REM
2478
2479
2480/**
2481 * Notify the recompiler about Address Gate 20 state change.
2482 *
2483 * This notification is required since A20 gate changes are
2484 * initialized from a device driver and the VM might just as
2485 * well be in REM mode as in RAW mode.
2486 *
2487 * @param pVM VM handle.
2488 * @param fEnable True if the gate should be enabled.
2489 * False if the gate should be disabled.
2490 */
2491REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2492{
2493 bool fSaved;
2494
2495 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2496 VM_ASSERT_EMT(pVM);
2497
2498 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2499 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2500
2501 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2502
2503 pVM->rem.s.fIgnoreAll = fSaved;
2504}
2505
2506
2507/**
2508 * Replays the invalidated recorded pages.
2509 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2510 *
2511 * @param pVM VM handle.
2512 */
2513REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2514{
2515 RTUINT i;
2516
2517 VM_ASSERT_EMT(pVM);
2518
2519 /*
2520 * Sync the required registers.
2521 */
2522 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2523 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2524 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2525 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2526
2527 /*
2528 * Replay the flushes.
2529 */
2530 pVM->rem.s.fIgnoreInvlPg = true;
2531 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2532 {
2533 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2534 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2535 }
2536 pVM->rem.s.fIgnoreInvlPg = false;
2537 pVM->rem.s.cInvalidatedPages = 0;
2538}
2539
2540
2541/**
2542 * Replays the handler notification changes
2543 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2544 *
2545 * @param pVM VM handle.
2546 */
2547REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2548{
2549 /*
2550 * Replay the flushes.
2551 */
2552 RTUINT i;
2553 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2554
2555 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2556 VM_ASSERT_EMT(pVM);
2557
2558 pVM->rem.s.cHandlerNotifications = 0;
2559 for (i = 0; i < c; i++)
2560 {
2561 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2562 switch (pRec->enmKind)
2563 {
2564 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2565 REMR3NotifyHandlerPhysicalRegister(pVM,
2566 pRec->u.PhysicalRegister.enmType,
2567 pRec->u.PhysicalRegister.GCPhys,
2568 pRec->u.PhysicalRegister.cb,
2569 pRec->u.PhysicalRegister.fHasHCHandler);
2570 break;
2571
2572 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2573 REMR3NotifyHandlerPhysicalDeregister(pVM,
2574 pRec->u.PhysicalDeregister.enmType,
2575 pRec->u.PhysicalDeregister.GCPhys,
2576 pRec->u.PhysicalDeregister.cb,
2577 pRec->u.PhysicalDeregister.fHasHCHandler,
2578 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2579 break;
2580
2581 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2582 REMR3NotifyHandlerPhysicalModify(pVM,
2583 pRec->u.PhysicalModify.enmType,
2584 pRec->u.PhysicalModify.GCPhysOld,
2585 pRec->u.PhysicalModify.GCPhysNew,
2586 pRec->u.PhysicalModify.cb,
2587 pRec->u.PhysicalModify.fHasHCHandler,
2588 pRec->u.PhysicalModify.fRestoreAsRAM);
2589 break;
2590
2591 default:
2592 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2593 break;
2594 }
2595 }
2596 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2597}
2598
2599
/**
 * Notify REM about changed code page.
 *
 * Invalidates any translated code derived from the guest page containing
 * pvCodePage (self-modifying-code protection builds only; otherwise a no-op).
 *
 * @returns VBox status code. Always VINF_SUCCESS, even when the page lookup fails.
 * @param   pVM         VM handle.
 * @param   pvCodePage  Code page address
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int      rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    /* NOTE(review): only strict VINF_SUCCESS proceeds; informational success
       codes fall through without flushing - confirm this is intended. */
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2637
2638
/**
 * Notification about a successful MMR3PhysRegister() call.
 *
 * Allocates the dirty-page bitmap when base RAM (GCPhys == 0) is registered,
 * then maps the range into the recompiler's physical memory layout.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address the RAM.
 * @param   cb          Size of the memory.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
{
    uint32_t cbBitmap;  /* strict builds only: page-aligned size of the dirty bitmap */
    int rc;             /* strict builds only: RTMemProtect status */
    Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Base ram?  One dirty byte is kept per page of base RAM.
     */
    if (!GCPhys)
    {
        phys_ram_size = cb;
        phys_ram_dirty_size = cb >> PAGE_SHIFT;
#ifndef VBOX_STRICT
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
        /* Over-allocate a bitmap covering all of 4G, protect everything past the
           real bitmap, then bias the pointer so that the valid region ends exactly
           at the start of the protected pages - out-of-bounds writes fault. */
        phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
        cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
        AssertRC(rc);
        phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
#endif
        /* Start out with everything marked dirty. */
        memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    }

    /*
     * Register the ram.  Suppress the notifications this causes.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

#ifdef VBOX_WITH_NEW_PHYS_CODE
    cpu_register_physical_memory(GCPhys, cb, GCPhys);
#else
    if (!GCPhys)
        cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
    else
    {
        if (fFlags & MM_RAM_FLAGS_RESERVED)
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        else
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
    }
#endif
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2704
2705#ifndef VBOX_WITH_NEW_PHYS_CODE
2706
2707/**
2708 * Notification about a successful PGMR3PhysRegisterChunk() call.
2709 *
2710 * @param pVM VM handle.
2711 * @param GCPhys The physical address the RAM.
2712 * @param cb Size of the memory.
2713 * @param pvRam The HC address of the RAM.
2714 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2715 */
2716REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2717{
2718 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2719 VM_ASSERT_EMT(pVM);
2720
2721 /*
2722 * Validate input - we trust the caller.
2723 */
2724 Assert(pvRam);
2725 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2726 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2727 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2728 Assert(fFlags == 0 /* normal RAM */);
2729 Assert(!pVM->rem.s.fIgnoreAll);
2730 pVM->rem.s.fIgnoreAll = true;
2731 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2732 Assert(pVM->rem.s.fIgnoreAll);
2733 pVM->rem.s.fIgnoreAll = false;
2734}
2735
2736
2737/**
2738 * Grows dynamically allocated guest RAM.
2739 * Will raise a fatal error if the operation fails.
2740 *
2741 * @param physaddr The physical address.
2742 */
2743void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2744{
2745 int rc;
2746 PVM pVM = cpu_single_env->pVM;
2747 const RTGCPHYS GCPhys = physaddr;
2748
2749 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2750 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2751 if (RT_SUCCESS(rc))
2752 return;
2753
2754 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2755 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2756 AssertFatalFailed();
2757}
2758
2759#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2760
2761/**
2762 * Notification about a successful MMR3PhysRomRegister() call.
2763 *
2764 * @param pVM VM handle.
2765 * @param GCPhys The physical address of the ROM.
2766 * @param cb The size of the ROM.
2767 * @param pvCopy Pointer to the ROM copy.
2768 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2769 * This function will be called when ever the protection of the
2770 * shadow ROM changes (at reset and end of POST).
2771 */
2772REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2773{
2774 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
2775 VM_ASSERT_EMT(pVM);
2776
2777 /*
2778 * Validate input - we trust the caller.
2779 */
2780 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2781 Assert(cb);
2782 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2783 Assert(pvCopy);
2784 Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);
2785
2786 /*
2787 * Register the rom.
2788 */
2789 Assert(!pVM->rem.s.fIgnoreAll);
2790 pVM->rem.s.fIgnoreAll = true;
2791
2792 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2793
2794 Log2(("%.64Rhxd\n", (char *)pvCopy + cb - 64));
2795
2796 Assert(pVM->rem.s.fIgnoreAll);
2797 pVM->rem.s.fIgnoreAll = false;
2798}
2799
2800
2801/**
2802 * Notification about a successful memory deregistration or reservation.
2803 *
2804 * @param pVM VM Handle.
2805 * @param GCPhys Start physical address.
2806 * @param cb The size of the range.
2807 */
2808REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2809{
2810 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2811 VM_ASSERT_EMT(pVM);
2812
2813 /*
2814 * Validate input - we trust the caller.
2815 */
2816 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2817 Assert(cb);
2818 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2819
2820 /*
2821 * Unassigning the memory.
2822 */
2823 Assert(!pVM->rem.s.fIgnoreAll);
2824 pVM->rem.s.fIgnoreAll = true;
2825
2826 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2827
2828 Assert(pVM->rem.s.fIgnoreAll);
2829 pVM->rem.s.fIgnoreAll = false;
2830}
2831
2832
2833/**
2834 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2835 *
2836 * @param pVM VM Handle.
2837 * @param enmType Handler type.
2838 * @param GCPhys Handler range address.
2839 * @param cb Size of the handler range.
2840 * @param fHasHCHandler Set if the handler has a HC callback function.
2841 *
2842 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2843 * Handler memory type to memory which has no HC handler.
2844 */
2845REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2846{
2847 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2848 enmType, GCPhys, cb, fHasHCHandler));
2849 VM_ASSERT_EMT(pVM);
2850 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2851 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2852
2853 if (pVM->rem.s.cHandlerNotifications)
2854 REMR3ReplayHandlerNotifications(pVM);
2855
2856 Assert(!pVM->rem.s.fIgnoreAll);
2857 pVM->rem.s.fIgnoreAll = true;
2858
2859 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2860 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2861 else if (fHasHCHandler)
2862 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2863
2864 Assert(pVM->rem.s.fIgnoreAll);
2865 pVM->rem.s.fIgnoreAll = false;
2866}
2867
2868
2869/**
2870 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2871 *
2872 * @param pVM VM Handle.
2873 * @param enmType Handler type.
2874 * @param GCPhys Handler range address.
2875 * @param cb Size of the handler range.
2876 * @param fHasHCHandler Set if the handler has a HC callback function.
2877 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2878 */
2879REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2880{
2881 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2882 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2883 VM_ASSERT_EMT(pVM);
2884
2885 if (pVM->rem.s.cHandlerNotifications)
2886 REMR3ReplayHandlerNotifications(pVM);
2887
2888 Assert(!pVM->rem.s.fIgnoreAll);
2889 pVM->rem.s.fIgnoreAll = true;
2890
2891/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2892 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2893 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2894 else if (fHasHCHandler)
2895 {
2896 if (!fRestoreAsRAM)
2897 {
2898 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2899 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2900 }
2901 else
2902 {
2903 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2904 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2905 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2906 }
2907 }
2908
2909 Assert(pVM->rem.s.fIgnoreAll);
2910 pVM->rem.s.fIgnoreAll = false;
2911}
2912
2913
2914/**
2915 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2916 *
2917 * @param pVM VM Handle.
2918 * @param enmType Handler type.
2919 * @param GCPhysOld Old handler range address.
2920 * @param GCPhysNew New handler range address.
2921 * @param cb Size of the handler range.
2922 * @param fHasHCHandler Set if the handler has a HC callback function.
2923 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2924 */
2925REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2926{
2927 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
2928 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
2929 VM_ASSERT_EMT(pVM);
2930 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
2931
2932 if (pVM->rem.s.cHandlerNotifications)
2933 REMR3ReplayHandlerNotifications(pVM);
2934
2935 if (fHasHCHandler)
2936 {
2937 Assert(!pVM->rem.s.fIgnoreAll);
2938 pVM->rem.s.fIgnoreAll = true;
2939
2940 /*
2941 * Reset the old page.
2942 */
2943 if (!fRestoreAsRAM)
2944 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
2945 else
2946 {
2947 /* This is not perfect, but it'll do for PD monitoring... */
2948 Assert(cb == PAGE_SIZE);
2949 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
2950 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
2951 }
2952
2953 /*
2954 * Update the new page.
2955 */
2956 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
2957 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2958 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
2959
2960 Assert(pVM->rem.s.fIgnoreAll);
2961 pVM->rem.s.fIgnoreAll = false;
2962 }
2963}
2964
2965
2966/**
2967 * Checks if we're handling access to this page or not.
2968 *
2969 * @returns true if we're trapping access.
2970 * @returns false if we aren't.
2971 * @param pVM The VM handle.
2972 * @param GCPhys The physical address.
2973 *
2974 * @remark This function will only work correctly in VBOX_STRICT builds!
2975 */
2976REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
2977{
2978#ifdef VBOX_STRICT
2979 unsigned long off;
2980 if (pVM->rem.s.cHandlerNotifications)
2981 REMR3ReplayHandlerNotifications(pVM);
2982
2983 off = get_phys_page_offset(GCPhys);
2984 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
2985 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
2986 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
2987#else
2988 return false;
2989#endif
2990}
2991
2992
2993/**
2994 * Deals with a rare case in get_phys_addr_code where the code
2995 * is being monitored.
2996 *
2997 * It could also be an MMIO page, in which case we will raise a fatal error.
2998 *
2999 * @returns The physical address corresponding to addr.
3000 * @param env The cpu environment.
3001 * @param addr The virtual address.
3002 * @param pTLBEntry The TLB entry.
3003 */
3004target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3005 target_ulong addr,
3006 CPUTLBEntry* pTLBEntry,
3007 target_phys_addr_t ioTLBEntry)
3008{
3009 PVM pVM = env->pVM;
3010
3011 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3012 {
3013 /* If code memory is being monitored, appropriate IOTLB entry will have
3014 handler IO type, and addend will provide real physical address, no
3015 matter if we store VA in TLB or not, as handlers are always passed PA */
3016 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3017 return ret;
3018 }
3019 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3020 "*** handlers\n",
3021 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3022 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3023 LogRel(("*** mmio\n"));
3024 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3025 LogRel(("*** phys\n"));
3026 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3027 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3028 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3029 AssertFatalFailed();
3030}
3031
3032/**
3033 * Read guest RAM and ROM.
3034 *
3035 * @param SrcGCPhys The source address (guest physical).
3036 * @param pvDst The destination address.
3037 * @param cb Number of bytes
3038 */
3039void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3040{
3041 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3042 VBOX_CHECK_ADDR(SrcGCPhys);
3043 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3044#ifdef VBOX_DEBUG_PHYS
3045 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3046#endif
3047 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3048}
3049
3050
3051/**
3052 * Read guest RAM and ROM, unsigned 8-bit.
3053 *
3054 * @param SrcGCPhys The source address (guest physical).
3055 */
3056RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3057{
3058 uint8_t val;
3059 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3060 VBOX_CHECK_ADDR(SrcGCPhys);
3061 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3062 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3063#ifdef VBOX_DEBUG_PHYS
3064 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3065#endif
3066 return val;
3067}
3068
3069
3070/**
3071 * Read guest RAM and ROM, signed 8-bit.
3072 *
3073 * @param SrcGCPhys The source address (guest physical).
3074 */
3075RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3076{
3077 int8_t val;
3078 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3079 VBOX_CHECK_ADDR(SrcGCPhys);
3080 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3081 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3082#ifdef VBOX_DEBUG_PHYS
3083 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3084#endif
3085 return val;
3086}
3087
3088
3089/**
3090 * Read guest RAM and ROM, unsigned 16-bit.
3091 *
3092 * @param SrcGCPhys The source address (guest physical).
3093 */
3094RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3095{
3096 uint16_t val;
3097 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3098 VBOX_CHECK_ADDR(SrcGCPhys);
3099 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3100 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3101#ifdef VBOX_DEBUG_PHYS
3102 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3103#endif
3104 return val;
3105}
3106
3107
3108/**
3109 * Read guest RAM and ROM, signed 16-bit.
3110 *
3111 * @param SrcGCPhys The source address (guest physical).
3112 */
3113RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3114{
3115 int16_t val;
3116 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3117 VBOX_CHECK_ADDR(SrcGCPhys);
3118 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3119 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3120#ifdef VBOX_DEBUG_PHYS
3121 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3122#endif
3123 return val;
3124}
3125
3126
3127/**
3128 * Read guest RAM and ROM, unsigned 32-bit.
3129 *
3130 * @param SrcGCPhys The source address (guest physical).
3131 */
3132RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3133{
3134 uint32_t val;
3135 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3136 VBOX_CHECK_ADDR(SrcGCPhys);
3137 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3138 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3139#ifdef VBOX_DEBUG_PHYS
3140 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3141#endif
3142 return val;
3143}
3144
3145
3146/**
3147 * Read guest RAM and ROM, signed 32-bit.
3148 *
3149 * @param SrcGCPhys The source address (guest physical).
3150 */
3151RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3152{
3153 int32_t val;
3154 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3155 VBOX_CHECK_ADDR(SrcGCPhys);
3156 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3157 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3158#ifdef VBOX_DEBUG_PHYS
3159 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3160#endif
3161 return val;
3162}
3163
3164
3165/**
3166 * Read guest RAM and ROM, unsigned 64-bit.
3167 *
3168 * @param SrcGCPhys The source address (guest physical).
3169 */
3170uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3171{
3172 uint64_t val;
3173 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3174 VBOX_CHECK_ADDR(SrcGCPhys);
3175 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3176 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3177#ifdef VBOX_DEBUG_PHYS
3178 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3179#endif
3180 return val;
3181}
3182
3183
3184/**
3185 * Read guest RAM and ROM, signed 64-bit.
3186 *
3187 * @param SrcGCPhys The source address (guest physical).
3188 */
3189int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3190{
3191 int64_t val;
3192 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3193 VBOX_CHECK_ADDR(SrcGCPhys);
3194 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3195 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3196#ifdef VBOX_DEBUG_PHYS
3197 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3198#endif
3199 return val;
3200}
3201
3202
3203/**
3204 * Write guest RAM.
3205 *
3206 * @param DstGCPhys The destination address (guest physical).
3207 * @param pvSrc The source address.
3208 * @param cb Number of bytes to write
3209 */
3210void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3211{
3212 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3213 VBOX_CHECK_ADDR(DstGCPhys);
3214 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3215 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3216#ifdef VBOX_DEBUG_PHYS
3217 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3218#endif
3219}
3220
3221
3222/**
3223 * Write guest RAM, unsigned 8-bit.
3224 *
3225 * @param DstGCPhys The destination address (guest physical).
3226 * @param val Value
3227 */
3228void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3229{
3230 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3231 VBOX_CHECK_ADDR(DstGCPhys);
3232 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3233 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3234#ifdef VBOX_DEBUG_PHYS
3235 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3236#endif
3237}
3238
3239
3240/**
3241 * Write guest RAM, unsigned 8-bit.
3242 *
3243 * @param DstGCPhys The destination address (guest physical).
3244 * @param val Value
3245 */
3246void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3247{
3248 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3249 VBOX_CHECK_ADDR(DstGCPhys);
3250 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3251 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3252#ifdef VBOX_DEBUG_PHYS
3253 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3254#endif
3255}
3256
3257
3258/**
3259 * Write guest RAM, unsigned 32-bit.
3260 *
3261 * @param DstGCPhys The destination address (guest physical).
3262 * @param val Value
3263 */
3264void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3265{
3266 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3267 VBOX_CHECK_ADDR(DstGCPhys);
3268 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3269 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3270#ifdef VBOX_DEBUG_PHYS
3271 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3272#endif
3273}
3274
3275
3276/**
3277 * Write guest RAM, unsigned 64-bit.
3278 *
3279 * @param DstGCPhys The destination address (guest physical).
3280 * @param val Value
3281 */
3282void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3283{
3284 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3285 VBOX_CHECK_ADDR(DstGCPhys);
3286 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3287 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3288#ifdef VBOX_DEBUG_PHYS
3289 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3290#endif
3291}
3292
3293#undef LOG_GROUP
3294#define LOG_GROUP LOG_GROUP_REM_MMIO
3295
3296/** Read MMIO memory. */
3297static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3298{
3299 uint32_t u32 = 0;
3300 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3301 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3302 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3303 return u32;
3304}
3305
3306/** Read MMIO memory. */
3307static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3308{
3309 uint32_t u32 = 0;
3310 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3311 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3312 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3313 return u32;
3314}
3315
3316/** Read MMIO memory. */
3317static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3318{
3319 uint32_t u32 = 0;
3320 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3321 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3322 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3323 return u32;
3324}
3325
3326/** Write to MMIO memory. */
3327static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3328{
3329 int rc;
3330 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3331 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3332 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3333}
3334
3335/** Write to MMIO memory. */
3336static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3337{
3338 int rc;
3339 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3340 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3341 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3342}
3343
3344/** Write to MMIO memory. */
3345static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3346{
3347 int rc;
3348 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3349 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3350 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3351}
3352
3353
3354#undef LOG_GROUP
3355#define LOG_GROUP LOG_GROUP_REM_HANDLER
3356
3357/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3358
3359static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3360{
3361 uint8_t u8;
3362 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3363 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3364 return u8;
3365}
3366
3367static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3368{
3369 uint16_t u16;
3370 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3371 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3372 return u16;
3373}
3374
3375static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3376{
3377 uint32_t u32;
3378 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3379 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3380 return u32;
3381}
3382
3383static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3384{
3385 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3386 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3387}
3388
3389static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3390{
3391 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3392 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3393}
3394
3395static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3396{
3397 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3398 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3399}
3400
3401/* -+- disassembly -+- */
3402
3403#undef LOG_GROUP
3404#define LOG_GROUP LOG_GROUP_REM_DISAS
3405
3406
3407/**
3408 * Enables or disables singled stepped disassembly.
3409 *
3410 * @returns VBox status code.
3411 * @param pVM VM handle.
3412 * @param fEnable To enable set this flag, to disable clear it.
3413 */
3414static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3415{
3416 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3417 VM_ASSERT_EMT(pVM);
3418
3419 if (fEnable)
3420 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3421 else
3422 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3423 return VINF_SUCCESS;
3424}
3425
3426
3427/**
3428 * Enables or disables singled stepped disassembly.
3429 *
3430 * @returns VBox status code.
3431 * @param pVM VM handle.
3432 * @param fEnable To enable set this flag, to disable clear it.
3433 */
3434REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3435{
3436 PVMREQ pReq;
3437 int rc;
3438
3439 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3440 if (VM_IS_EMT(pVM))
3441 return remR3DisasEnableStepping(pVM, fEnable);
3442
3443 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3444 AssertRC(rc);
3445 if (RT_SUCCESS(rc))
3446 rc = pReq->iStatus;
3447 VMR3ReqFree(pReq);
3448 return rc;
3449}
3450
3451
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument, prints the current state; otherwise converts the
 * argument to a bool and toggles single-stepped disassembly accordingly.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3476
3477
3478/**
3479 * Disassembles n instructions and prints them to the log.
3480 *
3481 * @returns Success indicator.
3482 * @param env Pointer to the recompiler CPU structure.
3483 * @param f32BitCode Indicates that whether or not the code should
3484 * be disassembled as 16 or 32 bit. If -1 the CS
3485 * selector will be inspected.
3486 * @param nrInstructions Nr of instructions to disassemble
3487 * @param pszPrefix
3488 * @remark not currently used for anything but ad-hoc debugging.
3489 */
3490bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
3491{
3492 int i, rc;
3493 RTGCPTR GCPtrPC;
3494 uint8_t *pvPC;
3495 RTINTPTR off;
3496 DISCPUSTATE Cpu;
3497
3498 /*
3499 * Determin 16/32 bit mode.
3500 */
3501 if (f32BitCode == -1)
3502 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3503
3504 /*
3505 * Convert cs:eip to host context address.
3506 * We don't care to much about cross page correctness presently.
3507 */
3508 GCPtrPC = env->segs[R_CS].base + env->eip;
3509 if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3510 {
3511 Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);
3512
3513 /* convert eip to physical address. */
3514 rc = PGMPhysGCPtr2R3PtrByGstCR3(env->pVM,
3515 GCPtrPC,
3516 env->cr[3],
3517 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
3518 (void**)&pvPC);
3519 if (RT_FAILURE(rc))
3520 {
3521 if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
3522 return false;
3523 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
3524 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
3525 }
3526 }
3527 else
3528 {
3529 /* physical address */
3530 rc = PGMPhysGCPhys2R3Ptr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
3531 (void**)&pvPC);
3532 if (RT_FAILURE(rc))
3533 return false;
3534 }
3535
3536 /*
3537 * Disassemble.
3538 */
3539 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3540 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3541 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3542 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3543 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3544 //Cpu.dwUserData[2] = GCPtrPC;
3545
3546 for (i=0;i<nrInstructions;i++)
3547 {
3548 char szOutput[256];
3549 uint32_t cbOp;
3550 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3551 return false;
3552 if (pszPrefix)
3553 Log(("%s: %s", pszPrefix, szOutput));
3554 else
3555 Log(("%s", szOutput));
3556
3557 pvPC += cbOp;
3558 }
3559 return true;
3560}
3561
3562
3563/** @todo need to test the new code, using the old code in the mean while. */
3564#define USE_OLD_DUMP_AND_DISASSEMBLY
3565
3566/**
3567 * Disassembles one instruction and prints it to the log.
3568 *
3569 * @returns Success indicator.
3570 * @param env Pointer to the recompiler CPU structure.
3571 * @param f32BitCode Indicates that whether or not the code should
3572 * be disassembled as 16 or 32 bit. If -1 the CS
3573 * selector will be inspected.
3574 * @param pszPrefix
3575 */
3576bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3577{
3578#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
3579 PVM pVM = env->pVM;
3580 RTGCPTR GCPtrPC;
3581 uint8_t *pvPC;
3582 char szOutput[256];
3583 uint32_t cbOp;
3584 RTINTPTR off;
3585 DISCPUSTATE Cpu;
3586
3587
3588 /* Doesn't work in long mode. */
3589 if (env->hflags & HF_LMA_MASK)
3590 return false;
3591
3592 /*
3593 * Determin 16/32 bit mode.
3594 */
3595 if (f32BitCode == -1)
3596 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3597
3598 /*
3599 * Log registers
3600 */
3601 if (LogIs2Enabled())
3602 {
3603 remR3StateUpdate(pVM);
3604 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3605 }
3606
3607 /*
3608 * Convert cs:eip to host context address.
3609 * We don't care to much about cross page correctness presently.
3610 */
3611 GCPtrPC = env->segs[R_CS].base + env->eip;
3612 if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3613 {
3614 /* convert eip to physical address. */
3615 int rc = PGMPhysGCPtr2R3PtrByGstCR3(pVM,
3616 GCPtrPC,
3617 env->cr[3],
3618 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
3619 (void**)&pvPC);
3620 if (RT_FAILURE(rc))
3621 {
3622 if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
3623 return false;
3624 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
3625 + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
3626 }
3627 }
3628 else
3629 {
3630
3631 /* physical address */
3632 int rc = PGMPhysGCPhys2R3Ptr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
3633 if (RT_FAILURE(rc))
3634 return false;
3635 }
3636
3637 /*
3638 * Disassemble.
3639 */
3640 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3641 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3642 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3643 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3644 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3645 //Cpu.dwUserData[2] = GCPtrPC;
3646 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3647 return false;
3648
3649 if (!f32BitCode)
3650 {
3651 if (pszPrefix)
3652 Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
3653 else
3654 Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
3655 }
3656 else
3657 {
3658 if (pszPrefix)
3659 Log(("%s: %s", pszPrefix, szOutput));
3660 else
3661 Log(("%s", szOutput));
3662 }
3663 return true;
3664
3665#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
3666 PVM pVM = env->pVM;
3667 const bool fLog = LogIsEnabled();
3668 const bool fLog2 = LogIs2Enabled();
3669 int rc = VINF_SUCCESS;
3670
3671 /*
3672 * Don't bother if there ain't any log output to do.
3673 */
3674 if (!fLog && !fLog2)
3675 return true;
3676
3677 /*
3678 * Update the state so DBGF reads the correct register values.
3679 */
3680 remR3StateUpdate(pVM);
3681
3682 /*
3683 * Log registers if requested.
3684 */
3685 if (!fLog2)
3686 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3687
3688 /*
3689 * Disassemble to log.
3690 */
3691 if (fLog)
3692 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3693
3694 return RT_SUCCESS(rc);
3695#endif
3696}
3697
3698
3699/**
3700 * Disassemble recompiled code.
3701 *
3702 * @param phFileIgnored Ignored, logfile usually.
3703 * @param pvCode Pointer to the code block.
3704 * @param cb Size of the code block.
3705 */
3706void disas(FILE *phFile, void *pvCode, unsigned long cb)
3707{
3708#ifdef DEBUG_TMP_LOGGING
3709# define DISAS_PRINTF(x...) fprintf(phFile, x)
3710#else
3711# define DISAS_PRINTF(x...) RTLogPrintf(x)
3712 if (LogIs2Enabled())
3713#endif
3714 {
3715 unsigned off = 0;
3716 char szOutput[256];
3717 DISCPUSTATE Cpu;
3718
3719 memset(&Cpu, 0, sizeof(Cpu));
3720#ifdef RT_ARCH_X86
3721 Cpu.mode = CPUMODE_32BIT;
3722#else
3723 Cpu.mode = CPUMODE_64BIT;
3724#endif
3725
3726 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3727 while (off < cb)
3728 {
3729 uint32_t cbInstr;
3730 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3731 DISAS_PRINTF("%s", szOutput);
3732 else
3733 {
3734 DISAS_PRINTF("disas error\n");
3735 cbInstr = 1;
3736#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3737 break;
3738#endif
3739 }
3740 off += cbInstr;
3741 }
3742 }
3743
3744#undef DISAS_PRINTF
3745}
3746
3747
3748/**
3749 * Disassemble guest code.
3750 *
3751 * @param phFileIgnored Ignored, logfile usually.
3752 * @param uCode The guest address of the code to disassemble. (flat?)
3753 * @param cb Number of bytes to disassemble.
3754 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3755 */
3756void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3757{
3758#ifdef DEBUG_TMP_LOGGING
3759# define DISAS_PRINTF(x...) fprintf(phFile, x)
3760#else
3761# define DISAS_PRINTF(x...) RTLogPrintf(x)
3762 if (LogIs2Enabled())
3763#endif
3764 {
3765 PVM pVM = cpu_single_env->pVM;
3766 RTSEL cs;
3767 RTGCUINTPTR eip;
3768
3769 /*
3770 * Update the state so DBGF reads the correct register values (flags).
3771 */
3772 remR3StateUpdate(pVM);
3773
3774 /*
3775 * Do the disassembling.
3776 */
3777 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3778 cs = cpu_single_env->segs[R_CS].selector;
3779 eip = uCode - cpu_single_env->segs[R_CS].base;
3780 for (;;)
3781 {
3782 char szBuf[256];
3783 uint32_t cbInstr;
3784 int rc = DBGFR3DisasInstrEx(pVM,
3785 cs,
3786 eip,
3787 0,
3788 szBuf, sizeof(szBuf),
3789 &cbInstr);
3790 if (RT_SUCCESS(rc))
3791 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3792 else
3793 {
3794 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3795 cbInstr = 1;
3796 }
3797
3798 /* next */
3799 if (cb <= cbInstr)
3800 break;
3801 cb -= cbInstr;
3802 uCode += cbInstr;
3803 eip += cbInstr;
3804 }
3805 }
3806#undef DISAS_PRINTF
3807}
3808
3809
3810/**
3811 * Looks up a guest symbol.
3812 *
3813 * @returns Pointer to symbol name. This is a static buffer.
3814 * @param orig_addr The address in question.
3815 */
3816const char *lookup_symbol(target_ulong orig_addr)
3817{
3818 RTGCINTPTR off = 0;
3819 DBGFSYMBOL Sym;
3820 PVM pVM = cpu_single_env->pVM;
3821 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3822 if (RT_SUCCESS(rc))
3823 {
3824 static char szSym[sizeof(Sym.szName) + 48];
3825 if (!off)
3826 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3827 else if (off > 0)
3828 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3829 else
3830 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3831 return szSym;
3832 }
3833 return "<N/A>";
3834}
3835
3836
3837#undef LOG_GROUP
3838#define LOG_GROUP LOG_GROUP_REM
3839
3840
3841/* -+- FF notifications -+- */
3842
3843
3844/**
3845 * Notification about a pending interrupt.
3846 *
3847 * @param pVM VM Handle.
3848 * @param u8Interrupt Interrupt
3849 * @thread The emulation thread.
3850 */
3851REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3852{
3853 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3854 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3855}
3856
3857/**
3858 * Notification about a pending interrupt.
3859 *
3860 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3861 * @param pVM VM Handle.
3862 * @thread The emulation thread.
3863 */
3864REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3865{
3866 return pVM->rem.s.u32PendingInterrupt;
3867}
3868
3869/**
3870 * Notification about the interrupt FF being set.
3871 *
3872 * @param pVM VM Handle.
3873 * @thread The emulation thread.
3874 */
3875REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3876{
3877 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3878 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3879 if (pVM->rem.s.fInREM)
3880 {
3881 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3882 CPU_INTERRUPT_EXTERNAL_HARD);
3883 }
3884}
3885
3886
3887/**
3888 * Notification about the interrupt FF being set.
3889 *
3890 * @param pVM VM Handle.
3891 * @thread Any.
3892 */
3893REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3894{
3895 LogFlow(("REMR3NotifyInterruptClear:\n"));
3896 if (pVM->rem.s.fInREM)
3897 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3898}
3899
3900
3901/**
3902 * Notification about pending timer(s).
3903 *
3904 * @param pVM VM Handle.
3905 * @thread Any.
3906 */
3907REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3908{
3909#ifndef DEBUG_bird
3910 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3911#endif
3912 if (pVM->rem.s.fInREM)
3913 {
3914 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3915 CPU_INTERRUPT_EXTERNAL_TIMER);
3916 }
3917}
3918
3919
3920/**
3921 * Notification about pending DMA transfers.
3922 *
3923 * @param pVM VM Handle.
3924 * @thread Any.
3925 */
3926REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3927{
3928 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3929 if (pVM->rem.s.fInREM)
3930 {
3931 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3932 CPU_INTERRUPT_EXTERNAL_DMA);
3933 }
3934}
3935
3936
3937/**
3938 * Notification about pending timer(s).
3939 *
3940 * @param pVM VM Handle.
3941 * @thread Any.
3942 */
3943REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3944{
3945 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3946 if (pVM->rem.s.fInREM)
3947 {
3948 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3949 CPU_INTERRUPT_EXTERNAL_EXIT);
3950 }
3951}
3952
3953
3954/**
3955 * Notification about pending FF set by an external thread.
3956 *
3957 * @param pVM VM handle.
3958 * @thread Any.
3959 */
3960REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3961{
3962 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3963 if (pVM->rem.s.fInREM)
3964 {
3965 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3966 CPU_INTERRUPT_EXTERNAL_EXIT);
3967 }
3968}
3969
3970
3971#ifdef VBOX_WITH_STATISTICS
3972void remR3ProfileStart(int statcode)
3973{
3974 STAMPROFILEADV *pStat;
3975 switch(statcode)
3976 {
3977 case STATS_EMULATE_SINGLE_INSTR:
3978 pStat = &gStatExecuteSingleInstr;
3979 break;
3980 case STATS_QEMU_COMPILATION:
3981 pStat = &gStatCompilationQEmu;
3982 break;
3983 case STATS_QEMU_RUN_EMULATED_CODE:
3984 pStat = &gStatRunCodeQEmu;
3985 break;
3986 case STATS_QEMU_TOTAL:
3987 pStat = &gStatTotalTimeQEmu;
3988 break;
3989 case STATS_QEMU_RUN_TIMERS:
3990 pStat = &gStatTimers;
3991 break;
3992 case STATS_TLB_LOOKUP:
3993 pStat= &gStatTBLookup;
3994 break;
3995 case STATS_IRQ_HANDLING:
3996 pStat= &gStatIRQ;
3997 break;
3998 case STATS_RAW_CHECK:
3999 pStat = &gStatRawCheck;
4000 break;
4001
4002 default:
4003 AssertMsgFailed(("unknown stat %d\n", statcode));
4004 return;
4005 }
4006 STAM_PROFILE_ADV_START(pStat, a);
4007}
4008
4009
4010void remR3ProfileStop(int statcode)
4011{
4012 STAMPROFILEADV *pStat;
4013 switch(statcode)
4014 {
4015 case STATS_EMULATE_SINGLE_INSTR:
4016 pStat = &gStatExecuteSingleInstr;
4017 break;
4018 case STATS_QEMU_COMPILATION:
4019 pStat = &gStatCompilationQEmu;
4020 break;
4021 case STATS_QEMU_RUN_EMULATED_CODE:
4022 pStat = &gStatRunCodeQEmu;
4023 break;
4024 case STATS_QEMU_TOTAL:
4025 pStat = &gStatTotalTimeQEmu;
4026 break;
4027 case STATS_QEMU_RUN_TIMERS:
4028 pStat = &gStatTimers;
4029 break;
4030 case STATS_TLB_LOOKUP:
4031 pStat= &gStatTBLookup;
4032 break;
4033 case STATS_IRQ_HANDLING:
4034 pStat= &gStatIRQ;
4035 break;
4036 case STATS_RAW_CHECK:
4037 pStat = &gStatRawCheck;
4038 break;
4039 default:
4040 AssertMsgFailed(("unknown stat %d\n", statcode));
4041 return;
4042 }
4043 STAM_PROFILE_ADV_STOP(pStat, a);
4044}
4045#endif
4046
4047/**
4048 * Raise an RC, force rem exit.
4049 *
4050 * @param pVM VM handle.
4051 * @param rc The rc.
4052 */
4053void remR3RaiseRC(PVM pVM, int rc)
4054{
4055 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4056 Assert(pVM->rem.s.fInREM);
4057 VM_ASSERT_EMT(pVM);
4058 pVM->rem.s.rc = rc;
4059 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4060}
4061
4062
4063/* -+- timers -+- */
4064
4065uint64_t cpu_get_tsc(CPUX86State *env)
4066{
4067 STAM_COUNTER_INC(&gStatCpuGetTSC);
4068 return TMCpuTickGet(env->pVM);
4069}
4070
4071
4072/* -+- interrupts -+- */
4073
4074void cpu_set_ferr(CPUX86State *env)
4075{
4076 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4077 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4078}
4079
4080int cpu_get_pic_interrupt(CPUState *env)
4081{
4082 uint8_t u8Interrupt;
4083 int rc;
4084
4085 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4086 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4087 * with the (a)pic.
4088 */
4089 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
4090 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4091 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4092 * remove this kludge. */
4093 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4094 {
4095 rc = VINF_SUCCESS;
4096 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4097 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4098 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4099 }
4100 else
4101 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
4102
4103 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4104 if (RT_SUCCESS(rc))
4105 {
4106 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
4107 env->interrupt_request |= CPU_INTERRUPT_HARD;
4108 return u8Interrupt;
4109 }
4110 return -1;
4111}
4112
4113
4114/* -+- local apic -+- */
4115
4116void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4117{
4118 int rc = PDMApicSetBase(env->pVM, val);
4119 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4120}
4121
4122uint64_t cpu_get_apic_base(CPUX86State *env)
4123{
4124 uint64_t u64;
4125 int rc = PDMApicGetBase(env->pVM, &u64);
4126 if (RT_SUCCESS(rc))
4127 {
4128 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4129 return u64;
4130 }
4131 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4132 return 0;
4133}
4134
4135void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4136{
4137 int rc = PDMApicSetTPR(env->pVM, val);
4138 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4139}
4140
4141uint8_t cpu_get_apic_tpr(CPUX86State *env)
4142{
4143 uint8_t u8;
4144 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4145 if (RT_SUCCESS(rc))
4146 {
4147 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4148 return u8;
4149 }
4150 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4151 return 0;
4152}
4153
4154
4155uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4156{
4157 uint64_t value;
4158 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4159 if (RT_SUCCESS(rc))
4160 {
4161 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4162 return value;
4163 }
4164 /** @todo: exception ? */
4165 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4166 return value;
4167}
4168
4169void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4170{
4171 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4172 /** @todo: exception if error ? */
4173 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4174}
4175
4176uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
4177{
4178 return CPUMGetGuestMsr(env->pVM, msr);
4179}
4180
4181void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
4182{
4183 CPUMSetGuestMsr(env->pVM, msr, val);
4184}
4185
4186/* -+- I/O Ports -+- */
4187
4188#undef LOG_GROUP
4189#define LOG_GROUP LOG_GROUP_REM_IOPORT
4190
4191void cpu_outb(CPUState *env, int addr, int val)
4192{
4193 int rc;
4194
4195 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4196 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4197
4198 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4199 if (RT_LIKELY(rc == VINF_SUCCESS))
4200 return;
4201 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4202 {
4203 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4204 remR3RaiseRC(env->pVM, rc);
4205 return;
4206 }
4207 remAbort(rc, __FUNCTION__);
4208}
4209
4210void cpu_outw(CPUState *env, int addr, int val)
4211{
4212 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4213 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4214 if (RT_LIKELY(rc == VINF_SUCCESS))
4215 return;
4216 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4217 {
4218 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4219 remR3RaiseRC(env->pVM, rc);
4220 return;
4221 }
4222 remAbort(rc, __FUNCTION__);
4223}
4224
4225void cpu_outl(CPUState *env, int addr, int val)
4226{
4227 int rc;
4228 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4229 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4230 if (RT_LIKELY(rc == VINF_SUCCESS))
4231 return;
4232 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4233 {
4234 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4235 remR3RaiseRC(env->pVM, rc);
4236 return;
4237 }
4238 remAbort(rc, __FUNCTION__);
4239}
4240
4241int cpu_inb(CPUState *env, int addr)
4242{
4243 uint32_t u32 = 0;
4244 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4245 if (RT_LIKELY(rc == VINF_SUCCESS))
4246 {
4247 if (/*addr != 0x61 && */addr != 0x71)
4248 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4249 return (int)u32;
4250 }
4251 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4252 {
4253 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4254 remR3RaiseRC(env->pVM, rc);
4255 return (int)u32;
4256 }
4257 remAbort(rc, __FUNCTION__);
4258 return 0xff;
4259}
4260
4261int cpu_inw(CPUState *env, int addr)
4262{
4263 uint32_t u32 = 0;
4264 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4265 if (RT_LIKELY(rc == VINF_SUCCESS))
4266 {
4267 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4268 return (int)u32;
4269 }
4270 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4271 {
4272 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4273 remR3RaiseRC(env->pVM, rc);
4274 return (int)u32;
4275 }
4276 remAbort(rc, __FUNCTION__);
4277 return 0xffff;
4278}
4279
4280int cpu_inl(CPUState *env, int addr)
4281{
4282 uint32_t u32 = 0;
4283 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4284 if (RT_LIKELY(rc == VINF_SUCCESS))
4285 {
4286//if (addr==0x01f0 && u32 == 0x6b6d)
4287// loglevel = ~0;
4288 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4289 return (int)u32;
4290 }
4291 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4292 {
4293 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4294 remR3RaiseRC(env->pVM, rc);
4295 return (int)u32;
4296 }
4297 remAbort(rc, __FUNCTION__);
4298 return 0xffffffff;
4299}
4300
4301#undef LOG_GROUP
4302#define LOG_GROUP LOG_GROUP_REM
4303
4304
4305/* -+- helpers and misc other interfaces -+- */
4306
4307/**
4308 * Perform the CPUID instruction.
4309 *
4310 * ASMCpuId cannot be invoked from some source files where this is used because of global
4311 * register allocations.
4312 *
4313 * @param env Pointer to the recompiler CPU structure.
4314 * @param uOperator CPUID operation (eax).
4315 * @param pvEAX Where to store eax.
4316 * @param pvEBX Where to store ebx.
4317 * @param pvECX Where to store ecx.
4318 * @param pvEDX Where to store edx.
4319 */
4320void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4321{
4322 CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4323}
4324
4325
4326#if 0 /* not used */
4327/**
4328 * Interface for qemu hardware to report back fatal errors.
4329 */
4330void hw_error(const char *pszFormat, ...)
4331{
4332 /*
4333 * Bitch about it.
4334 */
4335 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4336 * this in my Odin32 tree at home! */
4337 va_list args;
4338 va_start(args, pszFormat);
4339 RTLogPrintf("fatal error in virtual hardware:");
4340 RTLogPrintfV(pszFormat, args);
4341 va_end(args);
4342 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4343
4344 /*
4345 * If we're in REM context we'll sync back the state before 'jumping' to
4346 * the EMs failure handling.
4347 */
4348 PVM pVM = cpu_single_env->pVM;
4349 if (pVM->rem.s.fInREM)
4350 REMR3StateBack(pVM);
4351 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4352 AssertMsgFailed(("EMR3FatalError returned!\n"));
4353}
4354#endif
4355
4356/**
4357 * Interface for the qemu cpu to report unhandled situation
4358 * raising a fatal VM error.
4359 */
4360void cpu_abort(CPUState *env, const char *pszFormat, ...)
4361{
4362 va_list args;
4363 PVM pVM;
4364
4365 /*
4366 * Bitch about it.
4367 */
4368#ifndef _MSC_VER
4369 /** @todo: MSVC is right - it's not valid C */
4370 RTLogFlags(NULL, "nodisabled nobuffered");
4371#endif
4372 va_start(args, pszFormat);
4373 RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
4374 va_end(args);
4375 va_start(args, pszFormat);
4376 AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
4377 va_end(args);
4378
4379 /*
4380 * If we're in REM context we'll sync back the state before 'jumping' to
4381 * the EMs failure handling.
4382 */
4383 pVM = cpu_single_env->pVM;
4384 if (pVM->rem.s.fInREM)
4385 REMR3StateBack(pVM);
4386 EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
4387 AssertMsgFailed(("EMR3FatalError returned!\n"));
4388}
4389
4390
4391/**
4392 * Aborts the VM.
4393 *
4394 * @param rc VBox error code.
4395 * @param pszTip Hint about why/when this happend.
4396 */
4397void remAbort(int rc, const char *pszTip)
4398{
4399 PVM pVM;
4400
4401 /*
4402 * Bitch about it.
4403 */
4404 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4405 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4406
4407 /*
4408 * Jump back to where we entered the recompiler.
4409 */
4410 pVM = cpu_single_env->pVM;
4411 if (pVM->rem.s.fInREM)
4412 REMR3StateBack(pVM);
4413 EMR3FatalError(pVM, rc);
4414 AssertMsgFailed(("EMR3FatalError returned!\n"));
4415}
4416
4417
4418/**
4419 * Dumps a linux system call.
4420 * @param pVM VM handle.
4421 */
4422void remR3DumpLnxSyscall(PVM pVM)
4423{
4424 static const char *apsz[] =
4425 {
4426 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4427 "sys_exit",
4428 "sys_fork",
4429 "sys_read",
4430 "sys_write",
4431 "sys_open", /* 5 */
4432 "sys_close",
4433 "sys_waitpid",
4434 "sys_creat",
4435 "sys_link",
4436 "sys_unlink", /* 10 */
4437 "sys_execve",
4438 "sys_chdir",
4439 "sys_time",
4440 "sys_mknod",
4441 "sys_chmod", /* 15 */
4442 "sys_lchown16",
4443 "sys_ni_syscall", /* old break syscall holder */
4444 "sys_stat",
4445 "sys_lseek",
4446 "sys_getpid", /* 20 */
4447 "sys_mount",
4448 "sys_oldumount",
4449 "sys_setuid16",
4450 "sys_getuid16",
4451 "sys_stime", /* 25 */
4452 "sys_ptrace",
4453 "sys_alarm",
4454 "sys_fstat",
4455 "sys_pause",
4456 "sys_utime", /* 30 */
4457 "sys_ni_syscall", /* old stty syscall holder */
4458 "sys_ni_syscall", /* old gtty syscall holder */
4459 "sys_access",
4460 "sys_nice",
4461 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4462 "sys_sync",
4463 "sys_kill",
4464 "sys_rename",
4465 "sys_mkdir",
4466 "sys_rmdir", /* 40 */
4467 "sys_dup",
4468 "sys_pipe",
4469 "sys_times",
4470 "sys_ni_syscall", /* old prof syscall holder */
4471 "sys_brk", /* 45 */
4472 "sys_setgid16",
4473 "sys_getgid16",
4474 "sys_signal",
4475 "sys_geteuid16",
4476 "sys_getegid16", /* 50 */
4477 "sys_acct",
4478 "sys_umount", /* recycled never used phys() */
4479 "sys_ni_syscall", /* old lock syscall holder */
4480 "sys_ioctl",
4481 "sys_fcntl", /* 55 */
4482 "sys_ni_syscall", /* old mpx syscall holder */
4483 "sys_setpgid",
4484 "sys_ni_syscall", /* old ulimit syscall holder */
4485 "sys_olduname",
4486 "sys_umask", /* 60 */
4487 "sys_chroot",
4488 "sys_ustat",
4489 "sys_dup2",
4490 "sys_getppid",
4491 "sys_getpgrp", /* 65 */
4492 "sys_setsid",
4493 "sys_sigaction",
4494 "sys_sgetmask",
4495 "sys_ssetmask",
4496 "sys_setreuid16", /* 70 */
4497 "sys_setregid16",
4498 "sys_sigsuspend",
4499 "sys_sigpending",
4500 "sys_sethostname",
4501 "sys_setrlimit", /* 75 */
4502 "sys_old_getrlimit",
4503 "sys_getrusage",
4504 "sys_gettimeofday",
4505 "sys_settimeofday",
4506 "sys_getgroups16", /* 80 */
4507 "sys_setgroups16",
4508 "old_select",
4509 "sys_symlink",
4510 "sys_lstat",
4511 "sys_readlink", /* 85 */
4512 "sys_uselib",
4513 "sys_swapon",
4514 "sys_reboot",
4515 "old_readdir",
4516 "old_mmap", /* 90 */
4517 "sys_munmap",
4518 "sys_truncate",
4519 "sys_ftruncate",
4520 "sys_fchmod",
4521 "sys_fchown16", /* 95 */
4522 "sys_getpriority",
4523 "sys_setpriority",
4524 "sys_ni_syscall", /* old profil syscall holder */
4525 "sys_statfs",
4526 "sys_fstatfs", /* 100 */
4527 "sys_ioperm",
4528 "sys_socketcall",
4529 "sys_syslog",
4530 "sys_setitimer",
4531 "sys_getitimer", /* 105 */
4532 "sys_newstat",
4533 "sys_newlstat",
4534 "sys_newfstat",
4535 "sys_uname",
4536 "sys_iopl", /* 110 */
4537 "sys_vhangup",
4538 "sys_ni_syscall", /* old "idle" system call */
4539 "sys_vm86old",
4540 "sys_wait4",
4541 "sys_swapoff", /* 115 */
4542 "sys_sysinfo",
4543 "sys_ipc",
4544 "sys_fsync",
4545 "sys_sigreturn",
4546 "sys_clone", /* 120 */
4547 "sys_setdomainname",
4548 "sys_newuname",
4549 "sys_modify_ldt",
4550 "sys_adjtimex",
4551 "sys_mprotect", /* 125 */
4552 "sys_sigprocmask",
4553 "sys_ni_syscall", /* old "create_module" */
4554 "sys_init_module",
4555 "sys_delete_module",
4556 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4557 "sys_quotactl",
4558 "sys_getpgid",
4559 "sys_fchdir",
4560 "sys_bdflush",
4561 "sys_sysfs", /* 135 */
4562 "sys_personality",
4563 "sys_ni_syscall", /* reserved for afs_syscall */
4564 "sys_setfsuid16",
4565 "sys_setfsgid16",
4566 "sys_llseek", /* 140 */
4567 "sys_getdents",
4568 "sys_select",
4569 "sys_flock",
4570 "sys_msync",
4571 "sys_readv", /* 145 */
4572 "sys_writev",
4573 "sys_getsid",
4574 "sys_fdatasync",
4575 "sys_sysctl",
4576 "sys_mlock", /* 150 */
4577 "sys_munlock",
4578 "sys_mlockall",
4579 "sys_munlockall",
4580 "sys_sched_setparam",
4581 "sys_sched_getparam", /* 155 */
4582 "sys_sched_setscheduler",
4583 "sys_sched_getscheduler",
4584 "sys_sched_yield",
4585 "sys_sched_get_priority_max",
4586 "sys_sched_get_priority_min", /* 160 */
4587 "sys_sched_rr_get_interval",
4588 "sys_nanosleep",
4589 "sys_mremap",
4590 "sys_setresuid16",
4591 "sys_getresuid16", /* 165 */
4592 "sys_vm86",
4593 "sys_ni_syscall", /* Old sys_query_module */
4594 "sys_poll",
4595 "sys_nfsservctl",
4596 "sys_setresgid16", /* 170 */
4597 "sys_getresgid16",
4598 "sys_prctl",
4599 "sys_rt_sigreturn",
4600 "sys_rt_sigaction",
4601 "sys_rt_sigprocmask", /* 175 */
4602 "sys_rt_sigpending",
4603 "sys_rt_sigtimedwait",
4604 "sys_rt_sigqueueinfo",
4605 "sys_rt_sigsuspend",
4606 "sys_pread64", /* 180 */
4607 "sys_pwrite64",
4608 "sys_chown16",
4609 "sys_getcwd",
4610 "sys_capget",
4611 "sys_capset", /* 185 */
4612 "sys_sigaltstack",
4613 "sys_sendfile",
4614 "sys_ni_syscall", /* reserved for streams1 */
4615 "sys_ni_syscall", /* reserved for streams2 */
4616 "sys_vfork", /* 190 */
4617 "sys_getrlimit",
4618 "sys_mmap2",
4619 "sys_truncate64",
4620 "sys_ftruncate64",
4621 "sys_stat64", /* 195 */
4622 "sys_lstat64",
4623 "sys_fstat64",
4624 "sys_lchown",
4625 "sys_getuid",
4626 "sys_getgid", /* 200 */
4627 "sys_geteuid",
4628 "sys_getegid",
4629 "sys_setreuid",
4630 "sys_setregid",
4631 "sys_getgroups", /* 205 */
4632 "sys_setgroups",
4633 "sys_fchown",
4634 "sys_setresuid",
4635 "sys_getresuid",
4636 "sys_setresgid", /* 210 */
4637 "sys_getresgid",
4638 "sys_chown",
4639 "sys_setuid",
4640 "sys_setgid",
4641 "sys_setfsuid", /* 215 */
4642 "sys_setfsgid",
4643 "sys_pivot_root",
4644 "sys_mincore",
4645 "sys_madvise",
4646 "sys_getdents64", /* 220 */
4647 "sys_fcntl64",
4648 "sys_ni_syscall", /* reserved for TUX */
4649 "sys_ni_syscall",
4650 "sys_gettid",
4651 "sys_readahead", /* 225 */
4652 "sys_setxattr",
4653 "sys_lsetxattr",
4654 "sys_fsetxattr",
4655 "sys_getxattr",
4656 "sys_lgetxattr", /* 230 */
4657 "sys_fgetxattr",
4658 "sys_listxattr",
4659 "sys_llistxattr",
4660 "sys_flistxattr",
4661 "sys_removexattr", /* 235 */
4662 "sys_lremovexattr",
4663 "sys_fremovexattr",
4664 "sys_tkill",
4665 "sys_sendfile64",
4666 "sys_futex", /* 240 */
4667 "sys_sched_setaffinity",
4668 "sys_sched_getaffinity",
4669 "sys_set_thread_area",
4670 "sys_get_thread_area",
4671 "sys_io_setup", /* 245 */
4672 "sys_io_destroy",
4673 "sys_io_getevents",
4674 "sys_io_submit",
4675 "sys_io_cancel",
4676 "sys_fadvise64", /* 250 */
4677 "sys_ni_syscall",
4678 "sys_exit_group",
4679 "sys_lookup_dcookie",
4680 "sys_epoll_create",
4681 "sys_epoll_ctl", /* 255 */
4682 "sys_epoll_wait",
4683 "sys_remap_file_pages",
4684 "sys_set_tid_address",
4685 "sys_timer_create",
4686 "sys_timer_settime", /* 260 */
4687 "sys_timer_gettime",
4688 "sys_timer_getoverrun",
4689 "sys_timer_delete",
4690 "sys_clock_settime",
4691 "sys_clock_gettime", /* 265 */
4692 "sys_clock_getres",
4693 "sys_clock_nanosleep",
4694 "sys_statfs64",
4695 "sys_fstatfs64",
4696 "sys_tgkill", /* 270 */
4697 "sys_utimes",
4698 "sys_fadvise64_64",
4699 "sys_ni_syscall" /* sys_vserver */
4700 };
4701
4702 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4703 switch (uEAX)
4704 {
4705 default:
4706 if (uEAX < RT_ELEMENTS(apsz))
4707 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4708 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4709 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4710 else
4711 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4712 break;
4713
4714 }
4715}
4716
4717
4718/**
4719 * Dumps an OpenBSD system call.
4720 * @param pVM VM handle.
4721 */
4722void remR3DumpOBsdSyscall(PVM pVM)
4723{
4724 static const char *apsz[] =
4725 {
4726 "SYS_syscall", //0
4727 "SYS_exit", //1
4728 "SYS_fork", //2
4729 "SYS_read", //3
4730 "SYS_write", //4
4731 "SYS_open", //5
4732 "SYS_close", //6
4733 "SYS_wait4", //7
4734 "SYS_8",
4735 "SYS_link", //9
4736 "SYS_unlink", //10
4737 "SYS_11",
4738 "SYS_chdir", //12
4739 "SYS_fchdir", //13
4740 "SYS_mknod", //14
4741 "SYS_chmod", //15
4742 "SYS_chown", //16
4743 "SYS_break", //17
4744 "SYS_18",
4745 "SYS_19",
4746 "SYS_getpid", //20
4747 "SYS_mount", //21
4748 "SYS_unmount", //22
4749 "SYS_setuid", //23
4750 "SYS_getuid", //24
4751 "SYS_geteuid", //25
4752 "SYS_ptrace", //26
4753 "SYS_recvmsg", //27
4754 "SYS_sendmsg", //28
4755 "SYS_recvfrom", //29
4756 "SYS_accept", //30
4757 "SYS_getpeername", //31
4758 "SYS_getsockname", //32
4759 "SYS_access", //33
4760 "SYS_chflags", //34
4761 "SYS_fchflags", //35
4762 "SYS_sync", //36
4763 "SYS_kill", //37
4764 "SYS_38",
4765 "SYS_getppid", //39
4766 "SYS_40",
4767 "SYS_dup", //41
4768 "SYS_opipe", //42
4769 "SYS_getegid", //43
4770 "SYS_profil", //44
4771 "SYS_ktrace", //45
4772 "SYS_sigaction", //46
4773 "SYS_getgid", //47
4774 "SYS_sigprocmask", //48
4775 "SYS_getlogin", //49
4776 "SYS_setlogin", //50
4777 "SYS_acct", //51
4778 "SYS_sigpending", //52
4779 "SYS_osigaltstack", //53
4780 "SYS_ioctl", //54
4781 "SYS_reboot", //55
4782 "SYS_revoke", //56
4783 "SYS_symlink", //57
4784 "SYS_readlink", //58
4785 "SYS_execve", //59
4786 "SYS_umask", //60
4787 "SYS_chroot", //61
4788 "SYS_62",
4789 "SYS_63",
4790 "SYS_64",
4791 "SYS_65",
4792 "SYS_vfork", //66
4793 "SYS_67",
4794 "SYS_68",
4795 "SYS_sbrk", //69
4796 "SYS_sstk", //70
4797 "SYS_61",
4798 "SYS_vadvise", //72
4799 "SYS_munmap", //73
4800 "SYS_mprotect", //74
4801 "SYS_madvise", //75
4802 "SYS_76",
4803 "SYS_77",
4804 "SYS_mincore", //78
4805 "SYS_getgroups", //79
4806 "SYS_setgroups", //80
4807 "SYS_getpgrp", //81
4808 "SYS_setpgid", //82
4809 "SYS_setitimer", //83
4810 "SYS_84",
4811 "SYS_85",
4812 "SYS_getitimer", //86
4813 "SYS_87",
4814 "SYS_88",
4815 "SYS_89",
4816 "SYS_dup2", //90
4817 "SYS_91",
4818 "SYS_fcntl", //92
4819 "SYS_select", //93
4820 "SYS_94",
4821 "SYS_fsync", //95
4822 "SYS_setpriority", //96
4823 "SYS_socket", //97
4824 "SYS_connect", //98
4825 "SYS_99",
4826 "SYS_getpriority", //100
4827 "SYS_101",
4828 "SYS_102",
4829 "SYS_sigreturn", //103
4830 "SYS_bind", //104
4831 "SYS_setsockopt", //105
4832 "SYS_listen", //106
4833 "SYS_107",
4834 "SYS_108",
4835 "SYS_109",
4836 "SYS_110",
4837 "SYS_sigsuspend", //111
4838 "SYS_112",
4839 "SYS_113",
4840 "SYS_114",
4841 "SYS_115",
4842 "SYS_gettimeofday", //116
4843 "SYS_getrusage", //117
4844 "SYS_getsockopt", //118
4845 "SYS_119",
4846 "SYS_readv", //120
4847 "SYS_writev", //121
4848 "SYS_settimeofday", //122
4849 "SYS_fchown", //123
4850 "SYS_fchmod", //124
4851 "SYS_125",
4852 "SYS_setreuid", //126
4853 "SYS_setregid", //127
4854 "SYS_rename", //128
4855 "SYS_129",
4856 "SYS_130",
4857 "SYS_flock", //131
4858 "SYS_mkfifo", //132
4859 "SYS_sendto", //133
4860 "SYS_shutdown", //134
4861 "SYS_socketpair", //135
4862 "SYS_mkdir", //136
4863 "SYS_rmdir", //137
4864 "SYS_utimes", //138
4865 "SYS_139",
4866 "SYS_adjtime", //140
4867 "SYS_141",
4868 "SYS_142",
4869 "SYS_143",
4870 "SYS_144",
4871 "SYS_145",
4872 "SYS_146",
4873 "SYS_setsid", //147
4874 "SYS_quotactl", //148
4875 "SYS_149",
4876 "SYS_150",
4877 "SYS_151",
4878 "SYS_152",
4879 "SYS_153",
4880 "SYS_154",
4881 "SYS_nfssvc", //155
4882 "SYS_156",
4883 "SYS_157",
4884 "SYS_158",
4885 "SYS_159",
4886 "SYS_160",
4887 "SYS_getfh", //161
4888 "SYS_162",
4889 "SYS_163",
4890 "SYS_164",
4891 "SYS_sysarch", //165
4892 "SYS_166",
4893 "SYS_167",
4894 "SYS_168",
4895 "SYS_169",
4896 "SYS_170",
4897 "SYS_171",
4898 "SYS_172",
4899 "SYS_pread", //173
4900 "SYS_pwrite", //174
4901 "SYS_175",
4902 "SYS_176",
4903 "SYS_177",
4904 "SYS_178",
4905 "SYS_179",
4906 "SYS_180",
4907 "SYS_setgid", //181
4908 "SYS_setegid", //182
4909 "SYS_seteuid", //183
4910 "SYS_lfs_bmapv", //184
4911 "SYS_lfs_markv", //185
4912 "SYS_lfs_segclean", //186
4913 "SYS_lfs_segwait", //187
4914 "SYS_188",
4915 "SYS_189",
4916 "SYS_190",
4917 "SYS_pathconf", //191
4918 "SYS_fpathconf", //192
4919 "SYS_swapctl", //193
4920 "SYS_getrlimit", //194
4921 "SYS_setrlimit", //195
4922 "SYS_getdirentries", //196
4923 "SYS_mmap", //197
4924 "SYS___syscall", //198
4925 "SYS_lseek", //199
4926 "SYS_truncate", //200
4927 "SYS_ftruncate", //201
4928 "SYS___sysctl", //202
4929 "SYS_mlock", //203
4930 "SYS_munlock", //204
4931 "SYS_205",
4932 "SYS_futimes", //206
4933 "SYS_getpgid", //207
4934 "SYS_xfspioctl", //208
4935 "SYS_209",
4936 "SYS_210",
4937 "SYS_211",
4938 "SYS_212",
4939 "SYS_213",
4940 "SYS_214",
4941 "SYS_215",
4942 "SYS_216",
4943 "SYS_217",
4944 "SYS_218",
4945 "SYS_219",
4946 "SYS_220",
4947 "SYS_semget", //221
4948 "SYS_222",
4949 "SYS_223",
4950 "SYS_224",
4951 "SYS_msgget", //225
4952 "SYS_msgsnd", //226
4953 "SYS_msgrcv", //227
4954 "SYS_shmat", //228
4955 "SYS_229",
4956 "SYS_shmdt", //230
4957 "SYS_231",
4958 "SYS_clock_gettime", //232
4959 "SYS_clock_settime", //233
4960 "SYS_clock_getres", //234
4961 "SYS_235",
4962 "SYS_236",
4963 "SYS_237",
4964 "SYS_238",
4965 "SYS_239",
4966 "SYS_nanosleep", //240
4967 "SYS_241",
4968 "SYS_242",
4969 "SYS_243",
4970 "SYS_244",
4971 "SYS_245",
4972 "SYS_246",
4973 "SYS_247",
4974 "SYS_248",
4975 "SYS_249",
4976 "SYS_minherit", //250
4977 "SYS_rfork", //251
4978 "SYS_poll", //252
4979 "SYS_issetugid", //253
4980 "SYS_lchown", //254
4981 "SYS_getsid", //255
4982 "SYS_msync", //256
4983 "SYS_257",
4984 "SYS_258",
4985 "SYS_259",
4986 "SYS_getfsstat", //260
4987 "SYS_statfs", //261
4988 "SYS_fstatfs", //262
4989 "SYS_pipe", //263
4990 "SYS_fhopen", //264
4991 "SYS_265",
4992 "SYS_fhstatfs", //266
4993 "SYS_preadv", //267
4994 "SYS_pwritev", //268
4995 "SYS_kqueue", //269
4996 "SYS_kevent", //270
4997 "SYS_mlockall", //271
4998 "SYS_munlockall", //272
4999 "SYS_getpeereid", //273
5000 "SYS_274",
5001 "SYS_275",
5002 "SYS_276",
5003 "SYS_277",
5004 "SYS_278",
5005 "SYS_279",
5006 "SYS_280",
5007 "SYS_getresuid", //281
5008 "SYS_setresuid", //282
5009 "SYS_getresgid", //283
5010 "SYS_setresgid", //284
5011 "SYS_285",
5012 "SYS_mquery", //286
5013 "SYS_closefrom", //287
5014 "SYS_sigaltstack", //288
5015 "SYS_shmget", //289
5016 "SYS_semop", //290
5017 "SYS_stat", //291
5018 "SYS_fstat", //292
5019 "SYS_lstat", //293
5020 "SYS_fhstat", //294
5021 "SYS___semctl", //295
5022 "SYS_shmctl", //296
5023 "SYS_msgctl", //297
5024 "SYS_MAXSYSCALL", //298
5025 //299
5026 //300
5027 };
5028 uint32_t uEAX;
5029 if (!LogIsEnabled())
5030 return;
5031 uEAX = CPUMGetGuestEAX(pVM);
5032 switch (uEAX)
5033 {
5034 default:
5035 if (uEAX < RT_ELEMENTS(apsz))
5036 {
5037 uint32_t au32Args[8] = {0};
5038 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
5039 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5040 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5041 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5042 }
5043 else
5044 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
5045 break;
5046 }
5047}
5048
5049
5050#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5051/**
5052 * The Dll main entry point (stub).
5053 */
5054bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5055{
5056 return true;
5057}
5058
5059void *memcpy(void *dst, const void *src, size_t size)
5060{
5061 uint8_t*pbDst = dst, *pbSrc = src;
5062 while (size-- > 0)
5063 *pbDst++ = *pbSrc++;
5064 return dst;
5065}
5066
5067#endif
5068
/**
 * Called by the recompiler on SMM state changes — intentionally a no-op stub here.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette