VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 16681

Last change on this file since 16681 was 16455, checked in by vboxsync, 16 years ago

REM: segment forced sync, cleanups

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 155.9 KB
Line 
1/* $Id: VBoxRecompiler.c 16455 2009-02-02 12:55:06Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33void cpu_exec_init_all(unsigned long tb_size);
34
35#include <VBox/rem.h>
36#include <VBox/vmapi.h>
37#include <VBox/tm.h>
38#include <VBox/ssm.h>
39#include <VBox/em.h>
40#include <VBox/trpm.h>
41#include <VBox/iom.h>
42#include <VBox/mm.h>
43#include <VBox/pgm.h>
44#include <VBox/pdm.h>
45#include <VBox/dbgf.h>
46#include <VBox/dbg.h>
47#include <VBox/hwaccm.h>
48#include <VBox/patm.h>
49#include <VBox/csam.h>
50#include "REMInternal.h"
51#include <VBox/vm.h>
52#include <VBox/param.h>
53#include <VBox/err.h>
54
55#include <VBox/log.h>
56#include <iprt/semaphore.h>
57#include <iprt/asm.h>
58#include <iprt/assert.h>
59#include <iprt/thread.h>
60#include <iprt/string.h>
61
62/* Don't wanna include everything. */
63extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
64extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
65extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
66extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
67extern void tlb_flush(CPUState *env, int flush_global);
68extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
69extern void sync_ldtr(CPUX86State *env1, int selector);
70extern int sync_tr(CPUX86State *env1, int selector);
71
72#ifdef VBOX_STRICT
73unsigned long get_phys_page_offset(target_ulong addr);
74#endif
75
76/*******************************************************************************
77* Defined Constants And Macros *
78*******************************************************************************/
79
80/** Copy 80-bit fpu register at pSrc to pDst.
81 * This is probably faster than *calling* memcpy.
82 */
83#define REM_COPY_FPU_REG(pDst, pSrc) \
84 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
85
86
87/*******************************************************************************
88* Internal Functions *
89*******************************************************************************/
90static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
91static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
92static void remR3StateUpdate(PVM pVM);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145#endif
146
147/*
148 * Global stuff.
149 */
150
/** MMIO read callbacks — one entry each for byte, word and dword accesses,
 *  handed to cpu_register_io_memory() in REMR3Init(). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks — byte/word/dword, paired with g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access handler read callbacks — byte/word/dword, registered as the
 *  iHandlerMemType memory type in REMR3Init(). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access handler write callbacks — byte/word/dword, paired with g_apfnHandlerRead. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
182
183
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 * (Excluded on win.amd64 — presumably a toolchain limitation at the time; TODO confirm.)
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 *  Single optional numeric/boolean argument (cTimesMax = ~0 means unlimited repeats). */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors registered with DBGCRegisterCommands() in REMR3Init(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
215
216
217/*******************************************************************************
218* Internal Functions *
219*******************************************************************************/
220void remAbort(int rc, const char *pszTip);
221extern int testmath(void);
222
/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
/* NOTE(review): both #if branches are now the same assertion; the conditional
 * could be collapsed once confirmed no platform still needs the stricter check. */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/* Prologue code, must be in lower 4G to simplify jumps to/from generated code */
/* Allocated with RTMemExecAlloc(_1K) in REMR3Init(); never freed in this file. */
uint8_t* code_gen_prologue;
236
237/**
238 * Initializes the REM.
239 *
240 * @returns VBox status code.
241 * @param pVM The VM to operate on.
242 */
243REMR3DECL(int) REMR3Init(PVM pVM)
244{
245 uint32_t u32Dummy;
246 int rc;
247
248#ifdef VBOX_ENABLE_VBOXREM64
249 LogRel(("Using 64-bit aware REM\n"));
250#endif
251
252 /*
253 * Assert sanity.
254 */
255 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
256 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
257 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
258#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
259 Assert(!testmath());
260#endif
261 /*
262 * Init some internal data members.
263 */
264 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
265 pVM->rem.s.Env.pVM = pVM;
266#ifdef CPU_RAW_MODE_INIT
267 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
268#endif
269
270 /* ctx. */
271 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
272 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
273
274 /* ignore all notifications */
275 pVM->rem.s.fIgnoreAll = true;
276
277 code_gen_prologue = RTMemExecAlloc(_1K);
278
279 cpu_exec_init_all(0);
280
281 /*
282 * Init the recompiler.
283 */
284 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
285 {
286 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
287 return VERR_GENERAL_FAILURE;
288 }
289 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
290 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
291
292 /* allocate code buffer for single instruction emulation. */
293 pVM->rem.s.Env.cbCodeBuffer = 4096;
294 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
295 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
296
297 /* finally, set the cpu_single_env global. */
298 cpu_single_env = &pVM->rem.s.Env;
299
300 /* Nothing is pending by default */
301 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
302
303 /*
304 * Register ram types.
305 */
306 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
307 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
308 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
309 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
310 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
311
312 /* stop ignoring. */
313 pVM->rem.s.fIgnoreAll = false;
314
315 /*
316 * Register the saved state data unit.
317 */
318 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
319 NULL, remR3Save, NULL,
320 NULL, remR3Load, NULL);
321 if (RT_FAILURE(rc))
322 return rc;
323
324#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
325 /*
326 * Debugger commands.
327 */
328 static bool fRegisteredCmds = false;
329 if (!fRegisteredCmds)
330 {
331 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
332 if (RT_SUCCESS(rc))
333 fRegisteredCmds = true;
334 }
335#endif
336
337#ifdef VBOX_WITH_STATISTICS
338 /*
339 * Statistics.
340 */
341 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
342 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
343 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
344 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
345 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
348 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
349 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
350 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
351 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
352 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
353
354 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
355
356 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
357 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
358 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
359 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
360 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
361 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
362 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
363 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
364 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
365 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
366 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
367
368 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
369 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
370 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
371 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
372
373 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
378 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
379
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
385 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
386
387
388#endif
389
390#ifdef DEBUG_ALL_LOGGING
391 loglevel = ~0;
392# ifdef DEBUG_TMP_LOGGING
393 logfile = fopen("/tmp/vbox-qemu.log", "w");
394# endif
395#endif
396
397 return rc;
398}
399
400
401/**
402 * Terminates the REM.
403 *
404 * Termination means cleaning up and freeing all resources,
405 * the VM it self is at this point powered off or suspended.
406 *
407 * @returns VBox status code.
408 * @param pVM The VM to operate on.
409 */
410REMR3DECL(int) REMR3Term(PVM pVM)
411{
412 return VINF_SUCCESS;
413}
414
415
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * Notifications are suppressed (fIgnoreAll) around cpu_reset() so that
     * any state callbacks it triggers don't feed back into REM.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;   /* drop the queued page-invalidation records */
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
440
441
/**
 * Execute state save operation.
 *
 * Writes the REM unit in this order: hflags, ~0 separator, raw-ring-0 flag,
 * pending interrupt, ~0 terminator.  remR3Load() must read it back in the
 * same order.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    /* Intermediate Put return codes are ignored; SSM presumably latches the
     * first error and the final Put below reports it — TODO confirm. */
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0); /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
467
468
/**
 * Execute state load operation.
 *
 * Accepts the current version and the 1.6 layout; the 1.6 layout carries a
 * redundant CPU state blob and the invalidated-page list which are read and
 * (mostly) discarded.  Field order must mirror remR3Save().
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if ( u32Version != REM_SAVED_STATE_VERSION
        && u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff (1.6 only: the invalidated-page list).
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the TLB so the loaded hflags/state take effect on next execution.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
590
591
592
593#undef LOG_GROUP
594#define LOG_GROUP LOG_GROUP_REM_RUN
595
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * (interrupt_request is saved here and restored at the bottom.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping,
     * otherwise cpu_exec would stop on the breakpoint instead of after one instruction.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);   /* remove returns 0 on success => fBp true if one was there */

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Successful step: nudge the TSC and virtual clocks forward. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the remaining QEMU exit codes onto VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED: rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM rc was raised from inside the recompiler; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
676
677
678/**
679 * Set a breakpoint using the REM facilities.
680 *
681 * @returns VBox status code.
682 * @param pVM The VM handle.
683 * @param Address The breakpoint address.
684 * @thread The emulation thread.
685 */
686REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
687{
688 VM_ASSERT_EMT(pVM);
689 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
690 {
691 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
692 return VINF_SUCCESS;
693 }
694 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
695 return VERR_REM_NO_MORE_BP_SLOTS;
696}
697
698
699/**
700 * Clears a breakpoint set by REMR3BreakpointSet().
701 *
702 * @returns VBox status code.
703 * @param pVM The VM handle.
704 * @param Address The breakpoint address.
705 * @thread The emulation thread.
706 */
707REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
708{
709 VM_ASSERT_EMT(pVM);
710 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
711 {
712 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
713 return VINF_SUCCESS;
714 }
715 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
716 return VERR_REM_BP_NOT_FOUND;
717}
718
719
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * (fFlushTBs is restored right after the sync regardless of outcome.)
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt mask; it's restored before REMR3StateBack below. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        /* Translate the QEMU exit code into a VBox status code. */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the REM breakpoint list to decide. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;    /* consume the stashed rc */
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
867
868
869/**
870 * Runs code in recompiled mode.
871 *
872 * Before calling this function the REM state needs to be in sync with
873 * the VM. Call REMR3State() to perform the sync. It's only necessary
874 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
875 * and after calling REMR3StateBack().
876 *
877 * @returns VBox status code.
878 *
879 * @param pVM VM Handle.
880 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Let TM account the time spent executing recompiled code. */
    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);
    /* Map the QEMU exit reason (EXCP_*) onto a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Default to 'stepped' and upgrade to
               'breakpoint' if the current PC matches a registered breakpoint. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         * Consume the stashed status and reset the slot so a stale value
         * cannot be returned twice.
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
995
996
997/**
998 * Check if the cpu state is suitable for Raw execution.
999 *
1000 * @returns boolean
1001 * @param env The CPU env struct.
1002 * @param eip The EIP to check this for (might differ from env->eip).
1003 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1004 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1005 *
1006 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1007 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* When VT-x/AMD-V is enabled, ask HWACCM instead of applying the
       software raw-mode criteria further down. */
    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * (The '>> 8 & 0xF0FF' pattern converts QEMU's descriptor flags
         * word into the CPUM attribute layout.)
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
#ifdef LOG_ENABLED
        Ctx.rip = env->eip;
#endif

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    /* Single-instruction emulation requests must stay in REM. */
    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE only if the (guest-visible) CPUID advertises it. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    /* Ring 3 code gets its own set of criteria... */
    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* ...anything that isn't CPL 3 is handled as (raw) ring 0 here. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* PATM patch code is always forced to raw mode. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1233
1234
1235/**
1236 * Fetches a code byte.
1237 *
1238 * @returns Success indicator (bool) for ease of use.
1239 * @param env The CPU environment structure.
1240 * @param GCPtrInstr Where to fetch code.
1241 * @param pu8Byte Where to store the byte on success
1242 */
1243bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1244{
1245 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1246 if (RT_SUCCESS(rc))
1247 return true;
1248 return false;
1249}
1250
1251
1252/**
1253 * Flush (or invalidate if you like) page table/dir entry.
1254 *
1255 * (invlpg instruction; tlb_flush_page)
1256 *
1257 * @param env Pointer to cpu environment.
1258 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1259 */
1260void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1261{
1262 PVM pVM = env->pVM;
1263 PCPUMCTX pCtx;
1264 int rc;
1265
1266 /*
1267 * When we're replaying invlpg instructions or restoring a saved
1268 * state we disable this path.
1269 */
1270 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1271 return;
1272 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1273 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1274
1275 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1276
1277 /*
1278 * Update the control registers before calling PGMFlushPage.
1279 */
1280 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1281 pCtx->cr0 = env->cr[0];
1282 pCtx->cr3 = env->cr[3];
1283 pCtx->cr4 = env->cr[4];
1284
1285 /*
1286 * Let PGM do the rest.
1287 */
1288 rc = PGMInvalidatePage(pVM, GCPtr);
1289 if (RT_FAILURE(rc))
1290 {
1291 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1292 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1293 }
1294 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1295}
1296
1297
1298#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Translates a guest physical address into a host pointer for the TLB,
 * encoding the access status in the low pointer bits.
 *
 * @returns Host pointer; low bit 1 set when the page is inaccessible,
 *          low bit 2 set when writes must be caught.
 * @param env1     Pointer to the CPU environment.
 * @param physAddr The guest physical address (must be 4-byte aligned).
 * @param fWritable Whether write access is desired.
 *                 NOTE(review): currently ignored — the page is always
 *                 requested writable and write monitoring is signalled via
 *                 the tag bits below; confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    /* Encode the status in the two low bits (alignment asserted above):
       1 = inaccessible, 2 = catch writes. */
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1318
1319target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr)
1320{
1321 RTGCPHYS rv = 0;
1322 int rc;
1323
1324 rc = PGMR3DbgR3Ptr2GCPhys(env1->pVM, (RTR3PTR)addr, &rv);
1325 Assert (RT_SUCCESS(rc));
1326
1327 return (target_ulong)rv;
1328}
1329#endif
1330
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * @param env Pointer to the CPU environment.
 * @param GCPtr Code page to monitor
 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    /* Let CSAM write-monitor the page so self-modifying code is detected;
       only worthwhile in paged supervisor-mode raw execution. */
    if (    (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)       /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                       /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1349
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * @param env Pointer to the CPU environment.
 * @param GCPtr Code page to stop monitoring.
 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    /* The condition chain mirrors remR3ProtectCode: only undo the CSAM
       monitoring under the same circumstances it was established. */
    if (    (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)       /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                       /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1368
1369/**
1370 * Called when the CPU is initialized, any of the CRx registers are changed or
1371 * when the A20 line is modified.
1372 *
1373 * @param env Pointer to the CPU environment.
1374 * @param fGlobal Set if the flush is global.
1375 */
1376void remR3FlushTLB(CPUState *env, bool fGlobal)
1377{
1378 PVM pVM = env->pVM;
1379 PCPUMCTX pCtx;
1380
1381 /*
1382 * When we're replaying invlpg instructions or restoring a saved
1383 * state we disable this path.
1384 */
1385 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1386 return;
1387 Assert(pVM->rem.s.fInREM);
1388
1389 /*
1390 * The caller doesn't check cr4, so we have to do that for ourselves.
1391 */
1392 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1393 fGlobal = true;
1394 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1395
1396 /*
1397 * Update the control registers before calling PGMR3FlushTLB.
1398 */
1399 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1400 pCtx->cr0 = env->cr[0];
1401 pCtx->cr3 = env->cr[3];
1402 pCtx->cr4 = env->cr[4];
1403
1404 /*
1405 * Let PGM do the rest.
1406 */
1407 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1408}
1409
1410
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    int rc;
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
#else
    /* 32-bit targets have no EFER; pass 0. */
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
#endif
}
1449
1450
1451/**
1452 * Called from compiled code to run dma.
1453 *
1454 * @param env Pointer to the CPU environment.
1455 */
1456void remR3DmaRun(CPUState *env)
1457{
1458 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1459 PDMR3DmaRun(env->pVM);
1460 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1461}
1462
1463
1464/**
1465 * Called from compiled code to schedule pending timers in VMM
1466 *
1467 * @param env Pointer to the CPU environment.
1468 */
1469void remR3TimersRun(CPUState *env)
1470{
1471 LogFlow(("remR3TimersRun:\n"));
1472 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1473 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1474 TMR3TimerQueuesDo(env->pVM);
1475 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1476 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1477}
1478
1479
/**
 * Record trap occurance
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_REM_TOO_MANY_TRAPS when
 *          the same exception keeps repeating at the same location).
 * @param env Pointer to the CPU environment.
 * @param uTrap Trap nr
 * @param uErrorCode Error code
 * @param pvNextEIP Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    /* Lazily register a statistics counter per trap number the first time
       that trap is seen. */
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* CPU exceptions (vector < 0x20) in protected, non-V86 mode get
       repeat detection so a hopeless trap loop can be aborted. */
    if(    uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same pending exception more than 512 times in a row? Bail out. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap number or a different fault location restarts
           the repeat count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    else
    {
        /* Other traps don't count towards the repeat limit; just record them. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    return VINF_SUCCESS;
}
1539
1540
1541/*
1542 * Clear current active trap
1543 *
1544 * @param pVM VM Handle.
1545 */
1546void remR3TrapClear(PVM pVM)
1547{
1548 pVM->rem.s.cPendingExceptions = 0;
1549 pVM->rem.s.uPendingException = 0;
1550 pVM->rem.s.uPendingExcptEIP = 0;
1551 pVM->rem.s.uPendingExcptCR2 = 0;
1552}
1553
1554
1555/*
1556 * Record previous call instruction addresses
1557 *
1558 * @param env Pointer to the CPU environment.
1559 */
1560void remR3RecordCall(CPUState *env)
1561{
1562 CSAMR3RecordCallAddress(env->pVM, env->eip);
1563}
1564
1565
1566/**
1567 * Syncs the internal REM state with the VM.
1568 *
1569 * This must be called before REMR3Run() is invoked whenever when the REM
1570 * state is not up to date. Calling it several times in a row is not
1571 * permitted.
1572 *
1573 * @returns VBox status code.
1574 *
1575 * @param pVM VM Handle.
1576 * @param fFlushTBs Flush all translation blocks before executing code
1577 *
1578 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1579 * no do this since the majority of the callers don't want any unnecessary of events
1580 * pending that would immediatly interrupt execution.
1581 */
1582REMR3DECL(int) REMR3State(PVM pVM)
1583{
1584 register const CPUMCTX *pCtx;
1585 register unsigned fFlags;
1586 bool fHiddenSelRegsValid;
1587 unsigned i;
1588 TRPMEVENT enmType;
1589 uint8_t u8TrapNo;
1590 int rc;
1591
1592 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1593 Log2(("REMR3State:\n"));
1594
1595 pCtx = pVM->rem.s.pCtx;
1596 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1597
1598 Assert(!pVM->rem.s.fInREM);
1599 pVM->rem.s.fInStateSync = true;
1600
1601 /*
1602 * If we have to flush TBs, do that immediately.
1603 */
1604 if (pVM->rem.s.fFlushTBs)
1605 {
1606 STAM_COUNTER_INC(&gStatFlushTBs);
1607 tb_flush(&pVM->rem.s.Env);
1608 pVM->rem.s.fFlushTBs = false;
1609 }
1610
1611 /*
1612 * Copy the registers which require no special handling.
1613 */
1614#ifdef TARGET_X86_64
1615 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1616 Assert(R_EAX == 0);
1617 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1618 Assert(R_ECX == 1);
1619 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1620 Assert(R_EDX == 2);
1621 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1622 Assert(R_EBX == 3);
1623 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1624 Assert(R_ESP == 4);
1625 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1626 Assert(R_EBP == 5);
1627 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1628 Assert(R_ESI == 6);
1629 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1630 Assert(R_EDI == 7);
1631 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1632 pVM->rem.s.Env.regs[8] = pCtx->r8;
1633 pVM->rem.s.Env.regs[9] = pCtx->r9;
1634 pVM->rem.s.Env.regs[10] = pCtx->r10;
1635 pVM->rem.s.Env.regs[11] = pCtx->r11;
1636 pVM->rem.s.Env.regs[12] = pCtx->r12;
1637 pVM->rem.s.Env.regs[13] = pCtx->r13;
1638 pVM->rem.s.Env.regs[14] = pCtx->r14;
1639 pVM->rem.s.Env.regs[15] = pCtx->r15;
1640
1641 pVM->rem.s.Env.eip = pCtx->rip;
1642
1643 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1644#else
1645 Assert(R_EAX == 0);
1646 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1647 Assert(R_ECX == 1);
1648 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1649 Assert(R_EDX == 2);
1650 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1651 Assert(R_EBX == 3);
1652 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1653 Assert(R_ESP == 4);
1654 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1655 Assert(R_EBP == 5);
1656 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1657 Assert(R_ESI == 6);
1658 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1659 Assert(R_EDI == 7);
1660 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1661 pVM->rem.s.Env.eip = pCtx->eip;
1662
1663 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1664#endif
1665
1666 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1667
1668 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1669 for (i=0;i<8;i++)
1670 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1671
1672 /*
1673 * Clear the halted hidden flag (the interrupt waking up the CPU can
1674 * have been dispatched in raw mode).
1675 */
1676 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1677
1678 /*
1679 * Replay invlpg?
1680 */
1681 if (pVM->rem.s.cInvalidatedPages)
1682 {
1683 RTUINT i;
1684
1685 pVM->rem.s.fIgnoreInvlPg = true;
1686 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1687 {
1688 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1689 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1690 }
1691 pVM->rem.s.fIgnoreInvlPg = false;
1692 pVM->rem.s.cInvalidatedPages = 0;
1693 }
1694
1695 /* Replay notification changes? */
1696 if (pVM->rem.s.cHandlerNotifications)
1697 REMR3ReplayHandlerNotifications(pVM);
1698
1699 /* Update MSRs; before CRx registers! */
1700 pVM->rem.s.Env.efer = pCtx->msrEFER;
1701 pVM->rem.s.Env.star = pCtx->msrSTAR;
1702 pVM->rem.s.Env.pat = pCtx->msrPAT;
1703#ifdef TARGET_X86_64
1704 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1705 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1706 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1707 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1708
1709 /* Update the internal long mode activate flag according to the new EFER value. */
1710 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1711 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1712 else
1713 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1714#endif
1715
1716
1717 /*
1718 * Registers which are rarely changed and require special handling / order when changed.
1719 */
1720 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1721 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1722 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1723 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR | CPUM_CHANGED_TR
1724 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1725 {
1726 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1727 {
1728 pVM->rem.s.fIgnoreCR3Load = true;
1729 tlb_flush(&pVM->rem.s.Env, true);
1730 pVM->rem.s.fIgnoreCR3Load = false;
1731 }
1732
1733 /* CR4 before CR0! */
1734 if (fFlags & CPUM_CHANGED_CR4)
1735 {
1736 pVM->rem.s.fIgnoreCR3Load = true;
1737 pVM->rem.s.fIgnoreCpuMode = true;
1738 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1739 pVM->rem.s.fIgnoreCpuMode = false;
1740 pVM->rem.s.fIgnoreCR3Load = false;
1741 }
1742
1743 if (fFlags & CPUM_CHANGED_CR0)
1744 {
1745 pVM->rem.s.fIgnoreCR3Load = true;
1746 pVM->rem.s.fIgnoreCpuMode = true;
1747 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1748 pVM->rem.s.fIgnoreCpuMode = false;
1749 pVM->rem.s.fIgnoreCR3Load = false;
1750 }
1751
1752 if (fFlags & CPUM_CHANGED_CR3)
1753 {
1754 pVM->rem.s.fIgnoreCR3Load = true;
1755 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1756 pVM->rem.s.fIgnoreCR3Load = false;
1757 }
1758
1759 if (fFlags & CPUM_CHANGED_GDTR)
1760 {
1761 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1762 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1763 }
1764
1765 if (fFlags & CPUM_CHANGED_IDTR)
1766 {
1767 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1768 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1769 }
1770
1771 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1772 {
1773 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1774 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1775 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1776 }
1777
1778 if (fFlags & CPUM_CHANGED_LDTR)
1779 {
1780 if (fHiddenSelRegsValid)
1781 {
1782 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1783 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1784 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1785 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;;
1786 }
1787 else
1788 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1789 }
1790
1791 if (fFlags & CPUM_CHANGED_TR)
1792 {
1793 if (fHiddenSelRegsValid)
1794 {
1795 pVM->rem.s.Env.tr.selector = pCtx->tr;
1796 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1797 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1798 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;;
1799 }
1800 else
1801 sync_tr(&pVM->rem.s.Env, pCtx->tr);
1802
1803 /** @note do_interrupt will fault if the busy flag is still set.... */
1804 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1805 }
1806
1807 if (fFlags & CPUM_CHANGED_CPUID)
1808 {
1809 uint32_t u32Dummy;
1810
1811 /*
1812 * Get the CPUID features.
1813 */
1814 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1815 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1816 }
1817
1818 /* Sync FPU state after CR4, CPUID and EFER (!). */
1819 if (fFlags & CPUM_CHANGED_FPU_REM)
1820 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1821 }
1822
1823 /*
1824 * Update selector registers.
1825 * This must be done *after* we've synced gdt, ldt and crX registers
1826 * since we're reading the GDT/LDT om sync_seg. This will happen with
1827 * saved state which takes a quick dip into rawmode for instance.
1828 */
1829 /*
1830 * Stack; Note first check this one as the CPL might have changed. The
1831 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1832 */
1833
1834 if (fHiddenSelRegsValid)
1835 {
1836 /* The hidden selector registers are valid in the CPU context. */
1837 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1838
1839 /* Set current CPL */
1840 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1841
1842 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1843 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1844 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1845 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1846 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1847 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1848 }
1849 else
1850 {
1851 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1852 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1853 {
1854 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1855
1856 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1857 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1858#ifdef VBOX_WITH_STATISTICS
1859 if (pVM->rem.s.Env.segs[R_SS].newselector)
1860 {
1861 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1862 }
1863#endif
1864 }
1865 else
1866 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1867
1868 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1869 {
1870 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1871 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1872#ifdef VBOX_WITH_STATISTICS
1873 if (pVM->rem.s.Env.segs[R_ES].newselector)
1874 {
1875 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1876 }
1877#endif
1878 }
1879 else
1880 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1881
1882 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1883 {
1884 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1885 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1886#ifdef VBOX_WITH_STATISTICS
1887 if (pVM->rem.s.Env.segs[R_CS].newselector)
1888 {
1889 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1890 }
1891#endif
1892 }
1893 else
1894 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1895
1896 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1897 {
1898 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1899 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1900#ifdef VBOX_WITH_STATISTICS
1901 if (pVM->rem.s.Env.segs[R_DS].newselector)
1902 {
1903 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1904 }
1905#endif
1906 }
1907 else
1908 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1909
1910 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1911 * be the same but not the base/limit. */
1912 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1913 {
1914 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1915 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1916#ifdef VBOX_WITH_STATISTICS
1917 if (pVM->rem.s.Env.segs[R_FS].newselector)
1918 {
1919 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1920 }
1921#endif
1922 }
1923 else
1924 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1925
1926 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1927 {
1928 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1929 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1930#ifdef VBOX_WITH_STATISTICS
1931 if (pVM->rem.s.Env.segs[R_GS].newselector)
1932 {
1933 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1934 }
1935#endif
1936 }
1937 else
1938 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1939 }
1940
1941 /*
1942 * Check for traps.
1943 */
1944 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
1945 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
1946 if (RT_SUCCESS(rc))
1947 {
1948#ifdef DEBUG
1949 if (u8TrapNo == 0x80)
1950 {
1951 remR3DumpLnxSyscall(pVM);
1952 remR3DumpOBsdSyscall(pVM);
1953 }
1954#endif
1955
1956 pVM->rem.s.Env.exception_index = u8TrapNo;
1957 if (enmType != TRPM_SOFTWARE_INT)
1958 {
1959 pVM->rem.s.Env.exception_is_int = 0;
1960 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
1961 }
1962 else
1963 {
1964 /*
1965 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
1966 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
1967 * for int03 and into.
1968 */
1969 pVM->rem.s.Env.exception_is_int = 1;
1970 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
1971 /* int 3 may be generated by one-byte 0xcc */
1972 if (u8TrapNo == 3)
1973 {
1974 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
1975 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1976 }
1977 /* int 4 may be generated by one-byte 0xce */
1978 else if (u8TrapNo == 4)
1979 {
1980 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
1981 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1982 }
1983 }
1984
1985 /* get error code and cr2 if needed. */
1986 switch (u8TrapNo)
1987 {
1988 case 0x0e:
1989 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
1990 /* fallthru */
1991 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
1992 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
1993 break;
1994
1995 case 0x11: case 0x08:
1996 default:
1997 pVM->rem.s.Env.error_code = 0;
1998 break;
1999 }
2000
2001 /*
2002 * We can now reset the active trap since the recompiler is gonna have a go at it.
2003 */
2004 rc = TRPMResetTrap(pVM);
2005 AssertRC(rc);
2006 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2007 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2008 }
2009
2010 /*
2011 * Clear old interrupt request flags; Check for pending hardware interrupts.
2012 * (See @remark for why we don't check for other FFs.)
2013 */
2014 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2015 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2016 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2017 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2018
2019 /*
2020 * We're now in REM mode.
2021 */
2022 pVM->rem.s.fInREM = true;
2023 pVM->rem.s.fInStateSync = false;
2024 pVM->rem.s.cCanExecuteRaw = 0;
2025 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2026 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2027 return VINF_SUCCESS;
2028}
2029
2030
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /* FPU/SSE state: convert the recompiler's layout back into the raw FXSAVE image. */
    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

    /* General purpose registers. */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors whose lazy (re)load is still outstanding in the recompiler. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    /* Control registers. */
    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    /* Debug registers DR0..DR7. */
    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDT/IDT base changes must be propagated via force-action flags so SELM/TRPM resync. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr  = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr    = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /*
     * Hidden selector registers (base/limit/attributes).
     * NOTE(review): the 0xF0FF mask here drops bits 8-11 of the attribute word,
     * while remR3StateUpdate() uses 0xFFFF for the same fields - confirm which
     * masking is intended; keeping the existing behavior as-is.
     */
    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    pCtx->ldtrHid.u64Base  = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u   = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;

    pCtx->trHid.u64Base    = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit   = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u     = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    /* Drop any trap REM already had queued; a pending one is re-asserted below. */
    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Hand the error code (and #PF fault address) over to TRPM where applicable. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're not longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2260
2261
/**
 * This is called by the disassembler when it wants to update the cpu state
 * before for instance doing a register dump.
 *
 * Partial copy-back of the recompiler CPU state into CPUMCTX; unlike
 * REMR3StateBack() it does not touch the pending trap/interrupt state
 * and leaves pVM->rem.s.fInREM set.
 *
 * @param   pVM     VM Handle.
 */
static void remR3StateUpdate(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR         = 0;
    pCtx->fpu.MXCSR_MASK    = 0;

    /* FPU/SSE state: convert the recompiler's layout back into the raw FXSAVE image. */
    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

    /* General purpose registers. */
#ifdef TARGET_X86_64
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];
#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    /* Segment selectors. */
    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    /* Control registers. */
    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    /* Debug registers DR0..DR7. */
    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDT/IDT base changes must be propagated via force-action flags so SELM/TRPM resync. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt     = (RTGCPTR)pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt     = (RTGCPTR)pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr  = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr    = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /*
     * Hidden selector registers (base/limit/attributes).
     * NOTE(review): this uses a 0xFFFF attribute mask, while REMR3StateBack()
     * uses 0xF0FF for the same fields - confirm which masking is intended.
     */
    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;

    pCtx->ldtrHid.u64Base  = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u   = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;

    pCtx->trHid.u64Base    = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit   = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u     = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

}
2425
2426
2427/**
2428 * Update the VMM state information if we're currently in REM.
2429 *
2430 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2431 * we're currently executing in REM and the VMM state is invalid. This method will of
2432 * course check that we're executing in REM before syncing any data over to the VMM.
2433 *
2434 * @param pVM The VM handle.
2435 */
2436REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2437{
2438 if (pVM->rem.s.fInREM)
2439 remR3StateUpdate(pVM);
2440}
2441
2442
2443#undef LOG_GROUP
2444#define LOG_GROUP LOG_GROUP_REM
2445
2446
2447/**
2448 * Notify the recompiler about Address Gate 20 state change.
2449 *
2450 * This notification is required since A20 gate changes are
2451 * initialized from a device driver and the VM might just as
2452 * well be in REM mode as in RAW mode.
2453 *
2454 * @param pVM VM handle.
2455 * @param fEnable True if the gate should be enabled.
2456 * False if the gate should be disabled.
2457 */
2458REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2459{
2460 bool fSaved;
2461
2462 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2463 VM_ASSERT_EMT(pVM);
2464
2465 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2466 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2467
2468 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2469
2470 pVM->rem.s.fIgnoreAll = fSaved;
2471}
2472
2473
2474/**
2475 * Replays the invalidated recorded pages.
2476 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2477 *
2478 * @param pVM VM handle.
2479 */
2480REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2481{
2482 RTUINT i;
2483
2484 VM_ASSERT_EMT(pVM);
2485
2486 /*
2487 * Sync the required registers.
2488 */
2489 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2490 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2491 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2492 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2493
2494 /*
2495 * Replay the flushes.
2496 */
2497 pVM->rem.s.fIgnoreInvlPg = true;
2498 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2499 {
2500 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2501 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2502 }
2503 pVM->rem.s.fIgnoreInvlPg = false;
2504 pVM->rem.s.cInvalidatedPages = 0;
2505}
2506
2507
2508/**
2509 * Replays the handler notification changes
2510 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2511 *
2512 * @param pVM VM handle.
2513 */
2514REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2515{
2516 /*
2517 * Replay the flushes.
2518 */
2519 RTUINT i;
2520 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2521
2522 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2523 VM_ASSERT_EMT(pVM);
2524
2525 pVM->rem.s.cHandlerNotifications = 0;
2526 for (i = 0; i < c; i++)
2527 {
2528 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2529 switch (pRec->enmKind)
2530 {
2531 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2532 REMR3NotifyHandlerPhysicalRegister(pVM,
2533 pRec->u.PhysicalRegister.enmType,
2534 pRec->u.PhysicalRegister.GCPhys,
2535 pRec->u.PhysicalRegister.cb,
2536 pRec->u.PhysicalRegister.fHasHCHandler);
2537 break;
2538
2539 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2540 REMR3NotifyHandlerPhysicalDeregister(pVM,
2541 pRec->u.PhysicalDeregister.enmType,
2542 pRec->u.PhysicalDeregister.GCPhys,
2543 pRec->u.PhysicalDeregister.cb,
2544 pRec->u.PhysicalDeregister.fHasHCHandler,
2545 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2546 break;
2547
2548 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2549 REMR3NotifyHandlerPhysicalModify(pVM,
2550 pRec->u.PhysicalModify.enmType,
2551 pRec->u.PhysicalModify.GCPhysOld,
2552 pRec->u.PhysicalModify.GCPhysNew,
2553 pRec->u.PhysicalModify.cb,
2554 pRec->u.PhysicalModify.fHasHCHandler,
2555 pRec->u.PhysicalModify.fRestoreAsRAM);
2556 break;
2557
2558 default:
2559 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2560 break;
2561 }
2562 }
2563 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2564}
2565
2566
/**
 * Notify REM about changed code page.
 *
 * @returns VBox status code (always VINF_SUCCESS; a failed guest page lookup
 *          is silently ignored).
 * @param   pVM         VM handle.
 * @param   pvCodePage  Code page address (guest virtual).
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int      rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        /* Invalidate all translation blocks overlapping the page. */
        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2604
2605
/**
 * Notification about a successful MMR3PhysRegister() call.
 *
 * Registers the RAM range with the recompiler and, for the base RAM range
 * (GCPhys == 0), also allocates and initializes the global dirty-page bitmap.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address the RAM.
 * @param   cb          Size of the memory.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
{
    uint32_t cbBitmap;
    int rc;
    Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Base ram? If so, set up the dirty-page tracking bitmap (one byte per page).
     */
    if (!GCPhys)
    {
        phys_ram_size = cb;
        phys_ram_dirty_size = cb >> PAGE_SHIFT;
#ifndef VBOX_STRICT
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
        /* Strict builds allocate the full 4GB map and protect the tail so
           out-of-range accesses fault immediately instead of corrupting memory. */
        phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
        cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
        AssertRC(rc);
        phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
#endif
        /* Start with everything marked dirty. */
        memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    }

    /*
     * Register the ram. The fIgnoreAll flag suppresses REM callbacks while we poke QEMU.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

#ifdef VBOX_WITH_NEW_PHYS_CODE
    if (fFlags & MM_RAM_FLAGS_RESERVED)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else
        cpu_register_physical_memory(GCPhys, cb, GCPhys);
#else
    if (!GCPhys)
        cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
    else
    {
        if (fFlags & MM_RAM_FLAGS_RESERVED)
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        else
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
    }
#endif
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2674
2675#ifndef VBOX_WITH_NEW_PHYS_CODE
2676
/**
 * Notification about a successful PGMR3PhysRegisterChunk() call.
 *
 * Registers one dynamically allocated RAM chunk with the recompiler.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address the RAM.
 * @param   cb          Size of the memory.
 * @param   pvRam       The HC address of the RAM.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
{
    Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(pvRam);
    Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
    Assert(fFlags == 0 /* normal RAM */);
    /* Suppress REM callbacks while registering with QEMU. */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;
    cpu_register_physical_memory(GCPhys, cb, GCPhys);
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2705
2706
2707/**
2708 * Grows dynamically allocated guest RAM.
2709 * Will raise a fatal error if the operation fails.
2710 *
2711 * @param physaddr The physical address.
2712 */
2713void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2714{
2715 int rc;
2716 PVM pVM = cpu_single_env->pVM;
2717 const RTGCPHYS GCPhys = physaddr;
2718
2719 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2720 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2721 if (RT_SUCCESS(rc))
2722 return;
2723
2724 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2725 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2726 AssertFatalFailed();
2727}
2728
2729#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2730
/**
 * Notification about a successful MMR3PhysRomRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address of the ROM.
 * @param   cb          The size of the ROM.
 * @param   pvCopy      Pointer to the ROM copy.
 * @param   fShadow     Whether it's currently writable shadow ROM or normal readonly ROM.
 *                      This function will be called when ever the protection of the
 *                      shadow ROM changes (at reset and end of POST).
 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
    Assert(pvCopy);
    Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);

    /*
     * Register the rom. IO_MEM_ROM marks it read-only unless it is shadow ROM,
     * which stays writable. Callbacks are suppressed via fIgnoreAll meanwhile.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));

    /* Dump the tail of the ROM copy for debugging. */
    Log2(("%.64Rhxd\n", (char *)pvCopy + cb - 64));

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2769
2770
2771/**
2772 * Notification about a successful memory deregistration or reservation.
2773 *
2774 * @param pVM VM Handle.
2775 * @param GCPhys Start physical address.
2776 * @param cb The size of the range.
2777 * @todo Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't
2778 * reserve any memory soon.
2779 */
2780REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2781{
2782 Log(("REMR3NotifyPhysReserve: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2783 VM_ASSERT_EMT(pVM);
2784
2785 /*
2786 * Validate input - we trust the caller.
2787 */
2788 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2789 Assert(cb);
2790 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2791
2792 /*
2793 * Unassigning the memory.
2794 */
2795 Assert(!pVM->rem.s.fIgnoreAll);
2796 pVM->rem.s.fIgnoreAll = true;
2797
2798 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2799
2800 Assert(pVM->rem.s.fIgnoreAll);
2801 pVM->rem.s.fIgnoreAll = false;
2802}
2803
2804
2805/**
2806 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2807 *
2808 * @param pVM VM Handle.
2809 * @param enmType Handler type.
2810 * @param GCPhys Handler range address.
2811 * @param cb Size of the handler range.
2812 * @param fHasHCHandler Set if the handler has a HC callback function.
2813 *
2814 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2815 * Handler memory type to memory which has no HC handler.
2816 */
2817REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2818{
2819 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2820 enmType, GCPhys, cb, fHasHCHandler));
2821 VM_ASSERT_EMT(pVM);
2822 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2823 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2824
2825 if (pVM->rem.s.cHandlerNotifications)
2826 REMR3ReplayHandlerNotifications(pVM);
2827
2828 Assert(!pVM->rem.s.fIgnoreAll);
2829 pVM->rem.s.fIgnoreAll = true;
2830
2831 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2832 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2833 else if (fHasHCHandler)
2834 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2835
2836 Assert(pVM->rem.s.fIgnoreAll);
2837 pVM->rem.s.fIgnoreAll = false;
2838}
2839
2840
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);

    /* Flush any queued notifications first so ordering stays intact. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Ranges outside of RAM become unassigned again. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            /* Restore the range as plain RAM. */
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2884
2885
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore the old range as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* Moving an MMIO range is not supported by this notification. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    if (fHasHCHandler)
    {
        /* Suppress the page-state callbacks our own re-registrations trigger. */
        Assert(!pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = true;

        /*
         * Reset the old page.
         */
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);

        Assert(pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = false;
    }
}
2936
2937
/**
 * Checks if we're handling access to this page or not.
 *
 * @returns true if we're trapping access.
 * @returns false if we aren't.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The physical address.
 *
 * @remark  This function will only work correctly in VBOX_STRICT builds!
 */
REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
{
#ifdef VBOX_STRICT
    unsigned long off;
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    /* The low (offset) bits of the phys page entry encode the memory (IO) type. */
    off = get_phys_page_offset(GCPhys);
    return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
        || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
        || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
#else
    /* Non-strict builds cannot query this; conservatively report "not handled". */
    return false;
#endif
}
2963
2964
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry for the page (memory type bits + addend).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored code page: dump diagnostics to the release log and
       abort — executing MMIO (or otherwise special) memory as code is fatal. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3003
/**
 * Read guest RAM and ROM.
 *
 * @param   SrcGCPhys       The source address (guest physical).
 * @param   pvDst           The destination address.
 * @param   cb              Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    /* Delegate to PGM; cpu_single_env yields the VM of the executing CPU. */
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3021
3022
/**
 * Read guest RAM and ROM, unsigned 8-bit.
 *
 * @returns The value read, zero-extended.
 * @param   SrcGCPhys       The source address (guest physical).
 */
RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
{
    uint8_t val;
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
#endif
    return val;
}
3040
3041
/**
 * Read guest RAM and ROM, signed 8-bit.
 *
 * @returns The value read, sign-extended.
 * @param   SrcGCPhys       The source address (guest physical).
 */
RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
{
    int8_t val;
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
#endif
    return val;
}
3059
3060
/**
 * Read guest RAM and ROM, unsigned 16-bit.
 *
 * @returns The value read, zero-extended.
 * @param   SrcGCPhys       The source address (guest physical).
 */
RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
{
    uint16_t val;
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
#endif
    return val;
}
3078
3079
/**
 * Read guest RAM and ROM, signed 16-bit.
 *
 * @returns The value read, sign-extended.
 * @param   SrcGCPhys       The source address (guest physical).
 */
RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
{
    int16_t val;
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
#endif
    return val;
}
3097
3098
/**
 * Read guest RAM and ROM, unsigned 32-bit.
 *
 * @returns The value read, zero-extended.
 * @param   SrcGCPhys       The source address (guest physical).
 */
RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
{
    uint32_t val;
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
#endif
    return val;
}
3116
3117
/**
 * Read guest RAM and ROM, signed 32-bit.
 *
 * @returns The value read, sign-extended.
 * @param   SrcGCPhys       The source address (guest physical).
 */
RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
{
    int32_t val;
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
#endif
    return val;
}
3135
3136
/**
 * Read guest RAM and ROM, unsigned 64-bit.
 *
 * @returns The value read.
 * @param   SrcGCPhys       The source address (guest physical).
 */
uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
{
    uint64_t val;
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
#endif
    return val;
}
3154
/**
 * Read guest RAM and ROM, signed 64-bit.
 *
 * @returns The value read.
 * @param   SrcGCPhys       The source address (guest physical).
 */
int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
{
    int64_t val;
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
#endif
    return val;
}
3172
3173
/**
 * Write guest RAM.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   pvSrc           The source address.
 * @param   cb              Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    /* Delegate to PGM; cpu_single_env yields the VM of the executing CPU. */
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3191
3192
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3209
3210
/**
 * Write guest RAM, unsigned 16-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3227
3228
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3245
3246
3247/**
3248 * Write guest RAM, unsigned 64-bit.
3249 *
3250 * @param DstGCPhys The destination address (guest physical).
3251 * @param val Value
3252 */
3253void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3254{
3255 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3256 VBOX_CHECK_ADDR(DstGCPhys);
3257 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3258 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3259#ifdef VBOX_DEBUG_PHYS
3260 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3261#endif
3262}
3263
3264#undef LOG_GROUP
3265#define LOG_GROUP LOG_GROUP_REM_MMIO
3266
/**
 * Read a byte from MMIO memory via IOM.
 *
 * @returns The byte read, zero-extended (u32 is pre-zeroed in case the read fails).
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to read from.
 */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
    return u32;
}
3276
/**
 * Read a 16-bit word from MMIO memory via IOM.
 *
 * @returns The word read, zero-extended (u32 is pre-zeroed in case the read fails).
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to read from.
 */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
    return u32;
}
3286
/**
 * Read a 32-bit dword from MMIO memory via IOM.
 *
 * @returns The dword read (u32 is pre-zeroed in case the read fails).
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to read from.
 */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
    return u32;
}
3296
/**
 * Write a byte to MMIO memory via IOM.
 *
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to write to.
 * @param   u32     The value to write (low 8 bits used).
 */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3305
/**
 * Write a 16-bit word to MMIO memory via IOM.
 *
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to write to.
 * @param   u32     The value to write (low 16 bits used).
 */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3314
/**
 * Write a 32-bit dword to MMIO memory via IOM.
 *
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to write to.
 * @param   u32     The value to write.
 */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3323
3324
3325#undef LOG_GROUP
3326#define LOG_GROUP LOG_GROUP_REM_HANDLER
3327
3328/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3329
/**
 * Handler-memory read, unsigned 8-bit: goes through PGM so registered
 * access handlers fire.
 *
 * @returns The byte read, zero-extended.
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to read from.
 */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3337
/**
 * Handler-memory read, unsigned 16-bit: goes through PGM so registered
 * access handlers fire.
 *
 * @returns The word read, zero-extended.
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to read from.
 */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3345
/**
 * Handler-memory read, unsigned 32-bit: goes through PGM so registered
 * access handlers fire.
 *
 * @returns The dword read.
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to read from.
 */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3353
/**
 * Handler-memory write, unsigned 8-bit: goes through PGM so registered
 * access handlers fire.
 *
 * NOTE(review): passes &u32 with a 1-byte size, which only picks up the low
 * byte on a little-endian host — confirm big-endian hosts are out of scope.
 *
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to write to.
 * @param   u32     The value to write (low 8 bits used).
 */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3359
/**
 * Handler-memory write, unsigned 16-bit: goes through PGM so registered
 * access handlers fire.
 *
 * NOTE(review): passes &u32 with a 2-byte size, which only picks up the low
 * word on a little-endian host — confirm big-endian hosts are out of scope.
 *
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to write to.
 * @param   u32     The value to write (low 16 bits used).
 */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3365
/**
 * Handler-memory write, unsigned 32-bit: goes through PGM so registered
 * access handlers fire.
 *
 * @param   pvVM    Opaque VM handle (PVM).
 * @param   GCPhys  The guest physical address to write to.
 * @param   u32     The value to write.
 */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3371
3372/* -+- disassembly -+- */
3373
3374#undef LOG_GROUP
3375#define LOG_GROUP LOG_GROUP_REM_DISAS
3376
3377
3378/**
3379 * Enables or disables singled stepped disassembly.
3380 *
3381 * @returns VBox status code.
3382 * @param pVM VM handle.
3383 * @param fEnable To enable set this flag, to disable clear it.
3384 */
3385static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3386{
3387 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3388 VM_ASSERT_EMT(pVM);
3389
3390 if (fEnable)
3391 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3392 else
3393 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3394 return VINF_SUCCESS;
3395}
3396
3397
/**
 * Enables or disables singled stepped disassembly.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   fEnable     To enable set this flag, to disable clear it.
 */
REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    PVMREQ pReq;
    int rc;

    LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
    /* Fast path: already on the EMT, flip the flag directly. */
    if (VM_IS_EMT(pVM))
        return remR3DisasEnableStepping(pVM, fEnable);

    /* Otherwise marshal the call onto an EMT and wait for its status. */
    rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
}
3421
3422
3423#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * @returns VBox status code.
 * @param   pCmd        The command descriptor (unused here).
 * @param   pCmdHlp     Debugger console callback table.
 * @param   pVM         VM handle.
 * @param   paArgs      Argument array (zero or one boolean argument).
 * @param   cArgs       Number of arguments.
 * @param   pResult     Where to store the result (unused here).
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
3446#endif
3447
3448
/**
 * Disassembles n instructions and prints them to the log.
 *
 * @returns Success indicator.
 * @param   env             Pointer to the recompiler CPU structure.
 * @param   f32BitCode      Indicates that whether or not the code should
 *                          be disassembled as 16 or 32 bit. If -1 the CS
 *                          selector will be inspected.
 * @param   nrInstructions  Nr of instructions to disassemble
 * @param   pszPrefix       Optional string logged in front of each line.
 * @remark  not currently used for anything but ad-hoc debugging.
 */
bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
{
    int i, rc;
    RTGCPTR GCPtrPC;
    uint8_t *pvPC;
    RTINTPTR off;
    DISCPUSTATE Cpu;

    /*
     * Determine 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Convert cs:eip to host context address.
     * We don't care too much about cross page correctness presently.
     */
    GCPtrPC = env->segs[R_CS].base + env->eip;
    if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);

        /* convert eip to physical address. */
        rc = PGMPhysGCPtr2R3PtrByGstCR3(env->pVM,
                                        GCPtrPC,
                                        env->cr[3],
                                        env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
                                        (void**)&pvPC);
        if (RT_FAILURE(rc))
        {
            /* Not mapped as RAM: may be a PATM patch-memory address. */
            if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
                return false;
            pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
                 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
        }
    }
    else
    {
        /* physical address */
        rc = PGMPhysGCPhys2R3Ptr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
                                 (void**)&pvPC);
        if (RT_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    /* NOTE(review): Cpu is not zero-initialized here (disas() below memsets
       its DISCPUSTATE) — confirm DISInstr sets every field it reads. */
    off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;

    for (i=0;i<nrInstructions;i++)
    {
        char szOutput[256];
        uint32_t cbOp;
        if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
            return false;
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));

        pvPC += cbOp;
    }
    return true;
}
3532
3533
3534/** @todo need to test the new code, using the old code in the mean while. */
3535#define USE_OLD_DUMP_AND_DISASSEMBLY
3536
/**
 * Disassembles one instruction and prints it to the log.
 *
 * @returns Success indicator.
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   f32BitCode  Indicates that whether or not the code should
 *                      be disassembled as 16 or 32 bit. If -1 the CS
 *                      selector will be inspected.
 * @param   pszPrefix   Optional string logged in front of the line.
 */
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
{
#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
    PVM pVM = env->pVM;
    RTGCPTR GCPtrPC;
    uint8_t *pvPC;
    char szOutput[256];
    uint32_t cbOp;
    RTINTPTR off;
    DISCPUSTATE Cpu;


    /* Doesn't work in long mode. */
    if (env->hflags & HF_LMA_MASK)
        return false;

    /*
     * Determine 16/32 bit mode.
     */
    if (f32BitCode == -1)
        f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */

    /*
     * Log registers
     */
    if (LogIs2Enabled())
    {
        remR3StateUpdate(pVM);
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
    }

    /*
     * Convert cs:eip to host context address.
     * We don't care too much about cross page correctness presently.
     */
    GCPtrPC = env->segs[R_CS].base + env->eip;
    if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
    {
        /* convert eip to physical address. */
        int rc = PGMPhysGCPtr2R3PtrByGstCR3(pVM,
                                            GCPtrPC,
                                            env->cr[3],
                                            env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
                                            (void**)&pvPC);
        if (RT_FAILURE(rc))
        {
            /* Not mapped as RAM: may be a PATM patch-memory address. */
            if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
                return false;
            pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
                 + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
        }
    }
    else
    {

        /* physical address */
        int rc = PGMPhysGCPhys2R3Ptr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
        if (RT_FAILURE(rc))
            return false;
    }

    /*
     * Disassemble.
     */
    /* NOTE(review): Cpu is not zero-initialized here (disas() below memsets
       its DISCPUSTATE) — confirm DISInstr sets every field it reads. */
    off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
    Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
    Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
    //Cpu.dwUserData[0] = (uintptr_t)pVM;
    //Cpu.dwUserData[1] = (uintptr_t)pvPC;
    //Cpu.dwUserData[2] = GCPtrPC;
    if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
        return false;

    if (!f32BitCode)
    {
        /* 16-bit code: include the CS selector so the address is meaningful. */
        if (pszPrefix)
            Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
        else
            Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
    }
    else
    {
        if (pszPrefix)
            Log(("%s: %s", pszPrefix, szOutput));
        else
            Log(("%s", szOutput));
    }
    return true;

#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
    PVM pVM = env->pVM;
    const bool fLog = LogIsEnabled();
    const bool fLog2 = LogIs2Enabled();
    int rc = VINF_SUCCESS;

    /*
     * Don't bother if there ain't any log output to do.
     */
    if (!fLog && !fLog2)
        return true;

    /*
     * Update the state so DBGF reads the correct register values.
     */
    remR3StateUpdate(pVM);

    /*
     * Log registers if requested.
     */
    if (!fLog2)
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);

    /*
     * Disassemble to log.
     */
    if (fLog)
        rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);

    return RT_SUCCESS(rc);
#endif
}
3668
3669
/**
 * Disassemble recompiled code.
 *
 * @param   phFile      Logfile usually; only written to with DEBUG_TMP_LOGGING,
 *                      otherwise output goes to the VBox log.
 * @param   pvCode      Pointer to the code block.
 * @param   cb          Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                DISAS_PRINTF("disas error\n");
                cbInstr = 1; /* guess one byte and continue (x86 host only) */
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
3717
3718
3719/**
3720 * Disassemble guest code.
3721 *
3722 * @param phFileIgnored Ignored, logfile usually.
3723 * @param uCode The guest address of the code to disassemble. (flat?)
3724 * @param cb Number of bytes to disassemble.
3725 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3726 */
3727void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3728{
3729#ifdef DEBUG_TMP_LOGGING
3730# define DISAS_PRINTF(x...) fprintf(phFile, x)
3731#else
3732# define DISAS_PRINTF(x...) RTLogPrintf(x)
3733 if (LogIs2Enabled())
3734#endif
3735 {
3736 PVM pVM = cpu_single_env->pVM;
3737 RTSEL cs;
3738 RTGCUINTPTR eip;
3739
3740 /*
3741 * Update the state so DBGF reads the correct register values (flags).
3742 */
3743 remR3StateUpdate(pVM);
3744
3745 /*
3746 * Do the disassembling.
3747 */
3748 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3749 cs = cpu_single_env->segs[R_CS].selector;
3750 eip = uCode - cpu_single_env->segs[R_CS].base;
3751 for (;;)
3752 {
3753 char szBuf[256];
3754 uint32_t cbInstr;
3755 int rc = DBGFR3DisasInstrEx(pVM,
3756 cs,
3757 eip,
3758 0,
3759 szBuf, sizeof(szBuf),
3760 &cbInstr);
3761 if (RT_SUCCESS(rc))
3762 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3763 else
3764 {
3765 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3766 cbInstr = 1;
3767 }
3768
3769 /* next */
3770 if (cb <= cbInstr)
3771 break;
3772 cb -= cbInstr;
3773 uCode += cbInstr;
3774 eip += cbInstr;
3775 }
3776 }
3777#undef DISAS_PRINTF
3778}
3779
3780
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer — overwritten by
 *          the next call and not thread safe. The formatted names include a
 *          trailing newline.
 * @param   orig_addr   The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    RTGCINTPTR off = 0;
    DBGFSYMBOL Sym;
    PVM pVM = cpu_single_env->pVM;
    int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
    if (RT_SUCCESS(rc))
    {
        static char szSym[sizeof(Sym.szName) + 48];
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            /* NOTE(review): %x paired with an RTGCINTPTR argument — confirm
               the width matches on 64-bit guest-pointer configurations. */
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
3806
3807
3808#undef LOG_GROUP
3809#define LOG_GROUP LOG_GROUP_REM
3810
3811
3812/* -+- FF notifications -+- */
3813
3814
/**
 * Notification about a pending interrupt.
 *
 * @param   pVM             VM Handle.
 * @param   u8Interrupt     Interrupt
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
{
    /* Only one interrupt may be pending at a time. */
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3827
/**
 * Queries the currently pending interrupt, if any.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM             VM Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
{
    return pVM->rem.s.u32PendingInterrupt;
}
3839
/**
 * Notification about the interrupt FF being set.
 *
 * @param   pVM             VM Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        /* Atomic or: may race the EMT executing inside the recompiler. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_HARD);
    }
}
3856
3857
/**
 * Notification about the interrupt FF being cleared.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
3870
3871
/**
 * Notification about pending timer(s).
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
{
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        /* Atomic or: may race the EMT executing inside the recompiler. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_TIMER);
    }
}
3889
3890
/**
 * Notification about pending DMA transfers.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Atomic or: may race the EMT executing inside the recompiler. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_DMA);
    }
}
3906
3907
/**
 * Notification about pending queue items, forcing a recompiler exit.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Atomic or: may race the EMT executing inside the recompiler. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
3923
3924
/**
 * Notification about pending FF set by an external thread.
 *
 * @param   pVM             VM handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Atomic or: may race the EMT executing inside the recompiler. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
3940
3941
3942#ifdef VBOX_WITH_STATISTICS
3943void remR3ProfileStart(int statcode)
3944{
3945 STAMPROFILEADV *pStat;
3946 switch(statcode)
3947 {
3948 case STATS_EMULATE_SINGLE_INSTR:
3949 pStat = &gStatExecuteSingleInstr;
3950 break;
3951 case STATS_QEMU_COMPILATION:
3952 pStat = &gStatCompilationQEmu;
3953 break;
3954 case STATS_QEMU_RUN_EMULATED_CODE:
3955 pStat = &gStatRunCodeQEmu;
3956 break;
3957 case STATS_QEMU_TOTAL:
3958 pStat = &gStatTotalTimeQEmu;
3959 break;
3960 case STATS_QEMU_RUN_TIMERS:
3961 pStat = &gStatTimers;
3962 break;
3963 case STATS_TLB_LOOKUP:
3964 pStat= &gStatTBLookup;
3965 break;
3966 case STATS_IRQ_HANDLING:
3967 pStat= &gStatIRQ;
3968 break;
3969 case STATS_RAW_CHECK:
3970 pStat = &gStatRawCheck;
3971 break;
3972
3973 default:
3974 AssertMsgFailed(("unknown stat %d\n", statcode));
3975 return;
3976 }
3977 STAM_PROFILE_ADV_START(pStat, a);
3978}
3979
3980
3981void remR3ProfileStop(int statcode)
3982{
3983 STAMPROFILEADV *pStat;
3984 switch(statcode)
3985 {
3986 case STATS_EMULATE_SINGLE_INSTR:
3987 pStat = &gStatExecuteSingleInstr;
3988 break;
3989 case STATS_QEMU_COMPILATION:
3990 pStat = &gStatCompilationQEmu;
3991 break;
3992 case STATS_QEMU_RUN_EMULATED_CODE:
3993 pStat = &gStatRunCodeQEmu;
3994 break;
3995 case STATS_QEMU_TOTAL:
3996 pStat = &gStatTotalTimeQEmu;
3997 break;
3998 case STATS_QEMU_RUN_TIMERS:
3999 pStat = &gStatTimers;
4000 break;
4001 case STATS_TLB_LOOKUP:
4002 pStat= &gStatTBLookup;
4003 break;
4004 case STATS_IRQ_HANDLING:
4005 pStat= &gStatIRQ;
4006 break;
4007 case STATS_RAW_CHECK:
4008 pStat = &gStatRawCheck;
4009 break;
4010 default:
4011 AssertMsgFailed(("unknown stat %d\n", statcode));
4012 return;
4013 }
4014 STAM_PROFILE_ADV_STOP(pStat, a);
4015}
4016#endif
4017
/**
 * Raise an RC, force rem exit.
 *
 * @param pVM VM handle.
 * @param rc The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Store the status first; the interrupt makes the inner loop exit and pick it up. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4032
4033
4034/* -+- timers -+- */
4035
/**
 * qemu callback: reads the guest TSC.
 *
 * @returns The current guest TSC value, obtained from TM.
 * @param   env     The recompiler CPU state (env->pVM links back to the VM).
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
4041
4042
4043/* -+- interrupts -+- */
4044
/**
 * qemu callback: raises the FPU error line (ISA IRQ 13) via PDM.
 *
 * @param   env     The recompiler CPU state.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4050
4051int cpu_get_pic_interrupt(CPUState *env)
4052{
4053 uint8_t u8Interrupt;
4054 int rc;
4055
4056 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4057 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4058 * with the (a)pic.
4059 */
4060 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
4061 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4062 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4063 * remove this kludge. */
4064 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4065 {
4066 rc = VINF_SUCCESS;
4067 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4068 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4069 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4070 }
4071 else
4072 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
4073
4074 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4075 if (RT_SUCCESS(rc))
4076 {
4077 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
4078 env->interrupt_request |= CPU_INTERRUPT_HARD;
4079 return u8Interrupt;
4080 }
4081 return -1;
4082}
4083
4084
4085/* -+- local apic -+- */
4086
/**
 * qemu callback: sets the local APIC base MSR value via PDM.
 *
 * @param   env     The recompiler CPU state.
 * @param   val     The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4092
4093uint64_t cpu_get_apic_base(CPUX86State *env)
4094{
4095 uint64_t u64;
4096 int rc = PDMApicGetBase(env->pVM, &u64);
4097 if (RT_SUCCESS(rc))
4098 {
4099 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4100 return u64;
4101 }
4102 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4103 return 0;
4104}
4105
/**
 * qemu callback: sets the local APIC task priority register via PDM.
 *
 * @param   env     The recompiler CPU state.
 * @param   val     The new TPR value.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4111
4112uint8_t cpu_get_apic_tpr(CPUX86State *env)
4113{
4114 uint8_t u8;
4115 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4116 if (RT_SUCCESS(rc))
4117 {
4118 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4119 return u8;
4120 }
4121 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4122 return 0;
4123}
4124
4125
4126uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4127{
4128 uint64_t value;
4129 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4130 if (RT_SUCCESS(rc))
4131 {
4132 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4133 return value;
4134 }
4135 /** @todo: exception ? */
4136 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4137 return value;
4138}
4139
/**
 * qemu callback: writes an APIC MSR via PDM.
 *
 * @param   env     The recompiler CPU state.
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4146
/**
 * qemu callback: reads a guest MSR via CPUM.
 *
 * @returns The MSR value as reported by CPUMGetGuestMsr.
 * @param   env     The recompiler CPU state.
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4151
/**
 * qemu callback: writes a guest MSR via CPUM.
 *
 * @param   env     The recompiler CPU state.
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4156/* -+- I/O Ports -+- */
4157
4158#undef LOG_GROUP
4159#define LOG_GROUP LOG_GROUP_REM_IOPORT
4160
4161void cpu_outb(CPUState *env, int addr, int val)
4162{
4163 int rc;
4164
4165 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4166 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4167
4168 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4169 if (RT_LIKELY(rc == VINF_SUCCESS))
4170 return;
4171 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4172 {
4173 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4174 remR3RaiseRC(env->pVM, rc);
4175 return;
4176 }
4177 remAbort(rc, __FUNCTION__);
4178}
4179
4180void cpu_outw(CPUState *env, int addr, int val)
4181{
4182 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4183 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4184 if (RT_LIKELY(rc == VINF_SUCCESS))
4185 return;
4186 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4187 {
4188 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4189 remR3RaiseRC(env->pVM, rc);
4190 return;
4191 }
4192 remAbort(rc, __FUNCTION__);
4193}
4194
4195void cpu_outl(CPUState *env, int addr, int val)
4196{
4197 int rc;
4198 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4199 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4200 if (RT_LIKELY(rc == VINF_SUCCESS))
4201 return;
4202 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4203 {
4204 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4205 remR3RaiseRC(env->pVM, rc);
4206 return;
4207 }
4208 remAbort(rc, __FUNCTION__);
4209}
4210
4211int cpu_inb(CPUState *env, int addr)
4212{
4213 uint32_t u32 = 0;
4214 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4215 if (RT_LIKELY(rc == VINF_SUCCESS))
4216 {
4217 if (/*addr != 0x61 && */addr != 0x71)
4218 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4219 return (int)u32;
4220 }
4221 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4222 {
4223 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4224 remR3RaiseRC(env->pVM, rc);
4225 return (int)u32;
4226 }
4227 remAbort(rc, __FUNCTION__);
4228 return 0xff;
4229}
4230
4231int cpu_inw(CPUState *env, int addr)
4232{
4233 uint32_t u32 = 0;
4234 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4235 if (RT_LIKELY(rc == VINF_SUCCESS))
4236 {
4237 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4238 return (int)u32;
4239 }
4240 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4241 {
4242 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4243 remR3RaiseRC(env->pVM, rc);
4244 return (int)u32;
4245 }
4246 remAbort(rc, __FUNCTION__);
4247 return 0xffff;
4248}
4249
4250int cpu_inl(CPUState *env, int addr)
4251{
4252 uint32_t u32 = 0;
4253 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4254 if (RT_LIKELY(rc == VINF_SUCCESS))
4255 {
4256//if (addr==0x01f0 && u32 == 0x6b6d)
4257// loglevel = ~0;
4258 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4259 return (int)u32;
4260 }
4261 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4262 {
4263 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4264 remR3RaiseRC(env->pVM, rc);
4265 return (int)u32;
4266 }
4267 remAbort(rc, __FUNCTION__);
4268 return 0xffffffff;
4269}
4270
4271#undef LOG_GROUP
4272#define LOG_GROUP LOG_GROUP_REM
4273
4274
4275/* -+- helpers and misc other interfaces -+- */
4276
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param env Pointer to the recompiler CPU structure.
 * @param uOperator CPUID operation (eax).
 * @param pvEAX Where to store eax.
 * @param pvEBX Where to store ebx.
 * @param pvECX Where to store ecx.
 * @param pvEDX Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Delegates to CPUM so the guest sees the configured (not host) CPUID leaves. */
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4294
4295
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Logs the error, syncs the REM state back to the VM if inside REM, then hands
 * control to EM's fatal-error handling (which is not expected to return).
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Complain about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4325
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Complain about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    /* The %N format consumes the va_list, so it is restarted for the second use below. */
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4359
4360
/**
 * Aborts the VM.
 *
 * @param rc VBox error code.
 * @param pszTip Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Complain about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4386
4387
/**
 * Dumps a linux system call.
 *
 * Looks up the guest's eax in a 32-bit Linux i386 syscall-number -> name table
 * and logs the call together with the argument registers. Unknown numbers
 * (beyond the table) are logged as such.
 *
 * @param pVM VM handle.
 */
void remR3DumpLnxSyscall(PVM pVM)
{
    /* Index == syscall number (i386 ABI); holes are "sys_ni_syscall". */
    static const char *apsz[] =
    {
        "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open", /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink", /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod", /* 15 */
        "sys_lchown16",
        "sys_ni_syscall", /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid", /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime", /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime", /* 30 */
        "sys_ni_syscall", /* old stty syscall holder */
        "sys_ni_syscall", /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall", /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir", /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall", /* old prof syscall holder */
        "sys_brk", /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16", /* 50 */
        "sys_acct",
        "sys_umount", /* recycled never used phys() */
        "sys_ni_syscall", /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl", /* 55 */
        "sys_ni_syscall", /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall", /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask", /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp", /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16", /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit", /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16", /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink", /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap", /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16", /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall", /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs", /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer", /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl", /* 110 */
        "sys_vhangup",
        "sys_ni_syscall", /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff", /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone", /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect", /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall", /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall", /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs", /* 135 */
        "sys_personality",
        "sys_ni_syscall", /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek", /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv", /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock", /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam", /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min", /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16", /* 165 */
        "sys_vm86",
        "sys_ni_syscall", /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16", /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask", /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64", /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset", /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall", /* reserved for streams1 */
        "sys_ni_syscall", /* reserved for streams2 */
        "sys_vfork", /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64", /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid", /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups", /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid", /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid", /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64", /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall", /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead", /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr", /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr", /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex", /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup", /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64", /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl", /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime", /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime", /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill", /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall" /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
                     CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
            break;

    }
}
4686
4687
/**
 * Dumps an OpenBSD system call.
 *
 * Looks up the guest's eax in the OpenBSD syscall-number -> name table, reads
 * up to eight stack arguments from the guest esp, and logs the call. Bails out
 * early when logging is disabled (RTLogPrintf is unconditional, unlike Log()).
 *
 * @param pVM VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* Index == syscall number; "SYS_<n>" entries are unknown/unused slots. */
    static const char *apsz[] =
    {
        "SYS_syscall", //0
        "SYS_exit", //1
        "SYS_fork", //2
        "SYS_read", //3
        "SYS_write", //4
        "SYS_open", //5
        "SYS_close", //6
        "SYS_wait4", //7
        "SYS_8",
        "SYS_link", //9
        "SYS_unlink", //10
        "SYS_11",
        "SYS_chdir", //12
        "SYS_fchdir", //13
        "SYS_mknod", //14
        "SYS_chmod", //15
        "SYS_chown", //16
        "SYS_break", //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid", //20
        "SYS_mount", //21
        "SYS_unmount", //22
        "SYS_setuid", //23
        "SYS_getuid", //24
        "SYS_geteuid", //25
        "SYS_ptrace", //26
        "SYS_recvmsg", //27
        "SYS_sendmsg", //28
        "SYS_recvfrom", //29
        "SYS_accept", //30
        "SYS_getpeername", //31
        "SYS_getsockname", //32
        "SYS_access", //33
        "SYS_chflags", //34
        "SYS_fchflags", //35
        "SYS_sync", //36
        "SYS_kill", //37
        "SYS_38",
        "SYS_getppid", //39
        "SYS_40",
        "SYS_dup", //41
        "SYS_opipe", //42
        "SYS_getegid", //43
        "SYS_profil", //44
        "SYS_ktrace", //45
        "SYS_sigaction", //46
        "SYS_getgid", //47
        "SYS_sigprocmask", //48
        "SYS_getlogin", //49
        "SYS_setlogin", //50
        "SYS_acct", //51
        "SYS_sigpending", //52
        "SYS_osigaltstack", //53
        "SYS_ioctl", //54
        "SYS_reboot", //55
        "SYS_revoke", //56
        "SYS_symlink", //57
        "SYS_readlink", //58
        "SYS_execve", //59
        "SYS_umask", //60
        "SYS_chroot", //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork", //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk", //69
        "SYS_sstk", //70
        "SYS_61",
        "SYS_vadvise", //72
        "SYS_munmap", //73
        "SYS_mprotect", //74
        "SYS_madvise", //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore", //78
        "SYS_getgroups", //79
        "SYS_setgroups", //80
        "SYS_getpgrp", //81
        "SYS_setpgid", //82
        "SYS_setitimer", //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer", //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2", //90
        "SYS_91",
        "SYS_fcntl", //92
        "SYS_select", //93
        "SYS_94",
        "SYS_fsync", //95
        "SYS_setpriority", //96
        "SYS_socket", //97
        "SYS_connect", //98
        "SYS_99",
        "SYS_getpriority", //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn", //103
        "SYS_bind", //104
        "SYS_setsockopt", //105
        "SYS_listen", //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend", //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday", //116
        "SYS_getrusage", //117
        "SYS_getsockopt", //118
        "SYS_119",
        "SYS_readv", //120
        "SYS_writev", //121
        "SYS_settimeofday", //122
        "SYS_fchown", //123
        "SYS_fchmod", //124
        "SYS_125",
        "SYS_setreuid", //126
        "SYS_setregid", //127
        "SYS_rename", //128
        "SYS_129",
        "SYS_130",
        "SYS_flock", //131
        "SYS_mkfifo", //132
        "SYS_sendto", //133
        "SYS_shutdown", //134
        "SYS_socketpair", //135
        "SYS_mkdir", //136
        "SYS_rmdir", //137
        "SYS_utimes", //138
        "SYS_139",
        "SYS_adjtime", //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid", //147
        "SYS_quotactl", //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc", //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh", //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch", //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread", //173
        "SYS_pwrite", //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid", //181
        "SYS_setegid", //182
        "SYS_seteuid", //183
        "SYS_lfs_bmapv", //184
        "SYS_lfs_markv", //185
        "SYS_lfs_segclean", //186
        "SYS_lfs_segwait", //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf", //191
        "SYS_fpathconf", //192
        "SYS_swapctl", //193
        "SYS_getrlimit", //194
        "SYS_setrlimit", //195
        "SYS_getdirentries", //196
        "SYS_mmap", //197
        "SYS___syscall", //198
        "SYS_lseek", //199
        "SYS_truncate", //200
        "SYS_ftruncate", //201
        "SYS___sysctl", //202
        "SYS_mlock", //203
        "SYS_munlock", //204
        "SYS_205",
        "SYS_futimes", //206
        "SYS_getpgid", //207
        "SYS_xfspioctl", //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget", //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget", //225
        "SYS_msgsnd", //226
        "SYS_msgrcv", //227
        "SYS_shmat", //228
        "SYS_229",
        "SYS_shmdt", //230
        "SYS_231",
        "SYS_clock_gettime", //232
        "SYS_clock_settime", //233
        "SYS_clock_getres", //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep", //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit", //250
        "SYS_rfork", //251
        "SYS_poll", //252
        "SYS_issetugid", //253
        "SYS_lchown", //254
        "SYS_getsid", //255
        "SYS_msync", //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat", //260
        "SYS_statfs", //261
        "SYS_fstatfs", //262
        "SYS_pipe", //263
        "SYS_fhopen", //264
        "SYS_265",
        "SYS_fhstatfs", //266
        "SYS_preadv", //267
        "SYS_pwritev", //268
        "SYS_kqueue", //269
        "SYS_kevent", //270
        "SYS_mlockall", //271
        "SYS_munlockall", //272
        "SYS_getpeereid", //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid", //281
        "SYS_setresuid", //282
        "SYS_getresgid", //283
        "SYS_setresgid", //284
        "SYS_285",
        "SYS_mquery", //286
        "SYS_closefrom", //287
        "SYS_sigaltstack", //288
        "SYS_shmget", //289
        "SYS_semop", //290
        "SYS_stat", //291
        "SYS_fstat", //292
        "SYS_lstat", //293
        "SYS_fhstat", //294
        "SYS___semctl", //295
        "SYS_shmctl", //296
        "SYS_msgctl", //297
        "SYS_MAXSYSCALL", //298
        //299
        //300
    };
    uint32_t uEAX;
    /* RTLogPrintf below is unconditional, so skip all work when logging is off. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                /* OpenBSD passes syscall arguments on the stack; fetch the first eight. */
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
5018
5019
5020#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Only built for the no-CRT x86 Windows configuration; does nothing but
 * report success for every attach/detach reason.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
5028
/**
 * Minimal memcpy replacement for the no-CRT x86 Windows build.
 *
 * Simple forward byte copy; regions must not overlap (standard memcpy contract).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (const-correct now: the original initialized
 *                  a non-const pointer from src, discarding the qualifier).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5036
5037#endif
5038
/** qemu callback invoked on SMM state changes; a no-op stub in this build. */
void cpu_smm_update(CPUState* env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette