VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 17814

Last change on this file since 17814 was 17538, checked in by vboxsync, 16 years ago

REM,PGM: Added two mutually exclusive flags to REMR3NotifyPhysRamRegister to indicate whether it's MMIO2 or RAM that's being registered. This is for the new code only, the old one sticks to the MM_RAM_FLAGS.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 154.9 KB
Line 
1/* $Id: VBoxRecompiler.c 17538 2009-03-08 05:32:49Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
/* QEMU-internal entry points used by this glue code, declared by hand
   because we don't wanna include everything. Signatures must stay in
   sync with the recompiler sources. */
extern void cpu_exec_init_all(unsigned long tb_size);
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUState *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
/* Only referenced by strict builds (assertion helpers). */
unsigned long get_phys_page_offset(target_ulong addr);
#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Both arguments are evaluated exactly once, so side effects are safe.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
/* Saved-state callbacks and RAM/dirty-map setup (defined below). */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
static void remR3StateUpdate(PVM pVM);
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

/* MMIO access callbacks registered with the recompiler, ordered U8/U16/U32. */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

/* Access-handler callbacks registered with the recompiler, ordered U8/U16/U32. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling samples (registered with STAM in REMR3Init). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE gStatGCPhys2HCVirt;
static STAMPROFILE gStatHCVirt2GCPhys;
/* Event counters. */
static STAMCOUNTER gStatCpuGetTSC;
/* Reasons raw-mode execution was refused. */
static STAMCOUNTER gStatRefuseTFInhibit;
static STAMCOUNTER gStatRefuseVM86;
static STAMCOUNTER gStatRefusePaging;
static STAMCOUNTER gStatRefusePAE;
static STAMCOUNTER gStatRefuseIOPLNot0;
static STAMCOUNTER gStatRefuseIF0;
static STAMCOUNTER gStatRefuseCode16;
static STAMCOUNTER gStatRefuseWP0;
static STAMCOUNTER gStatRefuseRing1or2;
static STAMCOUNTER gStatRefuseCanExecute;
/* Descriptor table / selector change counters. */
static STAMCOUNTER gStatREMGDTChange;
static STAMCOUNTER gStatREMIDTChange;
static STAMCOUNTER gStatREMLDTRChange;
static STAMCOUNTER gStatREMTRChange;
/* Indexed by segment register (ES,CS,SS,DS,FS,GS - see registration). */
static STAMCOUNTER gStatSelOutOfSync[6];
static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER gStatFlushTBs;
#endif
146
/*
 * Global stuff.
 */

/** MMIO read callbacks (indexed by access size: U8, U16, U32). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks (indexed by access size: U8, U16, U32). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (indexed by access size: U8, U16, U32). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (indexed by access size: U8, U16, U32). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
182
183
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. Registered once in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
215
216
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
/* NOTE(review): both branches of this #if assert the same thing; the #if
   could be dropped unless a stricter check is planned for these OSes. */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
uint8_t *code_gen_prologue;
236
/**
 * Initializes the REM.
 *
 * Sets up the recompiled CPU environment, registers the MMIO and handler
 * memory types with the recompiler, registers the saved-state unit and
 * (optionally) debugger commands and statistics.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /* ctx. */
    pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all notifications until init is complete - nothing is in a
       consistent state yet. Cleared again further down. */
    pVM->rem.s.fIgnoreAll = true;

    /* Prologue buffer for generated code; see comment on code_gen_prologue. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's CPUID feature bits into the recompiled CPU env. */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands. Registered only once even if several VMs are created.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds)); /* NOTE(review): shadows outer rc; failure here is deliberately ignored. */
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    /* NOTE(review): several descriptions below repeat "Profiling timer scheduling."
       (TBLookup/IRQ/RawCheck) - looks like copy/paste; left as-is. */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    /** @todo missing /REM/Tb*Count stats */

#endif

#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    return rc;
}
402
403
404/**
405 * Finalizes the REM initialization.
406 *
407 * This is called after all components, devices and drivers has
408 * been initialized. Its main purpose it to finish the RAM related
409 * initialization.
410 *
411 * @returns VBox status code.
412 *
413 * @param pVM The VM handle.
414 */
415REMR3DECL(int) REMR3InitFinalize(PVM pVM)
416{
417 int rc;
418
419 /*
420 * Ram size & dirty bit map.
421 */
422 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
423 pVM->rem.s.fGCPhysLastRamFixed = true;
424#ifdef RT_STRICT
425 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
426#else
427 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
428#endif
429 return rc;
430}
431
432
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map has one byte per guest page. When fGuarded is set, the map
 * is page-allocated with an inaccessible tail so that out-of-bounds writes
 * fault instead of corrupting memory.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* cb is the RAM size; the overflow check catches GCPhysLastRam == ~0. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    /* The size must be page aligned, i.e. no bits lost by the shift. */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail of the allocation inaccessible (the guard). */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the start so the map ends exactly at the guard boundary.
           NOTE(review): phys_ram_dirty no longer points at the allocation
           start after this - freeing it later would need the original
           pointer. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - 0xff marks every page dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
487
488
489/**
490 * Terminates the REM.
491 *
492 * Termination means cleaning up and freeing all resources,
493 * the VM it self is at this point powered off or suspended.
494 *
495 * @returns VBox status code.
496 * @param pVM The VM to operate on.
497 */
498REMR3DECL(int) REMR3Term(PVM pVM)
499{
500 return VINF_SUCCESS;
501}
502
503
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu. Notifications are suppressed around cpu_reset()
     * so its side effects don't get forwarded anywhere.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
528
529
/**
 * Execute state save operation.
 *
 * Stream layout (must match remR3Load): hflags, ~0 separator, raw-ring-0
 * flag, pending interrupt, ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0); /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Only the final status is checked; earlier put errors surface here too. */
    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
555
556
/**
 * Execute state load operation.
 *
 * Accepts the current version and the 1.6 layout; the stream order must
 * match remR3Save (plus extra 1.6-only fields that are read and discarded
 * or replayed).
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff (1.6 only: list of invalidated pages).
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the whole TLB so the loaded hflags/state take effect everywhere.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
678
679
680
681#undef LOG_GROUP
682#define LOG_GROUP LOG_GROUP_REM_RUN
683
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike. The original interrupt_request
     * is restored on the way out.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (fBp remembers whether one was removed so it can be re-inserted below.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* The step completed; nudge the TSC and virtual clocks forward. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM status was raised inside the recompiler; fetch and clear it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
764
765
766/**
767 * Set a breakpoint using the REM facilities.
768 *
769 * @returns VBox status code.
770 * @param pVM The VM handle.
771 * @param Address The breakpoint address.
772 * @thread The emulation thread.
773 */
774REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
775{
776 VM_ASSERT_EMT(pVM);
777 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
778 {
779 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
780 return VINF_SUCCESS;
781 }
782 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
783 return VERR_REM_NO_MORE_BP_SLOTS;
784}
785
786
787/**
788 * Clears a breakpoint set by REMR3BreakpointSet().
789 *
790 * @returns VBox status code.
791 * @param pVM The VM handle.
792 * @param Address The breakpoint address.
793 * @thread The emulation thread.
794 */
795REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
796{
797 VM_ASSERT_EMT(pVM);
798 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
799 {
800 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
801 return VINF_SUCCESS;
802 }
803 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
804 return VERR_REM_BP_NOT_FOUND;
805}
806
807
808/**
809 * Emulate an instruction.
810 *
811 * This function executes one instruction without letting anyone
812 * interrupt it. This is intended for being called while being in
813 * raw mode and thus will take care of all the state syncing between
814 * REM and the rest.
815 *
816 * @returns VBox status code.
817 * @param pVM VM handle.
818 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs;   /* restore the caller's TB-flush request regardless of the sync result */
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt mask so it can be restored verbatim after the single-instruction run. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        /* Map the raw cpu_exec exit code onto a VBox status code for the caller (EM). */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're in trouble now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the breakpoint list for the current PC to tell. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             * The stashed status code is consumed here and the slot reset to a poison value.
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
955
956
957/**
958 * Runs code in recompiled mode.
959 *
960 * Before calling this function the REM state needs to be in sync with
961 * the VM. Call REMR3State() to perform the sync. It's only necessary
962 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
963 * and after calling REMR3StateBack().
964 *
965 * @returns VBox status code.
966 *
967 * @param pVM VM Handle.
968 */
969REMR3DECL(int) REMR3Run(PVM pVM)
970{
971 int rc;
972 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
973 Assert(pVM->rem.s.fInREM);
974
975 TMNotifyStartOfExecution(pVM);
976 rc = cpu_exec(&pVM->rem.s.Env);
977 TMNotifyEndOfExecution(pVM);
978 switch (rc)
979 {
980 /*
981 * This happens when the execution was interrupted
982 * by an external event, like pending timers.
983 */
984 case EXCP_INTERRUPT:
985 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
986 rc = VINF_SUCCESS;
987 break;
988
989 /*
990 * hlt instruction.
991 */
992 case EXCP_HLT:
993 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
994 rc = VINF_EM_HALT;
995 break;
996
997 /*
998 * The VM has halted.
999 */
1000 case EXCP_HALTED:
1001 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1002 rc = VINF_EM_HALT;
1003 break;
1004
1005 /*
1006 * Breakpoint/single step.
1007 */
1008 case EXCP_DEBUG:
1009 {
1010#if 0//def DEBUG_bird
1011 static int iBP = 0;
1012 printf("howdy, breakpoint! iBP=%d\n", iBP);
1013 switch (iBP)
1014 {
1015 case 0:
1016 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1017 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1018 //pVM->rem.s.Env.interrupt_request = 0;
1019 //pVM->rem.s.Env.exception_index = -1;
1020 //g_fInterruptDisabled = 1;
1021 rc = VINF_SUCCESS;
1022 asm("int3");
1023 break;
1024 default:
1025 asm("int3");
1026 break;
1027 }
1028 iBP++;
1029#else
1030 /* breakpoint or single step? */
1031 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1032 int iBP;
1033 rc = VINF_EM_DBG_STEPPED;
1034 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1035 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1036 {
1037 rc = VINF_EM_DBG_BREAKPOINT;
1038 break;
1039 }
1040 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1041#endif
1042 break;
1043 }
1044
1045 /*
1046 * Switch to RAW-mode.
1047 */
1048 case EXCP_EXECUTE_RAW:
1049 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1050 rc = VINF_EM_RESCHEDULE_RAW;
1051 break;
1052
1053 /*
1054 * Switch to hardware accelerated RAW-mode.
1055 */
1056 case EXCP_EXECUTE_HWACC:
1057 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1058 rc = VINF_EM_RESCHEDULE_HWACC;
1059 break;
1060
1061 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
1062 /*
1063 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1064 */
1065 case EXCP_RC:
1066 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1067 rc = pVM->rem.s.rc;
1068 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1069 break;
1070
1071 /*
1072 * Figure out the rest when they arrive....
1073 */
1074 default:
1075 AssertMsgFailed(("rc=%d\n", rc));
1076 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1077 rc = VINF_SUCCESS;
1078 break;
1079 }
1080
1081 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1082 return rc;
1083}
1084
1085
1086/**
1087 * Check if the cpu state is suitable for Raw execution.
1088 *
1089 * @returns boolean
1090 * @param env The CPU env struct.
1091 * @param eip The EIP to check this for (might differ from env->eip).
1092 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1093 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1094 *
1095 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1096 */
1097bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1098{
1099 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1100 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1101 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1102 uint32_t u32CR0;
1103
1104 /* Update counter. */
1105 env->pVM->rem.s.cCanExecuteRaw++;
1106
1107 if (HWACCMIsEnabled(env->pVM))
1108 {
1109 CPUMCTX Ctx;
1110
1111 env->state |= CPU_RAW_HWACC;
1112
1113 /*
1114 * Create partial context for HWACCMR3CanExecuteGuest
1115 */
1116 Ctx.cr0 = env->cr[0];
1117 Ctx.cr3 = env->cr[3];
1118 Ctx.cr4 = env->cr[4];
1119
1120 Ctx.tr = env->tr.selector;
1121 Ctx.trHid.u64Base = env->tr.base;
1122 Ctx.trHid.u32Limit = env->tr.limit;
1123 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1124
1125 Ctx.idtr.cbIdt = env->idt.limit;
1126 Ctx.idtr.pIdt = env->idt.base;
1127
1128 Ctx.gdtr.cbGdt = env->gdt.limit;
1129 Ctx.gdtr.pGdt = env->gdt.base;
1130
1131 Ctx.rsp = env->regs[R_ESP];
1132 Ctx.rip = env->eip;
1133
1134 Ctx.eflags.u32 = env->eflags;
1135
1136 Ctx.cs = env->segs[R_CS].selector;
1137 Ctx.csHid.u64Base = env->segs[R_CS].base;
1138 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1139 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1140
1141 Ctx.ds = env->segs[R_DS].selector;
1142 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1143 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1144 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1145
1146 Ctx.es = env->segs[R_ES].selector;
1147 Ctx.esHid.u64Base = env->segs[R_ES].base;
1148 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1149 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1150
1151 Ctx.fs = env->segs[R_FS].selector;
1152 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1153 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1154 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1155
1156 Ctx.gs = env->segs[R_GS].selector;
1157 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1158 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1159 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1160
1161 Ctx.ss = env->segs[R_SS].selector;
1162 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1163 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1164 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1165
1166 Ctx.msrEFER = env->efer;
1167
1168 /* Hardware accelerated raw-mode:
1169 *
1170 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1171 */
1172 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1173 {
1174 *piException = EXCP_EXECUTE_HWACC;
1175 return true;
1176 }
1177 return false;
1178 }
1179
1180 /*
1181 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1182 * or 32 bits protected mode ring 0 code
1183 *
1184 * The tests are ordered by the likelyhood of being true during normal execution.
1185 */
1186 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1187 {
1188 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1189 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1190 return false;
1191 }
1192
1193#ifndef VBOX_RAW_V86
1194 if (fFlags & VM_MASK) {
1195 STAM_COUNTER_INC(&gStatRefuseVM86);
1196 Log2(("raw mode refused: VM_MASK\n"));
1197 return false;
1198 }
1199#endif
1200
1201 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1202 {
1203#ifndef DEBUG_bird
1204 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1205#endif
1206 return false;
1207 }
1208
1209 if (env->singlestep_enabled)
1210 {
1211 //Log2(("raw mode refused: Single step\n"));
1212 return false;
1213 }
1214
1215 if (env->nb_breakpoints > 0)
1216 {
1217 //Log2(("raw mode refused: Breakpoints\n"));
1218 return false;
1219 }
1220
1221 u32CR0 = env->cr[0];
1222 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1223 {
1224 STAM_COUNTER_INC(&gStatRefusePaging);
1225 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1226 return false;
1227 }
1228
1229 if (env->cr[4] & CR4_PAE_MASK)
1230 {
1231 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1232 {
1233 STAM_COUNTER_INC(&gStatRefusePAE);
1234 return false;
1235 }
1236 }
1237
1238 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1239 {
1240 if (!EMIsRawRing3Enabled(env->pVM))
1241 return false;
1242
1243 if (!(env->eflags & IF_MASK))
1244 {
1245 STAM_COUNTER_INC(&gStatRefuseIF0);
1246 Log2(("raw mode refused: IF (RawR3)\n"));
1247 return false;
1248 }
1249
1250 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1251 {
1252 STAM_COUNTER_INC(&gStatRefuseWP0);
1253 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1254 return false;
1255 }
1256 }
1257 else
1258 {
1259 if (!EMIsRawRing0Enabled(env->pVM))
1260 return false;
1261
1262 // Let's start with pure 32 bits ring 0 code first
1263 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1264 {
1265 STAM_COUNTER_INC(&gStatRefuseCode16);
1266 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1267 return false;
1268 }
1269
1270 // Only R0
1271 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1272 {
1273 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1274 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1275 return false;
1276 }
1277
1278 if (!(u32CR0 & CR0_WP_MASK))
1279 {
1280 STAM_COUNTER_INC(&gStatRefuseWP0);
1281 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1282 return false;
1283 }
1284
1285 if (PATMIsPatchGCAddr(env->pVM, eip))
1286 {
1287 Log2(("raw r0 mode forced: patch code\n"));
1288 *piException = EXCP_EXECUTE_RAW;
1289 return true;
1290 }
1291
1292#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1293 if (!(env->eflags & IF_MASK))
1294 {
1295 STAM_COUNTER_INC(&gStatRefuseIF0);
1296 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1297 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1298 return false;
1299 }
1300#endif
1301
1302 env->state |= CPU_RAW_RING0;
1303 }
1304
1305 /*
1306 * Don't reschedule the first time we're called, because there might be
1307 * special reasons why we're here that is not covered by the above checks.
1308 */
1309 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1310 {
1311 Log2(("raw mode refused: first scheduling\n"));
1312 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1313 return false;
1314 }
1315
1316 Assert(PGMPhysIsA20Enabled(env->pVM));
1317 *piException = EXCP_EXECUTE_RAW;
1318 return true;
1319}
1320
1321
1322/**
1323 * Fetches a code byte.
1324 *
1325 * @returns Success indicator (bool) for ease of use.
1326 * @param env The CPU environment structure.
1327 * @param GCPtrInstr Where to fetch code.
1328 * @param pu8Byte Where to store the byte on success
1329 */
1330bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1331{
1332 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1333 if (RT_SUCCESS(rc))
1334 return true;
1335 return false;
1336}
1337
1338
1339/**
1340 * Flush (or invalidate if you like) page table/dir entry.
1341 *
1342 * (invlpg instruction; tlb_flush_page)
1343 *
1344 * @param env Pointer to cpu environment.
1345 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1346 */
1347void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1348{
1349 PVM pVM = env->pVM;
1350 PCPUMCTX pCtx;
1351 int rc;
1352
1353 /*
1354 * When we're replaying invlpg instructions or restoring a saved
1355 * state we disable this path.
1356 */
1357 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1358 return;
1359 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1360 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1361
1362 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1363
1364 /*
1365 * Update the control registers before calling PGMFlushPage.
1366 */
1367 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1368 pCtx->cr0 = env->cr[0];
1369 pCtx->cr3 = env->cr[3];
1370 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1371 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1372 pCtx->cr4 = env->cr[4];
1373
1374 /*
1375 * Let PGM do the rest.
1376 */
1377 rc = PGMInvalidatePage(pVM, GCPtr);
1378 if (RT_FAILURE(rc))
1379 {
1380 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1381 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1382 }
1383 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1384}
1385
1386
1387#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to an R3 pointer, encoding special
 * conditions in the low two bits of the returned pointer (the alignment
 * assertion below guarantees those bits are otherwise clear):
 *   - bit 0 set (return (void *)1): unassigned / catch-all page, no mapping.
 *   - bit 1 set: page is write-monitored (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored; a writable mapping
       is always requested — presumably deliberate, but confirm with callers. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
         || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
         || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
         || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1408#endif /* REM_PHYS_ADDR_IN_TLB */
1409
1410
1411/**
1412 * Called from tlb_protect_code in order to write monitor a code page.
1413 *
1414 * @param env Pointer to the CPU environment.
1415 * @param GCPtr Code page to monitor
1416 */
1417void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1418{
1419#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1420 Assert(env->pVM->rem.s.fInREM);
1421 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1422 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1423 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1424 && !(env->eflags & VM_MASK) /* no V86 mode */
1425 && !HWACCMIsEnabled(env->pVM))
1426 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1427#endif
1428}
1429
1430
1431/**
1432 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1433 *
1434 * @param env Pointer to the CPU environment.
1435 * @param GCPtr Code page to monitor
1436 */
1437void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1438{
1439 Assert(env->pVM->rem.s.fInREM);
1440#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1441 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1442 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1443 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1444 && !(env->eflags & VM_MASK) /* no V86 mode */
1445 && !HWACCMIsEnabled(env->pVM))
1446 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1447#endif
1448}
1449
1450
1451/**
1452 * Called when the CPU is initialized, any of the CRx registers are changed or
1453 * when the A20 line is modified.
1454 *
1455 * @param env Pointer to the CPU environment.
1456 * @param fGlobal Set if the flush is global.
1457 */
1458void remR3FlushTLB(CPUState *env, bool fGlobal)
1459{
1460 PVM pVM = env->pVM;
1461 PCPUMCTX pCtx;
1462
1463 /*
1464 * When we're replaying invlpg instructions or restoring a saved
1465 * state we disable this path.
1466 */
1467 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1468 return;
1469 Assert(pVM->rem.s.fInREM);
1470
1471 /*
1472 * The caller doesn't check cr4, so we have to do that for ourselves.
1473 */
1474 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1475 fGlobal = true;
1476 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1477
1478 /*
1479 * Update the control registers before calling PGMR3FlushTLB.
1480 */
1481 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1482 pCtx->cr0 = env->cr[0];
1483 pCtx->cr3 = env->cr[3];
1484 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1485 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1486 pCtx->cr4 = env->cr[4];
1487
1488 /*
1489 * Let PGM do the rest.
1490 */
1491 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1492}
1493
1494
1495/**
1496 * Called when any of the cr0, cr4 or efer registers is updated.
1497 *
1498 * @param env Pointer to the CPU environment.
1499 */
1500void remR3ChangeCpuMode(CPUState *env)
1501{
1502 int rc;
1503 PVM pVM = env->pVM;
1504 PCPUMCTX pCtx;
1505
1506 /*
1507 * When we're replaying loads or restoring a saved
1508 * state this path is disabled.
1509 */
1510 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
1511 return;
1512 Assert(pVM->rem.s.fInREM);
1513
1514 /*
1515 * Update the control registers before calling PGMChangeMode()
1516 * as it may need to map whatever cr3 is pointing to.
1517 */
1518 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1519 pCtx->cr0 = env->cr[0];
1520 pCtx->cr3 = env->cr[3];
1521 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1522 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1523 pCtx->cr4 = env->cr[4];
1524
1525#ifdef TARGET_X86_64
1526 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
1527 if (rc != VINF_SUCCESS)
1528 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
1529#else
1530 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
1531 if (rc != VINF_SUCCESS)
1532 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
1533#endif
1534}
1535
1536
1537/**
1538 * Called from compiled code to run dma.
1539 *
1540 * @param env Pointer to the CPU environment.
1541 */
1542void remR3DmaRun(CPUState *env)
1543{
1544 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1545 PDMR3DmaRun(env->pVM);
1546 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1547}
1548
1549
1550/**
1551 * Called from compiled code to schedule pending timers in VMM
1552 *
1553 * @param env Pointer to the CPU environment.
1554 */
1555void remR3TimersRun(CPUState *env)
1556{
1557 LogFlow(("remR3TimersRun:\n"));
1558 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1559 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1560 TMR3TimerQueuesDo(env->pVM);
1561 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1562 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1563}
1564
1565
1566/**
1567 * Record trap occurance
1568 *
1569 * @returns VBox status code
1570 * @param env Pointer to the CPU environment.
1571 * @param uTrap Trap nr
1572 * @param uErrorCode Error code
1573 * @param pvNextEIP Next EIP
1574 */
1575int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1576{
1577 PVM pVM = env->pVM;
1578#ifdef VBOX_WITH_STATISTICS
1579 static STAMCOUNTER s_aStatTrap[255];
1580 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1581#endif
1582
1583#ifdef VBOX_WITH_STATISTICS
1584 if (uTrap < 255)
1585 {
1586 if (!s_aRegisters[uTrap])
1587 {
1588 char szStatName[64];
1589 s_aRegisters[uTrap] = true;
1590 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1591 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1592 }
1593 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1594 }
1595#endif
1596 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1597 if( uTrap < 0x20
1598 && (env->cr[0] & X86_CR0_PE)
1599 && !(env->eflags & X86_EFL_VM))
1600 {
1601#ifdef DEBUG
1602 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1603#endif
1604 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1605 {
1606 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1607 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1608 return VERR_REM_TOO_MANY_TRAPS;
1609 }
1610 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1611 pVM->rem.s.cPendingExceptions = 1;
1612 pVM->rem.s.uPendingException = uTrap;
1613 pVM->rem.s.uPendingExcptEIP = env->eip;
1614 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1615 }
1616 else
1617 {
1618 pVM->rem.s.cPendingExceptions = 0;
1619 pVM->rem.s.uPendingException = uTrap;
1620 pVM->rem.s.uPendingExcptEIP = env->eip;
1621 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1622 }
1623 return VINF_SUCCESS;
1624}
1625
1626
1627/*
1628 * Clear current active trap
1629 *
1630 * @param pVM VM Handle.
1631 */
1632void remR3TrapClear(PVM pVM)
1633{
1634 pVM->rem.s.cPendingExceptions = 0;
1635 pVM->rem.s.uPendingException = 0;
1636 pVM->rem.s.uPendingExcptEIP = 0;
1637 pVM->rem.s.uPendingExcptCR2 = 0;
1638}
1639
1640
1641/*
1642 * Record previous call instruction addresses
1643 *
1644 * @param env Pointer to the CPU environment.
1645 */
1646void remR3RecordCall(CPUState *env)
1647{
1648 CSAMR3RecordCallAddress(env->pVM, env->eip);
1649}
1650
1651
1652/**
1653 * Syncs the internal REM state with the VM.
1654 *
1655 * This must be called before REMR3Run() is invoked whenever when the REM
1656 * state is not up to date. Calling it several times in a row is not
1657 * permitted.
1658 *
1659 * @returns VBox status code.
1660 *
1661 * @param pVM VM Handle.
1662 *
1663 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1664 * no do this since the majority of the callers don't want any unnecessary of events
1665 * pending that would immediatly interrupt execution.
1666 */
1667REMR3DECL(int) REMR3State(PVM pVM)
1668{
1669 register const CPUMCTX *pCtx;
1670 register unsigned fFlags;
1671 bool fHiddenSelRegsValid;
1672 unsigned i;
1673 TRPMEVENT enmType;
1674 uint8_t u8TrapNo;
1675 int rc;
1676
1677 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1678 Log2(("REMR3State:\n"));
1679
1680 pCtx = pVM->rem.s.pCtx;
1681 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1682
1683 Assert(!pVM->rem.s.fInREM);
1684 pVM->rem.s.fInStateSync = true;
1685
1686 /*
1687 * If we have to flush TBs, do that immediately.
1688 */
1689 if (pVM->rem.s.fFlushTBs)
1690 {
1691 STAM_COUNTER_INC(&gStatFlushTBs);
1692 tb_flush(&pVM->rem.s.Env);
1693 pVM->rem.s.fFlushTBs = false;
1694 }
1695
1696 /*
1697 * Copy the registers which require no special handling.
1698 */
1699#ifdef TARGET_X86_64
1700 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1701 Assert(R_EAX == 0);
1702 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1703 Assert(R_ECX == 1);
1704 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1705 Assert(R_EDX == 2);
1706 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1707 Assert(R_EBX == 3);
1708 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1709 Assert(R_ESP == 4);
1710 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1711 Assert(R_EBP == 5);
1712 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1713 Assert(R_ESI == 6);
1714 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1715 Assert(R_EDI == 7);
1716 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1717 pVM->rem.s.Env.regs[8] = pCtx->r8;
1718 pVM->rem.s.Env.regs[9] = pCtx->r9;
1719 pVM->rem.s.Env.regs[10] = pCtx->r10;
1720 pVM->rem.s.Env.regs[11] = pCtx->r11;
1721 pVM->rem.s.Env.regs[12] = pCtx->r12;
1722 pVM->rem.s.Env.regs[13] = pCtx->r13;
1723 pVM->rem.s.Env.regs[14] = pCtx->r14;
1724 pVM->rem.s.Env.regs[15] = pCtx->r15;
1725
1726 pVM->rem.s.Env.eip = pCtx->rip;
1727
1728 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1729#else
1730 Assert(R_EAX == 0);
1731 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1732 Assert(R_ECX == 1);
1733 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1734 Assert(R_EDX == 2);
1735 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1736 Assert(R_EBX == 3);
1737 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1738 Assert(R_ESP == 4);
1739 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1740 Assert(R_EBP == 5);
1741 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1742 Assert(R_ESI == 6);
1743 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1744 Assert(R_EDI == 7);
1745 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1746 pVM->rem.s.Env.eip = pCtx->eip;
1747
1748 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1749#endif
1750
1751 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1752
1753 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1754 for (i=0;i<8;i++)
1755 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1756
1757 /*
1758 * Clear the halted hidden flag (the interrupt waking up the CPU can
1759 * have been dispatched in raw mode).
1760 */
1761 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1762
1763 /*
1764 * Replay invlpg?
1765 */
1766 if (pVM->rem.s.cInvalidatedPages)
1767 {
1768 RTUINT i;
1769
1770 pVM->rem.s.fIgnoreInvlPg = true;
1771 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1772 {
1773 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1774 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1775 }
1776 pVM->rem.s.fIgnoreInvlPg = false;
1777 pVM->rem.s.cInvalidatedPages = 0;
1778 }
1779
1780 /* Replay notification changes? */
1781 if (pVM->rem.s.cHandlerNotifications)
1782 REMR3ReplayHandlerNotifications(pVM);
1783
1784 /* Update MSRs; before CRx registers! */
1785 pVM->rem.s.Env.efer = pCtx->msrEFER;
1786 pVM->rem.s.Env.star = pCtx->msrSTAR;
1787 pVM->rem.s.Env.pat = pCtx->msrPAT;
1788#ifdef TARGET_X86_64
1789 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1790 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1791 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1792 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1793
1794 /* Update the internal long mode activate flag according to the new EFER value. */
1795 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1796 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1797 else
1798 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1799#endif
1800
1801 /*
1802 * Registers which are rarely changed and require special handling / order when changed.
1803 */
1804 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1805 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1806 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1807 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1808 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1809 {
1810 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1811 {
1812 pVM->rem.s.fIgnoreCR3Load = true;
1813 tlb_flush(&pVM->rem.s.Env, true);
1814 pVM->rem.s.fIgnoreCR3Load = false;
1815 }
1816
1817 /* CR4 before CR0! */
1818 if (fFlags & CPUM_CHANGED_CR4)
1819 {
1820 pVM->rem.s.fIgnoreCR3Load = true;
1821 pVM->rem.s.fIgnoreCpuMode = true;
1822 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1823 pVM->rem.s.fIgnoreCpuMode = false;
1824 pVM->rem.s.fIgnoreCR3Load = false;
1825 }
1826
1827 if (fFlags & CPUM_CHANGED_CR0)
1828 {
1829 pVM->rem.s.fIgnoreCR3Load = true;
1830 pVM->rem.s.fIgnoreCpuMode = true;
1831 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1832 pVM->rem.s.fIgnoreCpuMode = false;
1833 pVM->rem.s.fIgnoreCR3Load = false;
1834 }
1835
1836 if (fFlags & CPUM_CHANGED_CR3)
1837 {
1838 pVM->rem.s.fIgnoreCR3Load = true;
1839 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1840 pVM->rem.s.fIgnoreCR3Load = false;
1841 }
1842
1843 if (fFlags & CPUM_CHANGED_GDTR)
1844 {
1845 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1846 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1847 }
1848
1849 if (fFlags & CPUM_CHANGED_IDTR)
1850 {
1851 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1852 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1853 }
1854
1855 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1856 {
1857 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1858 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1859 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1860 }
1861
1862 if (fFlags & CPUM_CHANGED_LDTR)
1863 {
1864 if (fHiddenSelRegsValid)
1865 {
1866 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1867 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1868 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1869 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1870 }
1871 else
1872 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1873 }
1874
1875 if (fFlags & CPUM_CHANGED_CPUID)
1876 {
1877 uint32_t u32Dummy;
1878
1879 /*
1880 * Get the CPUID features.
1881 */
1882 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1883 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1884 }
1885
1886 /* Sync FPU state after CR4, CPUID and EFER (!). */
1887 if (fFlags & CPUM_CHANGED_FPU_REM)
1888 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1889 }
1890
1891 /*
1892 * Sync TR unconditionally to make life simpler.
1893 */
1894 pVM->rem.s.Env.tr.selector = pCtx->tr;
1895 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1896 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1897 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1898 /* Note! do_interrupt will fault if the busy flag is still set... */
1899 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1900
1901 /*
1902 * Update selector registers.
1903 * This must be done *after* we've synced gdt, ldt and crX registers
1904 * since we're reading the GDT/LDT om sync_seg. This will happen with
1905 * saved state which takes a quick dip into rawmode for instance.
1906 */
1907 /*
1908 * Stack; Note first check this one as the CPL might have changed. The
1909 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1910 */
1911
1912 if (fHiddenSelRegsValid)
1913 {
1914 /* The hidden selector registers are valid in the CPU context. */
1915 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1916
1917 /* Set current CPL */
1918 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1919
1920 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1921 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1922 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1923 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1924 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1925 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1926 }
1927 else
1928 {
1929 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1930 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1931 {
1932 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1933
1934 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1935 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1936#ifdef VBOX_WITH_STATISTICS
1937 if (pVM->rem.s.Env.segs[R_SS].newselector)
1938 {
1939 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1940 }
1941#endif
1942 }
1943 else
1944 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1945
1946 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1947 {
1948 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1949 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1950#ifdef VBOX_WITH_STATISTICS
1951 if (pVM->rem.s.Env.segs[R_ES].newselector)
1952 {
1953 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1954 }
1955#endif
1956 }
1957 else
1958 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1959
1960 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1961 {
1962 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1963 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1964#ifdef VBOX_WITH_STATISTICS
1965 if (pVM->rem.s.Env.segs[R_CS].newselector)
1966 {
1967 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1968 }
1969#endif
1970 }
1971 else
1972 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1973
1974 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1975 {
1976 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1977 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1978#ifdef VBOX_WITH_STATISTICS
1979 if (pVM->rem.s.Env.segs[R_DS].newselector)
1980 {
1981 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1982 }
1983#endif
1984 }
1985 else
1986 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1987
1988 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1989 * be the same but not the base/limit. */
1990 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1991 {
1992 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1993 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1994#ifdef VBOX_WITH_STATISTICS
1995 if (pVM->rem.s.Env.segs[R_FS].newselector)
1996 {
1997 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1998 }
1999#endif
2000 }
2001 else
2002 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2003
2004 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2005 {
2006 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2007 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2008#ifdef VBOX_WITH_STATISTICS
2009 if (pVM->rem.s.Env.segs[R_GS].newselector)
2010 {
2011 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2012 }
2013#endif
2014 }
2015 else
2016 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2017 }
2018
2019 /*
2020 * Check for traps.
2021 */
2022 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2023 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2024 if (RT_SUCCESS(rc))
2025 {
2026#ifdef DEBUG
2027 if (u8TrapNo == 0x80)
2028 {
2029 remR3DumpLnxSyscall(pVM);
2030 remR3DumpOBsdSyscall(pVM);
2031 }
2032#endif
2033
2034 pVM->rem.s.Env.exception_index = u8TrapNo;
2035 if (enmType != TRPM_SOFTWARE_INT)
2036 {
2037 pVM->rem.s.Env.exception_is_int = 0;
2038 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2039 }
2040 else
2041 {
2042 /*
2043 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2044 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2045 * for int03 and into.
2046 */
2047 pVM->rem.s.Env.exception_is_int = 1;
2048 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2049 /* int 3 may be generated by one-byte 0xcc */
2050 if (u8TrapNo == 3)
2051 {
2052 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2053 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2054 }
2055 /* int 4 may be generated by one-byte 0xce */
2056 else if (u8TrapNo == 4)
2057 {
2058 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2059 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2060 }
2061 }
2062
2063 /* get error code and cr2 if needed. */
2064 switch (u8TrapNo)
2065 {
2066 case 0x0e:
2067 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2068 /* fallthru */
2069 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2070 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2071 break;
2072
2073 case 0x11: case 0x08:
2074 default:
2075 pVM->rem.s.Env.error_code = 0;
2076 break;
2077 }
2078
2079 /*
2080 * We can now reset the active trap since the recompiler is gonna have a go at it.
2081 */
2082 rc = TRPMResetTrap(pVM);
2083 AssertRC(rc);
2084 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2085 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2086 }
2087
2088 /*
2089 * Clear old interrupt request flags; Check for pending hardware interrupts.
2090 * (See @remark for why we don't check for other FFs.)
2091 */
2092 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2093 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2094 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2095 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2096
2097 /*
2098 * We're now in REM mode.
2099 */
2100 pVM->rem.s.fInREM = true;
2101 pVM->rem.s.fInStateSync = false;
2102 pVM->rem.s.cCanExecuteRaw = 0;
2103 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2104 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2105 return VINF_SUCCESS;
2106}
2107
2108
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /* FPU/SSE state first. */
    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

    /* General purpose registers. */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    /* Selector values; the hidden (cached) parts follow further down. */
    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors REM never got around to reloading (newselector != 0). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

    /* Instruction pointer and flags. */
#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    /* Control registers; a VME change requires a shadow TSS resync. */
    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    /* Debug registers. */
    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR; a base change means SELM must resync its shadow GDT. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    /* IDTR; a base change means TRPM must resync its shadow IDT. */
    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    /* LDTR. QEmu keeps the 2nd descriptor dword in 'flags'; 0xF0FF strips
       the limit 19:16 nibble so only the attribute word is stored. */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }

    /* TR; the busy bit is forced back on here (if TR is non-null). */
    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /* Hidden segment register parts (base/limit/attributes). */
    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * A pending recompiler exception (valid vector 0..255) is handed back to
     * TRPM, together with the error code / CR2 for the vectors that have one.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're not longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2354
2355
2356/**
2357 * This is called by the disassembler when it wants to update the cpu state
2358 * before for instance doing a register dump.
2359 */
2360static void remR3StateUpdate(PVM pVM)
2361{
2362 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2363 unsigned i;
2364
2365 Assert(pVM->rem.s.fInREM);
2366
2367 /*
2368 * Copy back the registers.
2369 * This is done in the order they are declared in the CPUMCTX structure.
2370 */
2371
2372 /** @todo FOP */
2373 /** @todo FPUIP */
2374 /** @todo CS */
2375 /** @todo FPUDP */
2376 /** @todo DS */
2377 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2378 pCtx->fpu.MXCSR = 0;
2379 pCtx->fpu.MXCSR_MASK = 0;
2380
2381 /** @todo check if FPU/XMM was actually used in the recompiler */
2382 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2383//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2384
2385#ifdef TARGET_X86_64
2386 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2387 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2388 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2389 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2390 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2391 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2392 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2393 pCtx->r8 = pVM->rem.s.Env.regs[8];
2394 pCtx->r9 = pVM->rem.s.Env.regs[9];
2395 pCtx->r10 = pVM->rem.s.Env.regs[10];
2396 pCtx->r11 = pVM->rem.s.Env.regs[11];
2397 pCtx->r12 = pVM->rem.s.Env.regs[12];
2398 pCtx->r13 = pVM->rem.s.Env.regs[13];
2399 pCtx->r14 = pVM->rem.s.Env.regs[14];
2400 pCtx->r15 = pVM->rem.s.Env.regs[15];
2401
2402 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2403#else
2404 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2405 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2406 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2407 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2408 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2409 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2410 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2411
2412 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2413#endif
2414
2415 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2416
2417 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2418 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2419 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2420 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2421 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2422
2423#ifdef TARGET_X86_64
2424 pCtx->rip = pVM->rem.s.Env.eip;
2425 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2426#else
2427 pCtx->eip = pVM->rem.s.Env.eip;
2428 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2429#endif
2430
2431 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2432 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2433 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2434 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2435 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2436 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2437
2438 for (i = 0; i < 8; i++)
2439 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2440
2441 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2442 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2443 {
2444 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2445 STAM_COUNTER_INC(&gStatREMGDTChange);
2446 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2447 }
2448
2449 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2450 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2451 {
2452 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2453 STAM_COUNTER_INC(&gStatREMIDTChange);
2454 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2455 }
2456
2457 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2458 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2459 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2460 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2461 {
2462 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2463 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2464 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2465 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2466 STAM_COUNTER_INC(&gStatREMLDTRChange);
2467 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2468 }
2469
2470 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2471 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2472 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2473 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2474 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2475 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2476 : 0) )
2477 {
2478 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2479 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2480 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2481 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2482 pCtx->tr = pVM->rem.s.Env.tr.selector;
2483 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2484 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2485 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2486 if (pCtx->trHid.Attr.u)
2487 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2488 STAM_COUNTER_INC(&gStatREMTRChange);
2489 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2490 }
2491
2492 /** @todo These values could still be out of sync! */
2493 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2494 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2495 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2496 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2497
2498 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2499 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2500 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2501
2502 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2503 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2504 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2505
2506 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2507 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2508 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2509
2510 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2511 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2512 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2513
2514 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2515 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2516 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2517
2518 /* Sysenter MSR */
2519 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2520 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2521 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2522
2523 /* System MSRs. */
2524 pCtx->msrEFER = pVM->rem.s.Env.efer;
2525 pCtx->msrSTAR = pVM->rem.s.Env.star;
2526 pCtx->msrPAT = pVM->rem.s.Env.pat;
2527#ifdef TARGET_X86_64
2528 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2529 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2530 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2531 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2532#endif
2533
2534}
2535
2536
2537/**
2538 * Update the VMM state information if we're currently in REM.
2539 *
2540 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2541 * we're currently executing in REM and the VMM state is invalid. This method will of
2542 * course check that we're executing in REM before syncing any data over to the VMM.
2543 *
2544 * @param pVM The VM handle.
2545 */
2546REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2547{
2548 if (pVM->rem.s.fInREM)
2549 remR3StateUpdate(pVM);
2550}
2551
2552
2553#undef LOG_GROUP
2554#define LOG_GROUP LOG_GROUP_REM
2555
2556
2557/**
2558 * Notify the recompiler about Address Gate 20 state change.
2559 *
2560 * This notification is required since A20 gate changes are
2561 * initialized from a device driver and the VM might just as
2562 * well be in REM mode as in RAW mode.
2563 *
2564 * @param pVM VM handle.
2565 * @param fEnable True if the gate should be enabled.
2566 * False if the gate should be disabled.
2567 */
2568REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2569{
2570 bool fSaved;
2571
2572 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2573 VM_ASSERT_EMT(pVM);
2574
2575 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2576 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2577
2578 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2579
2580 pVM->rem.s.fIgnoreAll = fSaved;
2581}
2582
2583
2584/**
2585 * Replays the invalidated recorded pages.
2586 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2587 *
2588 * @param pVM VM handle.
2589 */
2590REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2591{
2592 RTUINT i;
2593
2594 VM_ASSERT_EMT(pVM);
2595
2596 /*
2597 * Sync the required registers.
2598 */
2599 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2600 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2601 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2602 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2603
2604 /*
2605 * Replay the flushes.
2606 */
2607 pVM->rem.s.fIgnoreInvlPg = true;
2608 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2609 {
2610 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2611 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2612 }
2613 pVM->rem.s.fIgnoreInvlPg = false;
2614 pVM->rem.s.cInvalidatedPages = 0;
2615}
2616
2617
2618/**
2619 * Replays the handler notification changes
2620 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2621 *
2622 * @param pVM VM handle.
2623 */
2624REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2625{
2626 /*
2627 * Replay the flushes.
2628 */
2629 RTUINT i;
2630 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2631
2632 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2633 VM_ASSERT_EMT(pVM);
2634
2635 pVM->rem.s.cHandlerNotifications = 0;
2636 for (i = 0; i < c; i++)
2637 {
2638 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2639 switch (pRec->enmKind)
2640 {
2641 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2642 REMR3NotifyHandlerPhysicalRegister(pVM,
2643 pRec->u.PhysicalRegister.enmType,
2644 pRec->u.PhysicalRegister.GCPhys,
2645 pRec->u.PhysicalRegister.cb,
2646 pRec->u.PhysicalRegister.fHasHCHandler);
2647 break;
2648
2649 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2650 REMR3NotifyHandlerPhysicalDeregister(pVM,
2651 pRec->u.PhysicalDeregister.enmType,
2652 pRec->u.PhysicalDeregister.GCPhys,
2653 pRec->u.PhysicalDeregister.cb,
2654 pRec->u.PhysicalDeregister.fHasHCHandler,
2655 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2656 break;
2657
2658 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2659 REMR3NotifyHandlerPhysicalModify(pVM,
2660 pRec->u.PhysicalModify.enmType,
2661 pRec->u.PhysicalModify.GCPhysOld,
2662 pRec->u.PhysicalModify.GCPhysNew,
2663 pRec->u.PhysicalModify.cb,
2664 pRec->u.PhysicalModify.fHasHCHandler,
2665 pRec->u.PhysicalModify.fRestoreAsRAM);
2666 break;
2667
2668 default:
2669 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2670 break;
2671 }
2672 }
2673 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2674}
2675
2676
2677/**
2678 * Notify REM about changed code page.
2679 *
2680 * @returns VBox status code.
2681 * @param pVM VM handle.
2682 * @param pvCodePage Code page address
2683 */
2684REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2685{
2686#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2687 int rc;
2688 RTGCPHYS PhysGC;
2689 uint64_t flags;
2690
2691 VM_ASSERT_EMT(pVM);
2692
2693 /*
2694 * Get the physical page address.
2695 */
2696 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2697 if (rc == VINF_SUCCESS)
2698 {
2699 /*
2700 * Sync the required registers and flush the whole page.
2701 * (Easier to do the whole page than notifying it about each physical
2702 * byte that was changed.
2703 */
2704 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2705 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2706 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2707 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2708
2709 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2710 }
2711#endif
2712 return VINF_SUCCESS;
2713}
2714
2715
2716/**
2717 * Notification about a successful MMR3PhysRegister() call.
2718 *
2719 * @param pVM VM handle.
2720 * @param GCPhys The physical address the RAM.
2721 * @param cb Size of the memory.
2722 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2723 */
2724REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2725{
2726 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2727 VM_ASSERT_EMT(pVM);
2728
2729 /*
2730 * Validate input - we trust the caller.
2731 */
2732 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2733 Assert(cb);
2734 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2735#ifdef VBOX_WITH_NEW_PHYS_CODE
2736 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2737#endif
2738
2739 /*
2740 * Base ram? Update GCPhysLastRam.
2741 */
2742#ifdef VBOX_WITH_NEW_PHYS_CODE
2743 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2744#else
2745 if (!GCPhys)
2746#endif
2747 {
2748 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2749 {
2750 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2751 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2752 }
2753 }
2754
2755 /*
2756 * Register the ram.
2757 */
2758 Assert(!pVM->rem.s.fIgnoreAll);
2759 pVM->rem.s.fIgnoreAll = true;
2760
2761#ifdef VBOX_WITH_NEW_PHYS_CODE
2762 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2763#else
2764 if (!GCPhys)
2765 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2766 else
2767 {
2768 if (fFlags & MM_RAM_FLAGS_RESERVED)
2769 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2770 else
2771 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2772 }
2773#endif
2774 Assert(pVM->rem.s.fIgnoreAll);
2775 pVM->rem.s.fIgnoreAll = false;
2776}
2777
2778#ifndef VBOX_WITH_NEW_PHYS_CODE
2779
2780/**
2781 * Notification about a successful PGMR3PhysRegisterChunk() call.
2782 *
2783 * @param pVM VM handle.
2784 * @param GCPhys The physical address the RAM.
2785 * @param cb Size of the memory.
2786 * @param pvRam The HC address of the RAM.
2787 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2788 */
2789REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2790{
2791 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2792 VM_ASSERT_EMT(pVM);
2793
2794 /*
2795 * Validate input - we trust the caller.
2796 */
2797 Assert(pvRam);
2798 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2799 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2800 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2801 Assert(fFlags == 0 /* normal RAM */);
2802 Assert(!pVM->rem.s.fIgnoreAll);
2803 pVM->rem.s.fIgnoreAll = true;
2804 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2805 Assert(pVM->rem.s.fIgnoreAll);
2806 pVM->rem.s.fIgnoreAll = false;
2807}
2808
2809
2810/**
2811 * Grows dynamically allocated guest RAM.
2812 * Will raise a fatal error if the operation fails.
2813 *
2814 * @param physaddr The physical address.
2815 */
2816void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2817{
2818 int rc;
2819 PVM pVM = cpu_single_env->pVM;
2820 const RTGCPHYS GCPhys = physaddr;
2821
2822 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2823 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2824 if (RT_SUCCESS(rc))
2825 return;
2826
2827 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2828 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2829 AssertFatalFailed();
2830}
2831
2832#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2833
/**
 * Notification about a successful MMR3PhysRomRegister() call.
 *
 * @param pVM VM handle.
 * @param GCPhys The physical address of the ROM.
 * @param cb The size of the ROM.
 * @param pvCopy Pointer to the ROM copy.
 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
 * This function will be called when ever the protection of the
 * shadow ROM changes (at reset and end of POST).
 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Register the rom.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    /* Writable shadow ROM is registered as plain RAM (no flag bits);
       normal ROM gets the read-only IO_MEM_ROM type. */
    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2868
2869
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * @param pVM VM Handle.
 * @param GCPhys Start physical address.
 * @param cb The size of the range.
 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    /* Mark the whole range as unassigned in QEMU's physical memory map. */
    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2900
2901
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhys Handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 *
 * @remark MMR3PhysRomRegister assumes that this function will not apply the
 * Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));
    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);

    /* Flush any queued handler notifications first so ordering is preserved. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    /* MMIO ranges always get the MMIO memory type; other handler types only
       get the handler type when a host-context callback exists (see @remark). */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2936
2937
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhys Handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);

    /* Flush any queued handler notifications first so ordering is preserved. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Not restoring as RAM implies the range lies above guest RAM. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            /* Restore as plain 1:1 mapped RAM. */
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2981
2982
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * @param pVM VM Handle.
 * @param enmType Handler type.
 * @param GCPhysOld Old handler range address.
 * @param GCPhysNew New handler range address.
 * @param cb Size of the handler range.
 * @param fHasHCHandler Set if the handler has a HC callback function.
 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* MMIO handlers are never moved this way. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    /* Flush any queued handler notifications first so ordering is preserved. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    if (fHasHCHandler)
    {
        Assert(!pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = true;

        /*
         * Reset the old page.
         */
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);

        Assert(pVM->rem.s.fIgnoreAll);
        pVM->rem.s.fIgnoreAll = false;
    }
}
3033
3034
/**
 * Checks if we're handling access to this page or not.
 *
 * @returns true if we're trapping access.
 * @returns false if we aren't.
 * @param pVM The VM handle.
 * @param GCPhys The physical address.
 *
 * @remark This function will only work correctly in VBOX_STRICT builds!
 */
REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
{
#ifdef VBOX_STRICT
    unsigned long off;
    /* Make sure all pending notifications are applied before querying. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    /* The memory type is stored in the low (page offset) bits of the entry. */
    off = get_phys_page_offset(GCPhys);
    return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
        || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
        || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
#else
    /* Non-strict builds have no way to tell; see @remark. */
    return false;
#endif
}
3060
3061
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param env The cpu environment.
 * @param addr The virtual address.
 * @param pTLBEntry The TLB entry.
 * @param ioTLBEntry The I/O TLB entry (memory type in the low bits, addend above).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Anything else (e.g. MMIO) is fatal: dump diagnostics and abort. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3100
/**
 * Read guest RAM and ROM.
 *
 * @param SrcGCPhys The source address (guest physical).
 * @param pvDst The destination address.
 * @param cb Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3118
3119
3120/**
3121 * Read guest RAM and ROM, unsigned 8-bit.
3122 *
3123 * @param SrcGCPhys The source address (guest physical).
3124 */
3125RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3126{
3127 uint8_t val;
3128 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3129 VBOX_CHECK_ADDR(SrcGCPhys);
3130 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3131 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3132#ifdef VBOX_DEBUG_PHYS
3133 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3134#endif
3135 return val;
3136}
3137
3138
3139/**
3140 * Read guest RAM and ROM, signed 8-bit.
3141 *
3142 * @param SrcGCPhys The source address (guest physical).
3143 */
3144RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3145{
3146 int8_t val;
3147 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3148 VBOX_CHECK_ADDR(SrcGCPhys);
3149 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3150 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3151#ifdef VBOX_DEBUG_PHYS
3152 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3153#endif
3154 return val;
3155}
3156
3157
3158/**
3159 * Read guest RAM and ROM, unsigned 16-bit.
3160 *
3161 * @param SrcGCPhys The source address (guest physical).
3162 */
3163RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3164{
3165 uint16_t val;
3166 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3167 VBOX_CHECK_ADDR(SrcGCPhys);
3168 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3169 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3170#ifdef VBOX_DEBUG_PHYS
3171 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3172#endif
3173 return val;
3174}
3175
3176
3177/**
3178 * Read guest RAM and ROM, signed 16-bit.
3179 *
3180 * @param SrcGCPhys The source address (guest physical).
3181 */
3182RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3183{
3184 int16_t val;
3185 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3186 VBOX_CHECK_ADDR(SrcGCPhys);
3187 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3188 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3189#ifdef VBOX_DEBUG_PHYS
3190 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3191#endif
3192 return val;
3193}
3194
3195
3196/**
3197 * Read guest RAM and ROM, unsigned 32-bit.
3198 *
3199 * @param SrcGCPhys The source address (guest physical).
3200 */
3201RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3202{
3203 uint32_t val;
3204 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3205 VBOX_CHECK_ADDR(SrcGCPhys);
3206 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3207 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3208#ifdef VBOX_DEBUG_PHYS
3209 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3210#endif
3211 return val;
3212}
3213
3214
3215/**
3216 * Read guest RAM and ROM, signed 32-bit.
3217 *
3218 * @param SrcGCPhys The source address (guest physical).
3219 */
3220RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3221{
3222 int32_t val;
3223 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3224 VBOX_CHECK_ADDR(SrcGCPhys);
3225 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3226 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3227#ifdef VBOX_DEBUG_PHYS
3228 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3229#endif
3230 return val;
3231}
3232
3233
3234/**
3235 * Read guest RAM and ROM, unsigned 64-bit.
3236 *
3237 * @param SrcGCPhys The source address (guest physical).
3238 */
3239uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3240{
3241 uint64_t val;
3242 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3243 VBOX_CHECK_ADDR(SrcGCPhys);
3244 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3245 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3246#ifdef VBOX_DEBUG_PHYS
3247 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3248#endif
3249 return val;
3250}
3251
3252
3253/**
3254 * Read guest RAM and ROM, signed 64-bit.
3255 *
3256 * @param SrcGCPhys The source address (guest physical).
3257 */
3258int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3259{
3260 int64_t val;
3261 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3262 VBOX_CHECK_ADDR(SrcGCPhys);
3263 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3264 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3265#ifdef VBOX_DEBUG_PHYS
3266 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3267#endif
3268 return val;
3269}
3270
3271
/**
 * Write guest RAM.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param pvSrc The source address.
 * @param cb Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3289
3290
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3307
3308
/**
 * Write guest RAM, unsigned 16-bit.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3325
3326
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param DstGCPhys The destination address (guest physical).
 * @param val Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3343
3344
3345/**
3346 * Write guest RAM, unsigned 64-bit.
3347 *
3348 * @param DstGCPhys The destination address (guest physical).
3349 * @param val Value
3350 */
3351void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3352{
3353 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3354 VBOX_CHECK_ADDR(DstGCPhys);
3355 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3356 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3357#ifdef VBOX_DEBUG_PHYS
3358 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3359#endif
3360}
3361
3362#undef LOG_GROUP
3363#define LOG_GROUP LOG_GROUP_REM_MMIO
3364
3365/** Read MMIO memory. */
3366static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3367{
3368 uint32_t u32 = 0;
3369 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3370 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3371 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3372 return u32;
3373}
3374
3375/** Read MMIO memory. */
3376static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3377{
3378 uint32_t u32 = 0;
3379 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3380 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3381 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3382 return u32;
3383}
3384
3385/** Read MMIO memory. */
3386static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3387{
3388 uint32_t u32 = 0;
3389 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3390 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3391 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3392 return u32;
3393}
3394
/** Write to MMIO memory, byte sized. Forwarded to IOM; failures are only asserted. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3403
/** Write to MMIO memory, word sized. Forwarded to IOM; failures are only asserted. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3412
/** Write to MMIO memory, dword sized. Forwarded to IOM; failures are only asserted. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3421
3422
3423#undef LOG_GROUP
3424#define LOG_GROUP LOG_GROUP_REM_HANDLER
3425
3426/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3427
3428static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3429{
3430 uint8_t u8;
3431 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3432 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3433 return u8;
3434}
3435
3436static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3437{
3438 uint16_t u16;
3439 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3440 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3441 return u16;
3442}
3443
3444static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3445{
3446 uint32_t u32;
3447 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3448 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3449 return u32;
3450}
3451
/** Handler-memory byte write: goes through PGM so access handlers fire.
 *  Note: writes the low byte of the host-endian u32 buffer. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3457
/** Handler-memory word write: goes through PGM so access handlers fire.
 *  Note: writes the low word of the host-endian u32 buffer. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3463
/** Handler-memory dword write: goes through PGM so access handlers fire. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3469
3470/* -+- disassembly -+- */
3471
3472#undef LOG_GROUP
3473#define LOG_GROUP LOG_GROUP_REM_DISAS
3474
3475
3476/**
3477 * Enables or disables singled stepped disassembly.
3478 *
3479 * @returns VBox status code.
3480 * @param pVM VM handle.
3481 * @param fEnable To enable set this flag, to disable clear it.
3482 */
3483static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3484{
3485 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3486 VM_ASSERT_EMT(pVM);
3487
3488 if (fEnable)
3489 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3490 else
3491 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3492 return VINF_SUCCESS;
3493}
3494
3495
/**
 * Enables or disables singled stepped disassembly.
 *
 * @returns VBox status code.
 * @param pVM VM handle.
 * @param fEnable To enable set this flag, to disable clear it.
 */
REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    PVMREQ pReq;
    int rc;

    LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
    /* On the EMT we can do it directly; otherwise marshal the call over. */
    if (VM_IS_EMT(pVM))
        return remR3DisasEnableStepping(pVM, fEnable);

    rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
}
3519
3520
3521#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument it prints the current state; with one boolean argument
 * it enables/disables single-stepped disassembly via REMR3DisasEnableStepping.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
3544#endif
3545
3546
/**
 * Disassembles one instruction and prints it to the log.
 *
 * @returns Success indicator.
 * @param env Pointer to the recompiler CPU structure.
 * @param f32BitCode Indicates that whether or not the code should
 * be disassembled as 16 or 32 bit. If -1 the CS
 * selector will be inspected.
 * @param pszPrefix Log prefix; also passed to the 'cpumguest' info handler.
 */
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
{
    PVM pVM = env->pVM;
    const bool fLog = LogIsEnabled();
    const bool fLog2 = LogIs2Enabled();
    int rc = VINF_SUCCESS;

    /*
     * Don't bother if there ain't any log output to do.
     */
    if (!fLog && !fLog2)
        return true;

    /*
     * Update the state so DBGF reads the correct register values.
     */
    remR3StateUpdate(pVM);

    /*
     * Log registers if requested.
     */
    if (!fLog2)
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);

    /*
     * Disassemble to log.
     */
    if (fLog)
        rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);

    return RT_SUCCESS(rc);
}
3589
3590
/**
 * Disassemble recompiled code.
 *
 * @param phFile Ignored unless DEBUG_TMP_LOGGING is defined; logfile usually.
 * @param pvCode Pointer to the code block.
 * @param cb Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* NB: in this configuration the whole body below is gated on Log2. */
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                /* On a decode failure, advance one byte and keep going (x86). */
                DISAS_PRINTF("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
3638
3639
/**
 * Disassemble guest code.
 *
 * @param phFile Ignored unless DEBUG_TMP_LOGGING is defined; logfile usually.
 * @param uCode The guest address of the code to disassemble. (flat?)
 * @param cb Number of bytes to disassemble.
 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
 */
void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* NB: in this configuration the whole body below is gated on Log2. */
    if (LogIs2Enabled())
#endif
    {
        PVM pVM = cpu_single_env->pVM;
        RTSEL cs;
        RTGCUINTPTR eip;

        /*
         * Update the state so DBGF reads the correct register values (flags).
         */
        remR3StateUpdate(pVM);

        /*
         * Do the disassembling.
         */
        DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
        /* uCode is flat; recover the CS:EIP form by subtracting the CS base. */
        cs = cpu_single_env->segs[R_CS].selector;
        eip = uCode - cpu_single_env->segs[R_CS].base;
        for (;;)
        {
            char szBuf[256];
            uint32_t cbInstr;
            int rc = DBGFR3DisasInstrEx(pVM,
                                        cs,
                                        eip,
                                        0,
                                        szBuf, sizeof(szBuf),
                                        &cbInstr);
            if (RT_SUCCESS(rc))
                DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
            else
            {
                /* On failure, log what we have and skip a single byte. */
                DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
                cbInstr = 1;
            }

            /* next */
            if (cb <= cbInstr)
                break;
            cb -= cbInstr;
            uCode += cbInstr;
            eip += cbInstr;
        }
    }
#undef DISAS_PRINTF
}
3700
3701
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer.
 * @param orig_addr The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    RTGCINTPTR off = 0;
    DBGFSYMBOL Sym;
    PVM pVM = cpu_single_env->pVM;
    int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
    if (RT_SUCCESS(rc))
    {
        /* Static buffer: not reentrant; result is overwritten by the next call.
           Note: the formatted names deliberately carry a trailing newline. */
        static char szSym[sizeof(Sym.szName) + 48];
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
3727
3728
3729#undef LOG_GROUP
3730#define LOG_GROUP LOG_GROUP_REM
3731
3732
3733/* -+- FF notifications -+- */
3734
3735
/**
 * Notification about a pending interrupt.
 *
 * @param pVM VM Handle.
 * @param u8Interrupt Interrupt
 * @thread The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
{
    /* Only one interrupt may be pending at a time. */
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3748
/**
 * Queries the currently pending interrupt.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param pVM VM Handle.
 * @thread The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
{
    return pVM->rem.s.u32PendingInterrupt;
}
3760
/**
 * Notification about the interrupt FF being set.
 *
 * @param pVM VM Handle.
 * @thread The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
              (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        /* Flag the external hard interrupt on the recompiler CPU state. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_HARD);
    }
}
3777
3778
/**
 * Notification about the interrupt FF being cleared.
 *
 * @param pVM VM Handle.
 * @thread Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    /* NOTE(review): this clears CPU_INTERRUPT_HARD while REMR3NotifyInterruptSet
       sets CPU_INTERRUPT_EXTERNAL_HARD -- verify the asymmetry is intentional. */
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
3791
3792
/**
 * Notification about pending timer(s).
 *
 * @param pVM VM Handle.
 * @thread Any.
 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
{
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        /* Flag the external timer request on the recompiler CPU state. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_TIMER);
    }
}
3810
3811
/**
 * Notification about pending DMA transfers.
 *
 * @param pVM VM Handle.
 * @thread Any.
 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Flag the external DMA request on the recompiler CPU state. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_DMA);
    }
}
3827
3828
/**
 * Notification about pending queue item(s).
 *
 * @param pVM VM Handle.
 * @thread Any.
 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Request an exit from the recompiler loop to service the queue. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
3844
3845
/**
 * Notification about pending FF set by an external thread.
 *
 * @param pVM VM handle.
 * @thread Any.
 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Request an exit from the recompiler loop so the FF gets serviced. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
3861
3862
3863#ifdef VBOX_WITH_STATISTICS
3864void remR3ProfileStart(int statcode)
3865{
3866 STAMPROFILEADV *pStat;
3867 switch(statcode)
3868 {
3869 case STATS_EMULATE_SINGLE_INSTR:
3870 pStat = &gStatExecuteSingleInstr;
3871 break;
3872 case STATS_QEMU_COMPILATION:
3873 pStat = &gStatCompilationQEmu;
3874 break;
3875 case STATS_QEMU_RUN_EMULATED_CODE:
3876 pStat = &gStatRunCodeQEmu;
3877 break;
3878 case STATS_QEMU_TOTAL:
3879 pStat = &gStatTotalTimeQEmu;
3880 break;
3881 case STATS_QEMU_RUN_TIMERS:
3882 pStat = &gStatTimers;
3883 break;
3884 case STATS_TLB_LOOKUP:
3885 pStat= &gStatTBLookup;
3886 break;
3887 case STATS_IRQ_HANDLING:
3888 pStat= &gStatIRQ;
3889 break;
3890 case STATS_RAW_CHECK:
3891 pStat = &gStatRawCheck;
3892 break;
3893
3894 default:
3895 AssertMsgFailed(("unknown stat %d\n", statcode));
3896 return;
3897 }
3898 STAM_PROFILE_ADV_START(pStat, a);
3899}
3900
3901
3902void remR3ProfileStop(int statcode)
3903{
3904 STAMPROFILEADV *pStat;
3905 switch(statcode)
3906 {
3907 case STATS_EMULATE_SINGLE_INSTR:
3908 pStat = &gStatExecuteSingleInstr;
3909 break;
3910 case STATS_QEMU_COMPILATION:
3911 pStat = &gStatCompilationQEmu;
3912 break;
3913 case STATS_QEMU_RUN_EMULATED_CODE:
3914 pStat = &gStatRunCodeQEmu;
3915 break;
3916 case STATS_QEMU_TOTAL:
3917 pStat = &gStatTotalTimeQEmu;
3918 break;
3919 case STATS_QEMU_RUN_TIMERS:
3920 pStat = &gStatTimers;
3921 break;
3922 case STATS_TLB_LOOKUP:
3923 pStat= &gStatTBLookup;
3924 break;
3925 case STATS_IRQ_HANDLING:
3926 pStat= &gStatIRQ;
3927 break;
3928 case STATS_RAW_CHECK:
3929 pStat = &gStatRawCheck;
3930 break;
3931 default:
3932 AssertMsgFailed(("unknown stat %d\n", statcode));
3933 return;
3934 }
3935 STAM_PROFILE_ADV_STOP(pStat, a);
3936}
3937#endif
3938
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code in the REM state and interrupts the recompiler
 * loop so it exits back to EM, which then acts on the raised status.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc (VBox status code) to raise.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);          /* only meaningful while executing inside REM */
    VM_ASSERT_EMT(pVM);                 /* must be called on the emulation thread */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);   /* force the inner loop to exit */
}
3953
3954
3955/* -+- timers -+- */
3956
/**
 * Reads the guest TSC for the recompiler.
 *
 * @returns The current guest TSC value as provided by TM.
 * @param   env     The recompiler CPU state (carries the VM handle).
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
3962
3963
3964/* -+- interrupts -+- */
3965
/**
 * Raises the legacy FPU error line (FERR#) by asserting ISA IRQ 13.
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
3971
3972int cpu_get_pic_interrupt(CPUState *env)
3973{
3974 uint8_t u8Interrupt;
3975 int rc;
3976
3977 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
3978 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
3979 * with the (a)pic.
3980 */
3981 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
3982 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
3983 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
3984 * remove this kludge. */
3985 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
3986 {
3987 rc = VINF_SUCCESS;
3988 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
3989 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
3990 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
3991 }
3992 else
3993 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
3994
3995 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
3996 if (RT_SUCCESS(rc))
3997 {
3998 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
3999 env->interrupt_request |= CPU_INTERRUPT_HARD;
4000 return u8Interrupt;
4001 }
4002 return -1;
4003}
4004
4005
4006/* -+- local apic -+- */
4007
/**
 * Writes the APIC base MSR via PDM.
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   val     The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4013
4014uint64_t cpu_get_apic_base(CPUX86State *env)
4015{
4016 uint64_t u64;
4017 int rc = PDMApicGetBase(env->pVM, &u64);
4018 if (RT_SUCCESS(rc))
4019 {
4020 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4021 return u64;
4022 }
4023 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4024 return 0;
4025}
4026
/**
 * Writes the APIC task priority register (TPR) via PDM.
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   val     The new TPR value.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4032
4033uint8_t cpu_get_apic_tpr(CPUX86State *env)
4034{
4035 uint8_t u8;
4036 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4037 if (RT_SUCCESS(rc))
4038 {
4039 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4040 return u8;
4041 }
4042 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4043 return 0;
4044}
4045
4046
4047uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4048{
4049 uint64_t value;
4050 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4051 if (RT_SUCCESS(rc))
4052 {
4053 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4054 return value;
4055 }
4056 /** @todo: exception ? */
4057 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4058 return value;
4059}
4060
/**
 * Writes an APIC MSR for the recompiler.
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4067
/**
 * Reads a guest MSR via CPUM.
 *
 * @returns The MSR value.
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4072
/**
 * Writes a guest MSR via CPUM.
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4077
4078/* -+- I/O Ports -+- */
4079
4080#undef LOG_GROUP
4081#define LOG_GROUP LOG_GROUP_REM_IOPORT
4082
4083void cpu_outb(CPUState *env, int addr, int val)
4084{
4085 int rc;
4086
4087 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4088 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4089
4090 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4091 if (RT_LIKELY(rc == VINF_SUCCESS))
4092 return;
4093 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4094 {
4095 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4096 remR3RaiseRC(env->pVM, rc);
4097 return;
4098 }
4099 remAbort(rc, __FUNCTION__);
4100}
4101
4102void cpu_outw(CPUState *env, int addr, int val)
4103{
4104 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4105 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4106 if (RT_LIKELY(rc == VINF_SUCCESS))
4107 return;
4108 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4109 {
4110 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4111 remR3RaiseRC(env->pVM, rc);
4112 return;
4113 }
4114 remAbort(rc, __FUNCTION__);
4115}
4116
4117void cpu_outl(CPUState *env, int addr, int val)
4118{
4119 int rc;
4120 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4121 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4122 if (RT_LIKELY(rc == VINF_SUCCESS))
4123 return;
4124 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4125 {
4126 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4127 remR3RaiseRC(env->pVM, rc);
4128 return;
4129 }
4130 remAbort(rc, __FUNCTION__);
4131}
4132
4133int cpu_inb(CPUState *env, int addr)
4134{
4135 uint32_t u32 = 0;
4136 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4137 if (RT_LIKELY(rc == VINF_SUCCESS))
4138 {
4139 if (/*addr != 0x61 && */addr != 0x71)
4140 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4141 return (int)u32;
4142 }
4143 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4144 {
4145 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4146 remR3RaiseRC(env->pVM, rc);
4147 return (int)u32;
4148 }
4149 remAbort(rc, __FUNCTION__);
4150 return 0xff;
4151}
4152
4153int cpu_inw(CPUState *env, int addr)
4154{
4155 uint32_t u32 = 0;
4156 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4157 if (RT_LIKELY(rc == VINF_SUCCESS))
4158 {
4159 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4160 return (int)u32;
4161 }
4162 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4163 {
4164 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4165 remR3RaiseRC(env->pVM, rc);
4166 return (int)u32;
4167 }
4168 remAbort(rc, __FUNCTION__);
4169 return 0xffff;
4170}
4171
4172int cpu_inl(CPUState *env, int addr)
4173{
4174 uint32_t u32 = 0;
4175 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4176 if (RT_LIKELY(rc == VINF_SUCCESS))
4177 {
4178//if (addr==0x01f0 && u32 == 0x6b6d)
4179// loglevel = ~0;
4180 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4181 return (int)u32;
4182 }
4183 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4184 {
4185 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4186 remR3RaiseRC(env->pVM, rc);
4187 return (int)u32;
4188 }
4189 remAbort(rc, __FUNCTION__);
4190 return 0xffffffff;
4191}
4192
4193#undef LOG_GROUP
4194#define LOG_GROUP LOG_GROUP_REM
4195
4196
4197/* -+- helpers and misc other interfaces -+- */
4198
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * Simply forwards to CPUM so the guest sees the configured CPUID leaves.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4216
4217
4218#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Compiled out (see the enclosing #if 0); kept for reference. Logs the
 * message, asserts, syncs REM state back if inside the recompiler, and
 * forwards the fatal error to EM. Does not return.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4246#endif
4247
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Logs the message, asserts (release build included), syncs the REM state
 * back if we are inside the recompiler, and hands the fatal error to EM.
 * Does not return.
 *
 * @param   env         The recompiler CPU state.
 * @param   pszFormat   Format string for the error message.
 * @param   ...         Format arguments.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    /* Restart the va_list: the %N formatting above consumed it. */
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4281
4282
/**
 * Aborts the VM.
 *
 * Logs and asserts, syncs the REM state back to the VMM if we are inside
 * the recompiler, then hands the fatal status to EM. Does not return.
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;      /* NOTE(review): relies on cpu_single_env being set - confirm callers */
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4308
4309
/**
 * Dumps a linux system call.
 *
 * Logs the syscall name (from the i386 syscall table below) together with
 * the register arguments; purely diagnostic, no state is modified.
 *
 * @param pVM VM handle.
 */
void remR3DumpLnxSyscall(PVM pVM)
{
    /* i386 Linux syscall names indexed by syscall number (eax). */
    static const char *apsz[] =
    {
        "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open", /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink", /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod", /* 15 */
        "sys_lchown16",
        "sys_ni_syscall", /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid", /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime", /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime", /* 30 */
        "sys_ni_syscall", /* old stty syscall holder */
        "sys_ni_syscall", /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall", /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir", /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall", /* old prof syscall holder */
        "sys_brk", /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16", /* 50 */
        "sys_acct",
        "sys_umount", /* recycled never used phys() */
        "sys_ni_syscall", /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl", /* 55 */
        "sys_ni_syscall", /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall", /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask", /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp", /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16", /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit", /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16", /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink", /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap", /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16", /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall", /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs", /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer", /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl", /* 110 */
        "sys_vhangup",
        "sys_ni_syscall", /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff", /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone", /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect", /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall", /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall", /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs", /* 135 */
        "sys_personality",
        "sys_ni_syscall", /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek", /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv", /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock", /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam", /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min", /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16", /* 165 */
        "sys_vm86",
        "sys_ni_syscall", /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16", /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask", /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64", /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset", /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall", /* reserved for streams1 */
        "sys_ni_syscall", /* reserved for streams2 */
        "sys_vfork", /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64", /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid", /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups", /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid", /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid", /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64", /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall", /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead", /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr", /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr", /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex", /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup", /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64", /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl", /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime", /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime", /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill", /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall" /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
                     CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
            break;

    }
}
4608
4609
/**
 * Dumps an OpenBSD system call.
 *
 * Logs the syscall name (from the table below) plus up to eight stack
 * arguments read from the guest stack; purely diagnostic.
 *
 * @param pVM VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* OpenBSD/i386 syscall names indexed by syscall number (eax). */
    static const char *apsz[] =
    {
        "SYS_syscall",      //0
        "SYS_exit",     //1
        "SYS_fork",     //2
        "SYS_read",     //3
        "SYS_write",        //4
        "SYS_open",     //5
        "SYS_close",        //6
        "SYS_wait4",        //7
        "SYS_8",
        "SYS_link",     //9
        "SYS_unlink",       //10
        "SYS_11",
        "SYS_chdir",        //12
        "SYS_fchdir",       //13
        "SYS_mknod",        //14
        "SYS_chmod",        //15
        "SYS_chown",        //16
        "SYS_break",        //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid",       //20
        "SYS_mount",        //21
        "SYS_unmount",      //22
        "SYS_setuid",       //23
        "SYS_getuid",       //24
        "SYS_geteuid",      //25
        "SYS_ptrace",       //26
        "SYS_recvmsg",      //27
        "SYS_sendmsg",      //28
        "SYS_recvfrom",     //29
        "SYS_accept",       //30
        "SYS_getpeername",  //31
        "SYS_getsockname",  //32
        "SYS_access",       //33
        "SYS_chflags",      //34
        "SYS_fchflags",     //35
        "SYS_sync",     //36
        "SYS_kill",     //37
        "SYS_38",
        "SYS_getppid",      //39
        "SYS_40",
        "SYS_dup",      //41
        "SYS_opipe",        //42
        "SYS_getegid",      //43
        "SYS_profil",       //44
        "SYS_ktrace",       //45
        "SYS_sigaction",    //46
        "SYS_getgid",       //47
        "SYS_sigprocmask",  //48
        "SYS_getlogin",     //49
        "SYS_setlogin",     //50
        "SYS_acct",     //51
        "SYS_sigpending",   //52
        "SYS_osigaltstack", //53
        "SYS_ioctl",        //54
        "SYS_reboot",       //55
        "SYS_revoke",       //56
        "SYS_symlink",      //57
        "SYS_readlink",     //58
        "SYS_execve",       //59
        "SYS_umask",        //60
        "SYS_chroot",       //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork",        //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk",     //69
        "SYS_sstk",     //70
        "SYS_61",
        "SYS_vadvise",      //72
        "SYS_munmap",       //73
        "SYS_mprotect",     //74
        "SYS_madvise",      //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore",      //78
        "SYS_getgroups",    //79
        "SYS_setgroups",    //80
        "SYS_getpgrp",      //81
        "SYS_setpgid",      //82
        "SYS_setitimer",    //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer",    //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2",     //90
        "SYS_91",
        "SYS_fcntl",        //92
        "SYS_select",       //93
        "SYS_94",
        "SYS_fsync",        //95
        "SYS_setpriority",  //96
        "SYS_socket",       //97
        "SYS_connect",      //98
        "SYS_99",
        "SYS_getpriority",  //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn",    //103
        "SYS_bind",     //104
        "SYS_setsockopt",   //105
        "SYS_listen",       //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend",   //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday", //116
        "SYS_getrusage",    //117
        "SYS_getsockopt",   //118
        "SYS_119",
        "SYS_readv",        //120
        "SYS_writev",       //121
        "SYS_settimeofday", //122
        "SYS_fchown",       //123
        "SYS_fchmod",       //124
        "SYS_125",
        "SYS_setreuid",     //126
        "SYS_setregid",     //127
        "SYS_rename",       //128
        "SYS_129",
        "SYS_130",
        "SYS_flock",        //131
        "SYS_mkfifo",       //132
        "SYS_sendto",       //133
        "SYS_shutdown",     //134
        "SYS_socketpair",   //135
        "SYS_mkdir",        //136
        "SYS_rmdir",        //137
        "SYS_utimes",       //138
        "SYS_139",
        "SYS_adjtime",      //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid",       //147
        "SYS_quotactl",     //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc",       //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh",        //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch",      //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread",        //173
        "SYS_pwrite",       //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid",       //181
        "SYS_setegid",      //182
        "SYS_seteuid",      //183
        "SYS_lfs_bmapv",    //184
        "SYS_lfs_markv",    //185
        "SYS_lfs_segclean", //186
        "SYS_lfs_segwait",  //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf",     //191
        "SYS_fpathconf",    //192
        "SYS_swapctl",      //193
        "SYS_getrlimit",    //194
        "SYS_setrlimit",    //195
        "SYS_getdirentries",    //196
        "SYS_mmap",     //197
        "SYS___syscall",    //198
        "SYS_lseek",        //199
        "SYS_truncate",     //200
        "SYS_ftruncate",    //201
        "SYS___sysctl",     //202
        "SYS_mlock",        //203
        "SYS_munlock",      //204
        "SYS_205",
        "SYS_futimes",      //206
        "SYS_getpgid",      //207
        "SYS_xfspioctl",    //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget",       //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget",       //225
        "SYS_msgsnd",       //226
        "SYS_msgrcv",       //227
        "SYS_shmat",        //228
        "SYS_229",
        "SYS_shmdt",        //230
        "SYS_231",
        "SYS_clock_gettime",    //232
        "SYS_clock_settime",    //233
        "SYS_clock_getres", //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep",    //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit",     //250
        "SYS_rfork",        //251
        "SYS_poll",     //252
        "SYS_issetugid",    //253
        "SYS_lchown",       //254
        "SYS_getsid",       //255
        "SYS_msync",        //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat",    //260
        "SYS_statfs",       //261
        "SYS_fstatfs",      //262
        "SYS_pipe",     //263
        "SYS_fhopen",       //264
        "SYS_265",
        "SYS_fhstatfs",     //266
        "SYS_preadv",       //267
        "SYS_pwritev",      //268
        "SYS_kqueue",       //269
        "SYS_kevent",       //270
        "SYS_mlockall",     //271
        "SYS_munlockall",   //272
        "SYS_getpeereid",   //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid",    //281
        "SYS_setresuid",    //282
        "SYS_getresgid",    //283
        "SYS_setresgid",    //284
        "SYS_285",
        "SYS_mquery",       //286
        "SYS_closefrom",    //287
        "SYS_sigaltstack",  //288
        "SYS_shmget",       //289
        "SYS_semop",        //290
        "SYS_stat",     //291
        "SYS_fstat",        //292
        "SYS_lstat",        //293
        "SYS_fhstat",       //294
        "SYS___semctl",     //295
        "SYS_shmctl",       //296
        "SYS_msgctl",       //297
        "SYS_MAXSYSCALL",   //298
        //299
        //300
    };
    uint32_t uEAX;
    /* Bail out early when logging is disabled; this function also reads guest memory. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
4940
4941
4942#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Only compiled for the no-CRT Windows/x86 build (see the enclosing #if);
 * nothing to initialize, always reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
4950
/**
 * Minimal memcpy replacement for the no-CRT Windows/x86 build.
 *
 * Plain forward byte copy; the regions must not overlap (standard memcpy
 * contract).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (not modified).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;    /* keep const: the original dropped the qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
4958
4959#endif
4960
/**
 * qemu callback invoked on SMM state changes.
 *
 * Intentionally a no-op in the VBox integration.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette