VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 18188

Last change on this file since 18188 was 18073, checked in by vboxsync, 16 years ago

debugger: make it work somewhat better

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 155.0 KB
Line 
1/* $Id: VBoxRecompiler.c 18073 2009-03-18 17:05:00Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145#endif
146
147/*
148 * Global stuff.
149 */
150
/** MMIO read callbacks, one per access size (byte, word, dword).
 * Registered with cpu_register_io_memory() in REMR3Init(). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, one per access size (byte, word, dword).
 * Registered with cpu_register_io_memory() in REMR3Init(). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access handler read callbacks, one per access size (byte, word, dword).
 * Registered with cpu_register_io_memory() in REMR3Init(). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access handler write callbacks, one per access size (byte, word, dword).
 * Registered with cpu_register_io_memory() in REMR3Init(). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
182
183
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
/** Handler for the '.remstep' debugger command (defined further down in this file). */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors registered with DBGCRegisterCommands() in REMR3Init(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
215
216
217/*******************************************************************************
218* Internal Functions *
219*******************************************************************************/
/** Forward declaration; aborts the VM with a fatal REM error (definition not in this chunk). */
void remAbort(int rc, const char *pszTip);
/** Math self-test used by the DEBUG-build assertion in REMR3Init(). */
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
uint8_t *code_gen_prologue;
236
237/**
238 * Initializes the REM.
239 *
240 * @returns VBox status code.
241 * @param pVM The VM to operate on.
242 */
243REMR3DECL(int) REMR3Init(PVM pVM)
244{
245 uint32_t u32Dummy;
246 int rc;
247
248#ifdef VBOX_ENABLE_VBOXREM64
249 LogRel(("Using 64-bit aware REM\n"));
250#endif
251
252 /*
253 * Assert sanity.
254 */
255 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
256 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
257 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
258#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
259 Assert(!testmath());
260#endif
261
262 /*
263 * Init some internal data members.
264 */
265 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
266 pVM->rem.s.Env.pVM = pVM;
267#ifdef CPU_RAW_MODE_INIT
268 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
269#endif
270
271 /* ctx. */
272 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
273 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
274
275 /* ignore all notifications */
276 pVM->rem.s.fIgnoreAll = true;
277
278 code_gen_prologue = RTMemExecAlloc(_1K);
279 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
280
281 cpu_exec_init_all(0);
282
283 /*
284 * Init the recompiler.
285 */
286 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
287 {
288 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
289 return VERR_GENERAL_FAILURE;
290 }
291 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
292 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
293
294 /* allocate code buffer for single instruction emulation. */
295 pVM->rem.s.Env.cbCodeBuffer = 4096;
296 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
297 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
298
299 /* finally, set the cpu_single_env global. */
300 cpu_single_env = &pVM->rem.s.Env;
301
302 /* Nothing is pending by default */
303 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
304
305 /*
306 * Register ram types.
307 */
308 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
309 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
310 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
311 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
312 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
313
314 /* stop ignoring. */
315 pVM->rem.s.fIgnoreAll = false;
316
317 /*
318 * Register the saved state data unit.
319 */
320 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
321 NULL, remR3Save, NULL,
322 NULL, remR3Load, NULL);
323 if (RT_FAILURE(rc))
324 return rc;
325
326#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
327 /*
328 * Debugger commands.
329 */
330 static bool fRegisteredCmds = false;
331 if (!fRegisteredCmds)
332 {
333 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
334 if (RT_SUCCESS(rc))
335 fRegisteredCmds = true;
336 }
337#endif
338
339#ifdef VBOX_WITH_STATISTICS
340 /*
341 * Statistics.
342 */
343 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
344 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
345 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
346 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
347 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
348 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
349 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
350 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
351 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
352 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
353 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
354 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
355
356 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
357
358 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
359 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
360 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
361 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
362 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
363 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
364 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
365 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
366 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
367 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
368 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
369
370 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
371 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
372 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
373 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
374
375 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
378 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
379 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
381
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
385 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
386 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
387 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
388
389 /** @todo missing /REM/Tb*Count stats */
390
391#endif
392
393#ifdef DEBUG_ALL_LOGGING
394 loglevel = ~0;
395# ifdef DEBUG_TMP_LOGGING
396 logfile = fopen("/tmp/vbox-qemu.log", "w");
397# endif
398#endif
399
400 return rc;
401}
402
403
404/**
405 * Finalizes the REM initialization.
406 *
407 * This is called after all components, devices and drivers has
408 * been initialized. Its main purpose it to finish the RAM related
409 * initialization.
410 *
411 * @returns VBox status code.
412 *
413 * @param pVM The VM handle.
414 */
415REMR3DECL(int) REMR3InitFinalize(PVM pVM)
416{
417 int rc;
418
419 /*
420 * Ram size & dirty bit map.
421 */
422 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
423 pVM->rem.s.fGCPhysLastRamFixed = true;
424#ifdef RT_STRICT
425 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
426#else
427 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
428#endif
429 return rc;
430}
431
432
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param fGuarded Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* One dirty-map byte per page; the +1 overflow check below also rejects
       GCPhysLastRam == ~0 where cb would wrap to zero. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Simple case: plain heap allocation, no guard area. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke access to the tail of the allocation so overruns fault immediately. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the pointer so the map's END abuts the guard area; the slack,
           if any, sits unused at the front of the allocation. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* Initialize it: all pages dirty so the first sync writes everything. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
487
488
489/**
490 * Terminates the REM.
491 *
492 * Termination means cleaning up and freeing all resources,
493 * the VM it self is at this point powered off or suspended.
494 *
495 * @returns VBox status code.
496 * @param pVM The VM to operate on.
497 */
498REMR3DECL(int) REMR3Term(PVM pVM)
499{
500 return VINF_SUCCESS;
501}
502
503
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param pVM VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.  Ignore notifications while doing so -- the reset
     * must not trigger the usual state-change handling.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
528
529
/**
 * Execute state save operation.
 *
 * Saved unit layout (must be mirrored exactly by remR3Load):
 * hflags, ~0 separator, raw-ring-0 flag, pending interrupt, ~0 terminator.
 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pSSM SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0); /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Intermediate put errors are picked up here: SSM latches the first failure. */
    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
555
556
/**
 * Execute state load operation.
 *
 * Mirrors the layout written by remR3Save; additionally understands the
 * older VER1_6 format which carried a redundant CPU state blob and the
 * invalidated-pages list.
 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pSSM SSM operation handle.
 * @param u32Version Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if ( u32Version != REM_SAVED_STATE_VERSION
        && u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Bound the count before indexing the fixed-size array. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignornable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
678
679
680
681#undef LOG_GROUP
682#define LOG_GROUP LOG_GROUP_REM_RUN
683
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param pVM VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 when it actually removed one, hence the '!'.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Resume/pause pairs nudge the tick/virtual clocks forward one step. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the recompiler exit codes onto VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED: rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM rc was raised; fetch it and reset the stash. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Reinstall the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
769
770
771/**
772 * Set a breakpoint using the REM facilities.
773 *
774 * @returns VBox status code.
775 * @param pVM The VM handle.
776 * @param Address The breakpoint address.
777 * @thread The emulation thread.
778 */
779REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
780{
781 VM_ASSERT_EMT(pVM);
782 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
783 {
784 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
785 return VINF_SUCCESS;
786 }
787 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
788 return VERR_REM_NO_MORE_BP_SLOTS;
789}
790
791
792/**
793 * Clears a breakpoint set by REMR3BreakpointSet().
794 *
795 * @returns VBox status code.
796 * @param pVM The VM handle.
797 * @param Address The breakpoint address.
798 * @thread The emulation thread.
799 */
800REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
801{
802 VM_ASSERT_EMT(pVM);
803 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
804 {
805 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
806 return VINF_SUCCESS;
807 }
808 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
809 return VERR_REM_BP_NOT_FOUND;
810}
811
812
813/**
814 * Emulate an instruction.
815 *
816 * This function executes one instruction without letting anyone
817 * interrupt it. This is intended for being called while being in
818 * raw mode and thus will take care of all the state syncing between
819 * REM and the rest.
820 *
821 * @returns VBox status code.
822 * @param pVM VM handle.
823 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs; /* restore the deferred TB flush request for the next full run */
    if (RT_SUCCESS(rc))
    {
        /* Save the current interrupt request so it can be restored after the single
           instruction has executed; it is replaced by CPU_INTERRUPT_SINGLE_INSTR below. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        /* Translate the cpu_exec exit code into a VBox status code for the caller (EM). */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the REM breakpoint table for the current PC. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             * Hand the stored status back and reset the stash to a poison value.
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
960
961
962/**
963 * Runs code in recompiled mode.
964 *
965 * Before calling this function the REM state needs to be in sync with
966 * the VM. Call REMR3State() to perform the sync. It's only necessary
967 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
968 * and after calling REMR3StateBack().
969 *
970 * @returns VBox status code.
971 *
972 * @param pVM VM Handle.
973 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);  /* caller must have synced via REMR3State first */

    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);
    /* Translate the cpu_exec exit code into a VBox status code for the caller (EM). */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the REM breakpoint table for the current PC. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         * Hand the stored status back and reset the stash to a poison value.
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1089
1090
1091/**
1092 * Check if the cpu state is suitable for Raw execution.
1093 *
1094 * @returns boolean
1095 * @param env The CPU env struct.
1096 * @param eip The EIP to check this for (might differ from env->eip).
1097 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1098 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1099 *
1100 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1101 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Note: segment attribute words are reconstructed from QEmu's descriptor
         * dword format via (flags >> 8) & 0xF0FF throughout.
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 (or ring-1/2, refused below) code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* PATM patch code always runs raw, regardless of the checks below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1325
1326
1327/**
1328 * Fetches a code byte.
1329 *
1330 * @returns Success indicator (bool) for ease of use.
1331 * @param env The CPU environment structure.
1332 * @param GCPtrInstr Where to fetch code.
1333 * @param pu8Byte Where to store the byte on success
1334 */
1335bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1336{
1337 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1338 if (RT_SUCCESS(rc))
1339 return true;
1340 return false;
1341}
1342
1343
1344/**
1345 * Flush (or invalidate if you like) page table/dir entry.
1346 *
1347 * (invlpg instruction; tlb_flush_page)
1348 *
1349 * @param env Pointer to cpu environment.
1350 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1351 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync; flag it for SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    rc = PGMInvalidatePage(pVM, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* On failure, fall back to a full CR3 resync instead of failing the invalidation. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1390
1391
1392#ifndef REM_PHYS_ADDR_IN_TLB
1393/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Returns the HC pointer for the given guest physical address, with status
 * encoded in the low bits of the returned pointer:
 *  - 1: page is unassigned or all accesses must be trapped,
 *  - bit 1 set: writes must be trapped (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 *
 * NOTE(review): the fWritable parameter is currently ignored — the call below
 * hard-codes true. Confirm whether read-only lookups should pass it through.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1413#endif /* REM_PHYS_ADDR_IN_TLB */
1414
1415
1416/**
1417 * Called from tlb_protect_code in order to write monitor a code page.
1418 *
1419 * @param env Pointer to the CPU environment.
1420 * @param GCPtr Code page to monitor
1421 */
1422void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1423{
1424#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1425 Assert(env->pVM->rem.s.fInREM);
1426 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1427 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1428 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1429 && !(env->eflags & VM_MASK) /* no V86 mode */
1430 && !HWACCMIsEnabled(env->pVM))
1431 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1432#endif
1433}
1434
1435
1436/**
1437 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1438 *
1439 * @param env Pointer to the CPU environment.
1440 * @param GCPtr Code page to monitor
1441 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    /* Only undo monitoring under the same conditions remR3ProtectCode set it up. */
    if (    (env->cr[0] & X86_CR0_PG)                   /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
        &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)  /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1454
1455
1456/**
1457 * Called when the CPU is initialized, any of the CRx registers are changed or
1458 * when the A20 line is modified.
1459 *
1460 * @param env Pointer to the CPU environment.
1461 * @param fGlobal Set if the flush is global.
1462 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync; flag it for SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    PGMFlushTLB(pVM, env->cr[3], fGlobal);
}
1498
1499
1500/**
1501 * Called when any of the cr0, cr4 or efer registers is updated.
1502 *
1503 * @param env Pointer to the CPU environment.
1504 */
void remR3ChangeCpuMode(CPUState *env)
{
    int rc;
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync; flag it for SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
#else
    /* 32-bit target: no EFER, pass 0. */
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
#endif
}
1540
1541
1542/**
1543 * Called from compiled code to run dma.
1544 *
1545 * @param env Pointer to the CPU environment.
1546 */
void remR3DmaRun(CPUState *env)
{
    /* Suspend emulated-code profiling while PDM processes pending DMA work. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1553
1554
1555/**
1556 * Called from compiled code to schedule pending timers in VMM
1557 *
1558 * @param env Pointer to the CPU environment.
1559 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    /* Switch profiling from emulated-code to timer accounting while TM runs the queues. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1569
1570
1571/**
 * Record trap occurrence
1573 *
1574 * @returns VBox status code
1575 * @param env Pointer to the CPU environment.
1576 * @param uTrap Trap nr
1577 * @param uErrorCode Error code
1578 * @param pvNextEIP Next EIP
1579 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily-registered per-trap-vector counters (vectors 0..254). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only track CPU exceptions (vector < 0x20) raised in protected, non-V86 mode. */
    if(     uTrap < 0x20
        &&  (env->cr[0] & X86_CR0_PE)
        &&  !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Guard against a trap storm: the same exception at the same EIP/CR2 more
           than 512 times in a row raises VERR_REM_TOO_MANY_TRAPS. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap or location restarts the repetition count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not an exception we track for the storm guard; record it but reset the count. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1630
1631
1632/*
1633 * Clear current active trap
1634 *
1635 * @param pVM VM Handle.
1636 */
1637void remR3TrapClear(PVM pVM)
1638{
1639 pVM->rem.s.cPendingExceptions = 0;
1640 pVM->rem.s.uPendingException = 0;
1641 pVM->rem.s.uPendingExcptEIP = 0;
1642 pVM->rem.s.uPendingExcptCR2 = 0;
1643}
1644
1645
1646/*
1647 * Record previous call instruction addresses
1648 *
1649 * @param env Pointer to the CPU environment.
1650 */
1651void remR3RecordCall(CPUState *env)
1652{
1653 CSAMR3RecordCallAddress(env->pVM, env->eip);
1654}
1655
1656
1657/**
1658 * Syncs the internal REM state with the VM.
1659 *
1660 * This must be called before REMR3Run() is invoked whenever when the REM
1661 * state is not up to date. Calling it several times in a row is not
1662 * permitted.
1663 *
1664 * @returns VBox status code.
1665 *
1666 * @param pVM VM Handle.
1667 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
1671 */
1672REMR3DECL(int) REMR3State(PVM pVM)
1673{
1674 register const CPUMCTX *pCtx;
1675 register unsigned fFlags;
1676 bool fHiddenSelRegsValid;
1677 unsigned i;
1678 TRPMEVENT enmType;
1679 uint8_t u8TrapNo;
1680 int rc;
1681
1682 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1683 Log2(("REMR3State:\n"));
1684
1685 pCtx = pVM->rem.s.pCtx;
1686 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1687
1688 Assert(!pVM->rem.s.fInREM);
1689 pVM->rem.s.fInStateSync = true;
1690
1691 /*
1692 * If we have to flush TBs, do that immediately.
1693 */
1694 if (pVM->rem.s.fFlushTBs)
1695 {
1696 STAM_COUNTER_INC(&gStatFlushTBs);
1697 tb_flush(&pVM->rem.s.Env);
1698 pVM->rem.s.fFlushTBs = false;
1699 }
1700
1701 /*
1702 * Copy the registers which require no special handling.
1703 */
1704#ifdef TARGET_X86_64
1705 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1706 Assert(R_EAX == 0);
1707 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1708 Assert(R_ECX == 1);
1709 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1710 Assert(R_EDX == 2);
1711 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1712 Assert(R_EBX == 3);
1713 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1714 Assert(R_ESP == 4);
1715 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1716 Assert(R_EBP == 5);
1717 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1718 Assert(R_ESI == 6);
1719 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1720 Assert(R_EDI == 7);
1721 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1722 pVM->rem.s.Env.regs[8] = pCtx->r8;
1723 pVM->rem.s.Env.regs[9] = pCtx->r9;
1724 pVM->rem.s.Env.regs[10] = pCtx->r10;
1725 pVM->rem.s.Env.regs[11] = pCtx->r11;
1726 pVM->rem.s.Env.regs[12] = pCtx->r12;
1727 pVM->rem.s.Env.regs[13] = pCtx->r13;
1728 pVM->rem.s.Env.regs[14] = pCtx->r14;
1729 pVM->rem.s.Env.regs[15] = pCtx->r15;
1730
1731 pVM->rem.s.Env.eip = pCtx->rip;
1732
1733 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1734#else
1735 Assert(R_EAX == 0);
1736 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1737 Assert(R_ECX == 1);
1738 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1739 Assert(R_EDX == 2);
1740 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1741 Assert(R_EBX == 3);
1742 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1743 Assert(R_ESP == 4);
1744 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1745 Assert(R_EBP == 5);
1746 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1747 Assert(R_ESI == 6);
1748 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1749 Assert(R_EDI == 7);
1750 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1751 pVM->rem.s.Env.eip = pCtx->eip;
1752
1753 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1754#endif
1755
1756 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1757
1758 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1759 for (i=0;i<8;i++)
1760 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1761
1762 /*
1763 * Clear the halted hidden flag (the interrupt waking up the CPU can
1764 * have been dispatched in raw mode).
1765 */
1766 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1767
1768 /*
1769 * Replay invlpg?
1770 */
1771 if (pVM->rem.s.cInvalidatedPages)
1772 {
1773 RTUINT i;
1774
1775 pVM->rem.s.fIgnoreInvlPg = true;
1776 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1777 {
1778 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1779 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1780 }
1781 pVM->rem.s.fIgnoreInvlPg = false;
1782 pVM->rem.s.cInvalidatedPages = 0;
1783 }
1784
1785 /* Replay notification changes? */
1786 if (pVM->rem.s.cHandlerNotifications)
1787 REMR3ReplayHandlerNotifications(pVM);
1788
1789 /* Update MSRs; before CRx registers! */
1790 pVM->rem.s.Env.efer = pCtx->msrEFER;
1791 pVM->rem.s.Env.star = pCtx->msrSTAR;
1792 pVM->rem.s.Env.pat = pCtx->msrPAT;
1793#ifdef TARGET_X86_64
1794 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1795 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1796 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1797 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1798
1799 /* Update the internal long mode activate flag according to the new EFER value. */
1800 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1801 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1802 else
1803 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1804#endif
1805
1806 /*
1807 * Registers which are rarely changed and require special handling / order when changed.
1808 */
1809 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1810 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1811 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1812 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1813 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1814 {
1815 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1816 {
1817 pVM->rem.s.fIgnoreCR3Load = true;
1818 tlb_flush(&pVM->rem.s.Env, true);
1819 pVM->rem.s.fIgnoreCR3Load = false;
1820 }
1821
1822 /* CR4 before CR0! */
1823 if (fFlags & CPUM_CHANGED_CR4)
1824 {
1825 pVM->rem.s.fIgnoreCR3Load = true;
1826 pVM->rem.s.fIgnoreCpuMode = true;
1827 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1828 pVM->rem.s.fIgnoreCpuMode = false;
1829 pVM->rem.s.fIgnoreCR3Load = false;
1830 }
1831
1832 if (fFlags & CPUM_CHANGED_CR0)
1833 {
1834 pVM->rem.s.fIgnoreCR3Load = true;
1835 pVM->rem.s.fIgnoreCpuMode = true;
1836 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1837 pVM->rem.s.fIgnoreCpuMode = false;
1838 pVM->rem.s.fIgnoreCR3Load = false;
1839 }
1840
1841 if (fFlags & CPUM_CHANGED_CR3)
1842 {
1843 pVM->rem.s.fIgnoreCR3Load = true;
1844 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1845 pVM->rem.s.fIgnoreCR3Load = false;
1846 }
1847
1848 if (fFlags & CPUM_CHANGED_GDTR)
1849 {
1850 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1851 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1852 }
1853
1854 if (fFlags & CPUM_CHANGED_IDTR)
1855 {
1856 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1857 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1858 }
1859
1860 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1861 {
1862 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1863 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1864 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1865 }
1866
1867 if (fFlags & CPUM_CHANGED_LDTR)
1868 {
1869 if (fHiddenSelRegsValid)
1870 {
1871 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1872 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1873 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1874 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1875 }
1876 else
1877 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1878 }
1879
1880 if (fFlags & CPUM_CHANGED_CPUID)
1881 {
1882 uint32_t u32Dummy;
1883
1884 /*
1885 * Get the CPUID features.
1886 */
1887 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1888 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1889 }
1890
1891 /* Sync FPU state after CR4, CPUID and EFER (!). */
1892 if (fFlags & CPUM_CHANGED_FPU_REM)
1893 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1894 }
1895
1896 /*
1897 * Sync TR unconditionally to make life simpler.
1898 */
1899 pVM->rem.s.Env.tr.selector = pCtx->tr;
1900 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1901 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1902 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1903 /* Note! do_interrupt will fault if the busy flag is still set... */
1904 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1905
1906 /*
1907 * Update selector registers.
1908 * This must be done *after* we've synced gdt, ldt and crX registers
1909 * since we're reading the GDT/LDT om sync_seg. This will happen with
1910 * saved state which takes a quick dip into rawmode for instance.
1911 */
1912 /*
1913 * Stack; Note first check this one as the CPL might have changed. The
1914 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1915 */
1916
1917 if (fHiddenSelRegsValid)
1918 {
1919 /* The hidden selector registers are valid in the CPU context. */
1920 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1921
1922 /* Set current CPL */
1923 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1924
1925 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1926 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1927 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1928 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1929 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1930 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1931 }
1932 else
1933 {
1934 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1935 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1936 {
1937 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1938
1939 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1940 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1941#ifdef VBOX_WITH_STATISTICS
1942 if (pVM->rem.s.Env.segs[R_SS].newselector)
1943 {
1944 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1945 }
1946#endif
1947 }
1948 else
1949 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1950
1951 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1952 {
1953 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1954 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1955#ifdef VBOX_WITH_STATISTICS
1956 if (pVM->rem.s.Env.segs[R_ES].newselector)
1957 {
1958 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1959 }
1960#endif
1961 }
1962 else
1963 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1964
1965 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1966 {
1967 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1968 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1969#ifdef VBOX_WITH_STATISTICS
1970 if (pVM->rem.s.Env.segs[R_CS].newselector)
1971 {
1972 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1973 }
1974#endif
1975 }
1976 else
1977 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1978
1979 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1980 {
1981 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1982 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1983#ifdef VBOX_WITH_STATISTICS
1984 if (pVM->rem.s.Env.segs[R_DS].newselector)
1985 {
1986 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1987 }
1988#endif
1989 }
1990 else
1991 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1992
1993 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1994 * be the same but not the base/limit. */
1995 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1996 {
1997 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1998 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1999#ifdef VBOX_WITH_STATISTICS
2000 if (pVM->rem.s.Env.segs[R_FS].newselector)
2001 {
2002 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2003 }
2004#endif
2005 }
2006 else
2007 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2008
2009 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2010 {
2011 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2012 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2013#ifdef VBOX_WITH_STATISTICS
2014 if (pVM->rem.s.Env.segs[R_GS].newselector)
2015 {
2016 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2017 }
2018#endif
2019 }
2020 else
2021 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2022 }
2023
2024 /*
2025 * Check for traps.
2026 */
2027 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2028 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2029 if (RT_SUCCESS(rc))
2030 {
2031#ifdef DEBUG
2032 if (u8TrapNo == 0x80)
2033 {
2034 remR3DumpLnxSyscall(pVM);
2035 remR3DumpOBsdSyscall(pVM);
2036 }
2037#endif
2038
2039 pVM->rem.s.Env.exception_index = u8TrapNo;
2040 if (enmType != TRPM_SOFTWARE_INT)
2041 {
2042 pVM->rem.s.Env.exception_is_int = 0;
2043 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2044 }
2045 else
2046 {
2047 /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2050 * for int03 and into.
2051 */
2052 pVM->rem.s.Env.exception_is_int = 1;
2053 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2054 /* int 3 may be generated by one-byte 0xcc */
2055 if (u8TrapNo == 3)
2056 {
2057 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2058 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2059 }
2060 /* int 4 may be generated by one-byte 0xce */
2061 else if (u8TrapNo == 4)
2062 {
2063 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2064 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2065 }
2066 }
2067
2068 /* get error code and cr2 if needed. */
2069 switch (u8TrapNo)
2070 {
2071 case 0x0e:
2072 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2073 /* fallthru */
2074 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2075 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2076 break;
2077
2078 case 0x11: case 0x08:
2079 default:
2080 pVM->rem.s.Env.error_code = 0;
2081 break;
2082 }
2083
2084 /*
2085 * We can now reset the active trap since the recompiler is gonna have a go at it.
2086 */
2087 rc = TRPMResetTrap(pVM);
2088 AssertRC(rc);
2089 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2090 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2091 }
2092
2093 /*
2094 * Clear old interrupt request flags; Check for pending hardware interrupts.
2095 * (See @remark for why we don't check for other FFs.)
2096 */
2097 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2098 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2099 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2100 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2101
2102 /*
2103 * We're now in REM mode.
2104 */
2105 pVM->rem.s.fInREM = true;
2106 pVM->rem.s.fInStateSync = false;
2107 pVM->rem.s.cCanExecuteRaw = 0;
2108 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2109 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2110 return VINF_SUCCESS;
2111}
2112
2113
2114/**
 * Syncs back changes in the REM state to the VM state.
2116 *
2117 * This must be called after invoking REMR3Run().
2118 * Calling it several times in a row is not permitted.
2119 *
2120 * @returns VBox status code.
2121 *
2122 * @param pVM VM Handle.
2123 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    /* Pull the raw FPU/XMM state out of the recompiler environment. */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that QEmu has left in a lazily-loaded (out of sync) state. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A toggled CR4.VME affects how SELM must set up the TSS (VM86 redirection bitmap). */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* Flag GDT/IDT base changes so the respective components resync their shadow copies. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Restore the busy bit that was cleared going into REM (see REMR3State). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * Hand any exception that is still pending in the recompiler back to TRPM.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2359
2360
2361/**
2362 * This is called by the disassembler when it wants to update the cpu state
2363 * before for instance doing a register dump.
2364 */
2365static void remR3StateUpdate(PVM pVM)
2366{
2367 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2368 unsigned i;
2369
2370 Assert(pVM->rem.s.fInREM);
2371
2372 /*
2373 * Copy back the registers.
2374 * This is done in the order they are declared in the CPUMCTX structure.
2375 */
2376
2377 /** @todo FOP */
2378 /** @todo FPUIP */
2379 /** @todo CS */
2380 /** @todo FPUDP */
2381 /** @todo DS */
2382 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2383 pCtx->fpu.MXCSR = 0;
2384 pCtx->fpu.MXCSR_MASK = 0;
2385
2386 /** @todo check if FPU/XMM was actually used in the recompiler */
2387 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2388//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2389
2390#ifdef TARGET_X86_64
2391 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2392 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2393 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2394 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2395 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2396 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2397 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2398 pCtx->r8 = pVM->rem.s.Env.regs[8];
2399 pCtx->r9 = pVM->rem.s.Env.regs[9];
2400 pCtx->r10 = pVM->rem.s.Env.regs[10];
2401 pCtx->r11 = pVM->rem.s.Env.regs[11];
2402 pCtx->r12 = pVM->rem.s.Env.regs[12];
2403 pCtx->r13 = pVM->rem.s.Env.regs[13];
2404 pCtx->r14 = pVM->rem.s.Env.regs[14];
2405 pCtx->r15 = pVM->rem.s.Env.regs[15];
2406
2407 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2408#else
2409 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2410 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2411 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2412 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2413 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2414 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2415 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2416
2417 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2418#endif
2419
2420 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2421
2422 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2423 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2424 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2425 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2426 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2427
2428#ifdef TARGET_X86_64
2429 pCtx->rip = pVM->rem.s.Env.eip;
2430 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2431#else
2432 pCtx->eip = pVM->rem.s.Env.eip;
2433 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2434#endif
2435
2436 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2437 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2438 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2439 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2440 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2441 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2442
2443 for (i = 0; i < 8; i++)
2444 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2445
2446 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2447 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2448 {
2449 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2450 STAM_COUNTER_INC(&gStatREMGDTChange);
2451 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2452 }
2453
2454 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2455 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2456 {
2457 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2458 STAM_COUNTER_INC(&gStatREMIDTChange);
2459 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2460 }
2461
2462 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2463 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2464 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2465 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2466 {
2467 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2468 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2469 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2470 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2471 STAM_COUNTER_INC(&gStatREMLDTRChange);
2472 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2473 }
2474
2475 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2476 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2477 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2478 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2479 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2480 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2481 : 0) )
2482 {
2483 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2484 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2485 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2486 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2487 pCtx->tr = pVM->rem.s.Env.tr.selector;
2488 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2489 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2490 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2491 if (pCtx->trHid.Attr.u)
2492 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2493 STAM_COUNTER_INC(&gStatREMTRChange);
2494 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2495 }
2496
2497 /** @todo These values could still be out of sync! */
2498 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2499 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2500 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2501 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2502
2503 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2504 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2505 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2506
2507 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2508 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2509 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2510
2511 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2512 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2513 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2514
2515 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2516 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2517 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2518
2519 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2520 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2521 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2522
2523 /* Sysenter MSR */
2524 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2525 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2526 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2527
2528 /* System MSRs. */
2529 pCtx->msrEFER = pVM->rem.s.Env.efer;
2530 pCtx->msrSTAR = pVM->rem.s.Env.star;
2531 pCtx->msrPAT = pVM->rem.s.Env.pat;
2532#ifdef TARGET_X86_64
2533 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2534 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2535 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2536 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2537#endif
2538
2539}
2540
2541
2542/**
2543 * Update the VMM state information if we're currently in REM.
2544 *
2545 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2546 * we're currently executing in REM and the VMM state is invalid. This method will of
2547 * course check that we're executing in REM before syncing any data over to the VMM.
2548 *
2549 * @param pVM The VM handle.
2550 */
2551REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2552{
2553 if (pVM->rem.s.fInREM)
2554 remR3StateUpdate(pVM);
2555}
2556
2557
2558#undef LOG_GROUP
2559#define LOG_GROUP LOG_GROUP_REM
2560
2561
2562/**
2563 * Notify the recompiler about Address Gate 20 state change.
2564 *
2565 * This notification is required since A20 gate changes are
2566 * initialized from a device driver and the VM might just as
2567 * well be in REM mode as in RAW mode.
2568 *
2569 * @param pVM VM handle.
2570 * @param fEnable True if the gate should be enabled.
2571 * False if the gate should be disabled.
2572 */
2573REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2574{
2575 bool fSaved;
2576
2577 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2578 VM_ASSERT_EMT(pVM);
2579
2580 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2581 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2582
2583 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2584
2585 pVM->rem.s.fIgnoreAll = fSaved;
2586}
2587
2588
2589/**
2590 * Replays the invalidated recorded pages.
2591 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2592 *
2593 * @param pVM VM handle.
2594 */
2595REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2596{
2597 RTUINT i;
2598
2599 VM_ASSERT_EMT(pVM);
2600
2601 /*
2602 * Sync the required registers.
2603 */
2604 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2605 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2606 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2607 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2608
2609 /*
2610 * Replay the flushes.
2611 */
2612 pVM->rem.s.fIgnoreInvlPg = true;
2613 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2614 {
2615 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2616 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2617 }
2618 pVM->rem.s.fIgnoreInvlPg = false;
2619 pVM->rem.s.cInvalidatedPages = 0;
2620}
2621
2622
2623/**
2624 * Replays the handler notification changes
2625 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2626 *
2627 * @param pVM VM handle.
2628 */
2629REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2630{
2631 /*
2632 * Replay the flushes.
2633 */
2634 RTUINT i;
2635 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2636
2637 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2638 VM_ASSERT_EMT(pVM);
2639
2640 pVM->rem.s.cHandlerNotifications = 0;
2641 for (i = 0; i < c; i++)
2642 {
2643 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2644 switch (pRec->enmKind)
2645 {
2646 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2647 REMR3NotifyHandlerPhysicalRegister(pVM,
2648 pRec->u.PhysicalRegister.enmType,
2649 pRec->u.PhysicalRegister.GCPhys,
2650 pRec->u.PhysicalRegister.cb,
2651 pRec->u.PhysicalRegister.fHasHCHandler);
2652 break;
2653
2654 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2655 REMR3NotifyHandlerPhysicalDeregister(pVM,
2656 pRec->u.PhysicalDeregister.enmType,
2657 pRec->u.PhysicalDeregister.GCPhys,
2658 pRec->u.PhysicalDeregister.cb,
2659 pRec->u.PhysicalDeregister.fHasHCHandler,
2660 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2661 break;
2662
2663 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2664 REMR3NotifyHandlerPhysicalModify(pVM,
2665 pRec->u.PhysicalModify.enmType,
2666 pRec->u.PhysicalModify.GCPhysOld,
2667 pRec->u.PhysicalModify.GCPhysNew,
2668 pRec->u.PhysicalModify.cb,
2669 pRec->u.PhysicalModify.fHasHCHandler,
2670 pRec->u.PhysicalModify.fRestoreAsRAM);
2671 break;
2672
2673 default:
2674 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2675 break;
2676 }
2677 }
2678 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2679}
2680
2681
2682/**
2683 * Notify REM about changed code page.
2684 *
2685 * @returns VBox status code.
2686 * @param pVM VM handle.
2687 * @param pvCodePage Code page address
2688 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     * NOTE(review): only a strict VINF_SUCCESS is handled; informational
     * statuses and failures silently skip the flush below - confirm intended.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        /* Drop all translation blocks that overlap the changed page. */
        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    /* Always reports success; the body is compiled out without
       VBOX_REM_PROTECT_PAGES_FROM_SMC. */
    return VINF_SUCCESS;
}
2719
2720
2721/**
2722 * Notification about a successful MMR3PhysRegister() call.
2723 *
2724 * @param pVM VM handle.
2725 * @param GCPhys The physical address the RAM.
2726 * @param cb Size of the memory.
2727 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2728 */
2729REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2730{
2731 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2732 VM_ASSERT_EMT(pVM);
2733
2734 /*
2735 * Validate input - we trust the caller.
2736 */
2737 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2738 Assert(cb);
2739 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2740#ifdef VBOX_WITH_NEW_PHYS_CODE
2741 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2742#endif
2743
2744 /*
2745 * Base ram? Update GCPhysLastRam.
2746 */
2747#ifdef VBOX_WITH_NEW_PHYS_CODE
2748 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2749#else
2750 if (!GCPhys)
2751#endif
2752 {
2753 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2754 {
2755 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2756 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2757 }
2758 }
2759
2760 /*
2761 * Register the ram.
2762 */
2763 Assert(!pVM->rem.s.fIgnoreAll);
2764 pVM->rem.s.fIgnoreAll = true;
2765
2766#ifdef VBOX_WITH_NEW_PHYS_CODE
2767 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2768#else
2769 if (!GCPhys)
2770 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2771 else
2772 {
2773 if (fFlags & MM_RAM_FLAGS_RESERVED)
2774 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2775 else
2776 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2777 }
2778#endif
2779 Assert(pVM->rem.s.fIgnoreAll);
2780 pVM->rem.s.fIgnoreAll = false;
2781}
2782
2783#ifndef VBOX_WITH_NEW_PHYS_CODE
2784
2785/**
2786 * Notification about a successful PGMR3PhysRegisterChunk() call.
2787 *
2788 * @param pVM VM handle.
2789 * @param GCPhys The physical address the RAM.
2790 * @param cb Size of the memory.
2791 * @param pvRam The HC address of the RAM.
2792 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2793 */
2794REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2795{
2796 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2797 VM_ASSERT_EMT(pVM);
2798
2799 /*
2800 * Validate input - we trust the caller.
2801 */
2802 Assert(pvRam);
2803 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2804 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2805 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2806 Assert(fFlags == 0 /* normal RAM */);
2807 Assert(!pVM->rem.s.fIgnoreAll);
2808 pVM->rem.s.fIgnoreAll = true;
2809 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2810 Assert(pVM->rem.s.fIgnoreAll);
2811 pVM->rem.s.fIgnoreAll = false;
2812}
2813
2814
2815/**
2816 * Grows dynamically allocated guest RAM.
2817 * Will raise a fatal error if the operation fails.
2818 *
2819 * @param physaddr The physical address.
2820 */
2821void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2822{
2823 int rc;
2824 PVM pVM = cpu_single_env->pVM;
2825 const RTGCPHYS GCPhys = physaddr;
2826
2827 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2828 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2829 if (RT_SUCCESS(rc))
2830 return;
2831
2832 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2833 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2834 AssertFatalFailed();
2835}
2836
2837#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2838
2839/**
2840 * Notification about a successful MMR3PhysRomRegister() call.
2841 *
2842 * @param pVM VM handle.
2843 * @param GCPhys The physical address of the ROM.
2844 * @param cb The size of the ROM.
2845 * @param pvCopy Pointer to the ROM copy.
2846 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
 * This function will be called whenever the protection of the
2848 * shadow ROM changes (at reset and end of POST).
2849 */
2850REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2851{
2852 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2853 VM_ASSERT_EMT(pVM);
2854
2855 /*
2856 * Validate input - we trust the caller.
2857 */
2858 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2859 Assert(cb);
2860 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2861
2862 /*
2863 * Register the rom.
2864 */
2865 Assert(!pVM->rem.s.fIgnoreAll);
2866 pVM->rem.s.fIgnoreAll = true;
2867
2868 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2869
2870 Assert(pVM->rem.s.fIgnoreAll);
2871 pVM->rem.s.fIgnoreAll = false;
2872}
2873
2874
2875/**
2876 * Notification about a successful memory deregistration or reservation.
2877 *
2878 * @param pVM VM Handle.
2879 * @param GCPhys Start physical address.
2880 * @param cb The size of the range.
2881 */
2882REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2883{
2884 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2885 VM_ASSERT_EMT(pVM);
2886
2887 /*
2888 * Validate input - we trust the caller.
2889 */
2890 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2891 Assert(cb);
2892 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2893
2894 /*
2895 * Unassigning the memory.
2896 */
2897 Assert(!pVM->rem.s.fIgnoreAll);
2898 pVM->rem.s.fIgnoreAll = true;
2899
2900 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2901
2902 Assert(pVM->rem.s.fIgnoreAll);
2903 pVM->rem.s.fIgnoreAll = false;
2904}
2905
2906
2907/**
2908 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2909 *
2910 * @param pVM VM Handle.
2911 * @param enmType Handler type.
2912 * @param GCPhys Handler range address.
2913 * @param cb Size of the handler range.
2914 * @param fHasHCHandler Set if the handler has a HC callback function.
2915 *
2916 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2917 * Handler memory type to memory which has no HC handler.
2918 */
2919REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2920{
2921 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2922 enmType, GCPhys, cb, fHasHCHandler));
2923 VM_ASSERT_EMT(pVM);
2924 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2925 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2926
2927 if (pVM->rem.s.cHandlerNotifications)
2928 REMR3ReplayHandlerNotifications(pVM);
2929
2930 Assert(!pVM->rem.s.fIgnoreAll);
2931 pVM->rem.s.fIgnoreAll = true;
2932
2933 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2934 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2935 else if (fHasHCHandler)
2936 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2937
2938 Assert(pVM->rem.s.fIgnoreAll);
2939 pVM->rem.s.fIgnoreAll = false;
2940}
2941
2942
2943/**
2944 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2945 *
2946 * @param pVM VM Handle.
2947 * @param enmType Handler type.
2948 * @param GCPhys Handler range address.
2949 * @param cb Size of the handler range.
2950 * @param fHasHCHandler Set if the handler has a HC callback function.
2951 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2952 */
2953REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2954{
2955 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2956 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2957 VM_ASSERT_EMT(pVM);
2958
2959 if (pVM->rem.s.cHandlerNotifications)
2960 REMR3ReplayHandlerNotifications(pVM);
2961
2962 Assert(!pVM->rem.s.fIgnoreAll);
2963 pVM->rem.s.fIgnoreAll = true;
2964
2965/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2966 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2967 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2968 else if (fHasHCHandler)
2969 {
2970 if (!fRestoreAsRAM)
2971 {
2972 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2973 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2974 }
2975 else
2976 {
2977 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2978 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2979 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2980 }
2981 }
2982
2983 Assert(pVM->rem.s.fIgnoreAll);
2984 pVM->rem.s.fIgnoreAll = false;
2985}
2986
2987
2988/**
2989 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2990 *
2991 * @param pVM VM Handle.
2992 * @param enmType Handler type.
2993 * @param GCPhysOld Old handler range address.
2994 * @param GCPhysNew New handler range address.
2995 * @param cb Size of the handler range.
2996 * @param fHasHCHandler Set if the handler has a HC callback function.
2997 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2998 */
2999REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3000{
3001 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3002 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3003 VM_ASSERT_EMT(pVM);
3004 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3005
3006 if (pVM->rem.s.cHandlerNotifications)
3007 REMR3ReplayHandlerNotifications(pVM);
3008
3009 if (fHasHCHandler)
3010 {
3011 Assert(!pVM->rem.s.fIgnoreAll);
3012 pVM->rem.s.fIgnoreAll = true;
3013
3014 /*
3015 * Reset the old page.
3016 */
3017 if (!fRestoreAsRAM)
3018 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3019 else
3020 {
3021 /* This is not perfect, but it'll do for PD monitoring... */
3022 Assert(cb == PAGE_SIZE);
3023 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3024 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3025 }
3026
3027 /*
3028 * Update the new page.
3029 */
3030 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3031 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3032 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3033
3034 Assert(pVM->rem.s.fIgnoreAll);
3035 pVM->rem.s.fIgnoreAll = false;
3036 }
3037}
3038
3039
3040/**
3041 * Checks if we're handling access to this page or not.
3042 *
3043 * @returns true if we're trapping access.
3044 * @returns false if we aren't.
3045 * @param pVM The VM handle.
3046 * @param GCPhys The physical address.
3047 *
3048 * @remark This function will only work correctly in VBOX_STRICT builds!
3049 */
3050REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3051{
3052#ifdef VBOX_STRICT
3053 unsigned long off;
3054 if (pVM->rem.s.cHandlerNotifications)
3055 REMR3ReplayHandlerNotifications(pVM);
3056
3057 off = get_phys_page_offset(GCPhys);
3058 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3059 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3060 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3061#else
3062 return false;
3063#endif
3064}
3065
3066
3067/**
3068 * Deals with a rare case in get_phys_addr_code where the code
3069 * is being monitored.
3070 *
3071 * It could also be an MMIO page, in which case we will raise a fatal error.
3072 *
3073 * @returns The physical address corresponding to addr.
3074 * @param env The cpu environment.
3075 * @param addr The virtual address.
3076 * @param pTLBEntry The TLB entry.
3077 */
3078target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3079 target_ulong addr,
3080 CPUTLBEntry* pTLBEntry,
3081 target_phys_addr_t ioTLBEntry)
3082{
3083 PVM pVM = env->pVM;
3084
3085 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3086 {
3087 /* If code memory is being monitored, appropriate IOTLB entry will have
3088 handler IO type, and addend will provide real physical address, no
3089 matter if we store VA in TLB or not, as handlers are always passed PA */
3090 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3091 return ret;
3092 }
3093 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3094 "*** handlers\n",
3095 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3096 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3097 LogRel(("*** mmio\n"));
3098 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3099 LogRel(("*** phys\n"));
3100 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3101 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3102 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3103 AssertFatalFailed();
3104}
3105
3106/**
3107 * Read guest RAM and ROM.
3108 *
3109 * @param SrcGCPhys The source address (guest physical).
3110 * @param pvDst The destination address.
3111 * @param cb Number of bytes
3112 */
3113void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3114{
3115 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3116 VBOX_CHECK_ADDR(SrcGCPhys);
3117 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3118#ifdef VBOX_DEBUG_PHYS
3119 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3120#endif
3121 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3122}
3123
3124
3125/**
3126 * Read guest RAM and ROM, unsigned 8-bit.
3127 *
3128 * @param SrcGCPhys The source address (guest physical).
3129 */
3130RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3131{
3132 uint8_t val;
3133 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3134 VBOX_CHECK_ADDR(SrcGCPhys);
3135 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3136 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3137#ifdef VBOX_DEBUG_PHYS
3138 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3139#endif
3140 return val;
3141}
3142
3143
3144/**
3145 * Read guest RAM and ROM, signed 8-bit.
3146 *
3147 * @param SrcGCPhys The source address (guest physical).
3148 */
3149RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3150{
3151 int8_t val;
3152 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3153 VBOX_CHECK_ADDR(SrcGCPhys);
3154 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3155 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3156#ifdef VBOX_DEBUG_PHYS
3157 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3158#endif
3159 return val;
3160}
3161
3162
3163/**
3164 * Read guest RAM and ROM, unsigned 16-bit.
3165 *
3166 * @param SrcGCPhys The source address (guest physical).
3167 */
3168RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3169{
3170 uint16_t val;
3171 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3172 VBOX_CHECK_ADDR(SrcGCPhys);
3173 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3174 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3175#ifdef VBOX_DEBUG_PHYS
3176 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3177#endif
3178 return val;
3179}
3180
3181
3182/**
3183 * Read guest RAM and ROM, signed 16-bit.
3184 *
3185 * @param SrcGCPhys The source address (guest physical).
3186 */
3187RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3188{
3189 int16_t val;
3190 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3191 VBOX_CHECK_ADDR(SrcGCPhys);
3192 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3193 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3194#ifdef VBOX_DEBUG_PHYS
3195 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3196#endif
3197 return val;
3198}
3199
3200
3201/**
3202 * Read guest RAM and ROM, unsigned 32-bit.
3203 *
3204 * @param SrcGCPhys The source address (guest physical).
3205 */
3206RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3207{
3208 uint32_t val;
3209 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3210 VBOX_CHECK_ADDR(SrcGCPhys);
3211 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3212 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3213#ifdef VBOX_DEBUG_PHYS
3214 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3215#endif
3216 return val;
3217}
3218
3219
3220/**
3221 * Read guest RAM and ROM, signed 32-bit.
3222 *
3223 * @param SrcGCPhys The source address (guest physical).
3224 */
3225RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3226{
3227 int32_t val;
3228 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3229 VBOX_CHECK_ADDR(SrcGCPhys);
3230 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3231 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3232#ifdef VBOX_DEBUG_PHYS
3233 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3234#endif
3235 return val;
3236}
3237
3238
3239/**
3240 * Read guest RAM and ROM, unsigned 64-bit.
3241 *
3242 * @param SrcGCPhys The source address (guest physical).
3243 */
3244uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3245{
3246 uint64_t val;
3247 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3248 VBOX_CHECK_ADDR(SrcGCPhys);
3249 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3250 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3251#ifdef VBOX_DEBUG_PHYS
3252 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3253#endif
3254 return val;
3255}
3256
3257
3258/**
3259 * Read guest RAM and ROM, signed 64-bit.
3260 *
3261 * @param SrcGCPhys The source address (guest physical).
3262 */
3263int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3264{
3265 int64_t val;
3266 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3267 VBOX_CHECK_ADDR(SrcGCPhys);
3268 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3269 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3270#ifdef VBOX_DEBUG_PHYS
3271 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3272#endif
3273 return val;
3274}
3275
3276
3277/**
3278 * Write guest RAM.
3279 *
3280 * @param DstGCPhys The destination address (guest physical).
3281 * @param pvSrc The source address.
3282 * @param cb Number of bytes to write
3283 */
3284void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3285{
3286 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3287 VBOX_CHECK_ADDR(DstGCPhys);
3288 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3289 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3290#ifdef VBOX_DEBUG_PHYS
3291 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3292#endif
3293}
3294
3295
3296/**
3297 * Write guest RAM, unsigned 8-bit.
3298 *
3299 * @param DstGCPhys The destination address (guest physical).
3300 * @param val Value
3301 */
3302void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3303{
3304 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3305 VBOX_CHECK_ADDR(DstGCPhys);
3306 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3307 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3308#ifdef VBOX_DEBUG_PHYS
3309 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3310#endif
3311}
3312
3313
3314/**
3315 * Write guest RAM, unsigned 8-bit.
3316 *
3317 * @param DstGCPhys The destination address (guest physical).
3318 * @param val Value
3319 */
3320void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3321{
3322 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3323 VBOX_CHECK_ADDR(DstGCPhys);
3324 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3325 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3326#ifdef VBOX_DEBUG_PHYS
3327 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3328#endif
3329}
3330
3331
3332/**
3333 * Write guest RAM, unsigned 32-bit.
3334 *
3335 * @param DstGCPhys The destination address (guest physical).
3336 * @param val Value
3337 */
3338void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3339{
3340 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3341 VBOX_CHECK_ADDR(DstGCPhys);
3342 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3343 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3344#ifdef VBOX_DEBUG_PHYS
3345 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3346#endif
3347}
3348
3349
3350/**
3351 * Write guest RAM, unsigned 64-bit.
3352 *
3353 * @param DstGCPhys The destination address (guest physical).
3354 * @param val Value
3355 */
3356void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3357{
3358 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3359 VBOX_CHECK_ADDR(DstGCPhys);
3360 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3361 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3362#ifdef VBOX_DEBUG_PHYS
3363 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3364#endif
3365}
3366
3367#undef LOG_GROUP
3368#define LOG_GROUP LOG_GROUP_REM_MMIO
3369
/** Read MMIO memory (1 byte); dispatched to IOM, failure only asserted. */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
    return u32;
}
3379
/** Read MMIO memory (2 bytes); dispatched to IOM, failure only asserted. */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
    return u32;
}
3389
/** Read MMIO memory (4 bytes); dispatched to IOM, failure only asserted. */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
    return u32;
}
3399
/** Write to MMIO memory (1 byte); dispatched to IOM, failure only asserted. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3408
/** Write to MMIO memory (2 bytes); dispatched to IOM, failure only asserted. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3417
/** Write to MMIO memory (4 bytes); dispatched to IOM, failure only asserted. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3426
3427
3428#undef LOG_GROUP
3429#define LOG_GROUP LOG_GROUP_REM_HANDLER
3430
3431/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3432
/** Read a byte from memory covered by an access handler, routed via PGM. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3440
/** Read a 16-bit word from memory covered by an access handler, routed via PGM. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3448
/** Read a 32-bit word from memory covered by an access handler, routed via PGM. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3456
/** Write a byte to memory covered by an access handler, routed via PGM. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    /* NOTE(review): passes &u32 with a 1-byte size, i.e. the first byte in
       host memory order - assumes a little-endian host (x86). */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3462
/** Write a 16-bit word to memory covered by an access handler, routed via PGM. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    /* NOTE(review): first two bytes of u32 in host order - little-endian host assumed. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3468
/** Write a 32-bit word to memory covered by an access handler, routed via PGM. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3474
3475/* -+- disassembly -+- */
3476
3477#undef LOG_GROUP
3478#define LOG_GROUP LOG_GROUP_REM_DISAS
3479
3480
3481/**
3482 * Enables or disables singled stepped disassembly.
3483 *
3484 * @returns VBox status code.
3485 * @param pVM VM handle.
3486 * @param fEnable To enable set this flag, to disable clear it.
3487 */
3488static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3489{
3490 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3491 VM_ASSERT_EMT(pVM);
3492
3493 if (fEnable)
3494 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3495 else
3496 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3497 return VINF_SUCCESS;
3498}
3499
3500
3501/**
3502 * Enables or disables singled stepped disassembly.
3503 *
3504 * @returns VBox status code.
3505 * @param pVM VM handle.
3506 * @param fEnable To enable set this flag, to disable clear it.
3507 */
3508REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3509{
3510 PVMREQ pReq;
3511 int rc;
3512
3513 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3514 if (VM_IS_EMT(pVM))
3515 return remR3DisasEnableStepping(pVM, fEnable);
3516
3517 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3518 AssertRC(rc);
3519 if (RT_SUCCESS(rc))
3520 rc = pReq->iStatus;
3521 VMR3ReqFree(pReq);
3522 return rc;
3523}
3524
3525
3526#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3527/**
3528 * External Debugger Command: .remstep [on|off|1|0]
3529 */
3530static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3531{
3532 bool fEnable;
3533 int rc;
3534
3535 /* print status */
3536 if (cArgs == 0)
3537 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3538 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3539
3540 /* convert the argument and change the mode. */
3541 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3542 if (RT_FAILURE(rc))
3543 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3544 rc = REMR3DisasEnableStepping(pVM, fEnable);
3545 if (RT_FAILURE(rc))
3546 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3547 return rc;
3548}
3549#endif
3550
3551
3552/**
3553 * Disassembles one instruction and prints it to the log.
3554 *
3555 * @returns Success indicator.
3556 * @param env Pointer to the recompiler CPU structure.
3557 * @param f32BitCode Indicates that whether or not the code should
3558 * be disassembled as 16 or 32 bit. If -1 the CS
3559 * selector will be inspected.
3560 * @param pszPrefix
3561 */
3562bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3563{
3564 PVM pVM = env->pVM;
3565 const bool fLog = LogIsEnabled();
3566 const bool fLog2 = LogIs2Enabled();
3567 int rc = VINF_SUCCESS;
3568
3569 /*
3570 * Don't bother if there ain't any log output to do.
3571 */
3572 if (!fLog && !fLog2)
3573 return true;
3574
3575 /*
3576 * Update the state so DBGF reads the correct register values.
3577 */
3578 remR3StateUpdate(pVM);
3579
3580 /*
3581 * Log registers if requested.
3582 */
3583 if (!fLog2)
3584 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3585
3586 /*
3587 * Disassemble to log.
3588 */
3589 if (fLog)
3590 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3591
3592 return RT_SUCCESS(rc);
3593}
3594
3595
3596/**
3597 * Disassemble recompiled code.
3598 *
3599 * @param phFileIgnored Ignored, logfile usually.
3600 * @param pvCode Pointer to the code block.
3601 * @param cb Size of the code block.
3602 */
3603void disas(FILE *phFile, void *pvCode, unsigned long cb)
3604{
3605#ifdef DEBUG_TMP_LOGGING
3606# define DISAS_PRINTF(x...) fprintf(phFile, x)
3607#else
3608# define DISAS_PRINTF(x...) RTLogPrintf(x)
3609 if (LogIs2Enabled())
3610#endif
3611 {
3612 unsigned off = 0;
3613 char szOutput[256];
3614 DISCPUSTATE Cpu;
3615
3616 memset(&Cpu, 0, sizeof(Cpu));
3617#ifdef RT_ARCH_X86
3618 Cpu.mode = CPUMODE_32BIT;
3619#else
3620 Cpu.mode = CPUMODE_64BIT;
3621#endif
3622
3623 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3624 while (off < cb)
3625 {
3626 uint32_t cbInstr;
3627 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3628 DISAS_PRINTF("%s", szOutput);
3629 else
3630 {
3631 DISAS_PRINTF("disas error\n");
3632 cbInstr = 1;
3633#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3634 break;
3635#endif
3636 }
3637 off += cbInstr;
3638 }
3639 }
3640
3641#undef DISAS_PRINTF
3642}
3643
3644
3645/**
3646 * Disassemble guest code.
3647 *
3648 * @param phFileIgnored Ignored, logfile usually.
3649 * @param uCode The guest address of the code to disassemble. (flat?)
3650 * @param cb Number of bytes to disassemble.
3651 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3652 */
3653void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3654{
3655#ifdef DEBUG_TMP_LOGGING
3656# define DISAS_PRINTF(x...) fprintf(phFile, x)
3657#else
3658# define DISAS_PRINTF(x...) RTLogPrintf(x)
3659 if (LogIs2Enabled())
3660#endif
3661 {
3662 PVM pVM = cpu_single_env->pVM;
3663 RTSEL cs;
3664 RTGCUINTPTR eip;
3665
3666 /*
3667 * Update the state so DBGF reads the correct register values (flags).
3668 */
3669 remR3StateUpdate(pVM);
3670
3671 /*
3672 * Do the disassembling.
3673 */
3674 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3675 cs = cpu_single_env->segs[R_CS].selector;
3676 eip = uCode - cpu_single_env->segs[R_CS].base;
3677 for (;;)
3678 {
3679 char szBuf[256];
3680 uint32_t cbInstr;
3681 int rc = DBGFR3DisasInstrEx(pVM,
3682 cs,
3683 eip,
3684 0,
3685 szBuf, sizeof(szBuf),
3686 &cbInstr);
3687 if (RT_SUCCESS(rc))
3688 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3689 else
3690 {
3691 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3692 cbInstr = 1;
3693 }
3694
3695 /* next */
3696 if (cb <= cbInstr)
3697 break;
3698 cb -= cbInstr;
3699 uCode += cbInstr;
3700 eip += cbInstr;
3701 }
3702 }
3703#undef DISAS_PRINTF
3704}
3705
3706
3707/**
3708 * Looks up a guest symbol.
3709 *
3710 * @returns Pointer to symbol name. This is a static buffer.
3711 * @param orig_addr The address in question.
3712 */
3713const char *lookup_symbol(target_ulong orig_addr)
3714{
3715 RTGCINTPTR off = 0;
3716 DBGFSYMBOL Sym;
3717 PVM pVM = cpu_single_env->pVM;
3718 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3719 if (RT_SUCCESS(rc))
3720 {
3721 static char szSym[sizeof(Sym.szName) + 48];
3722 if (!off)
3723 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3724 else if (off > 0)
3725 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3726 else
3727 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3728 return szSym;
3729 }
3730 return "<N/A>";
3731}
3732
3733
3734#undef LOG_GROUP
3735#define LOG_GROUP LOG_GROUP_REM
3736
3737
3738/* -+- FF notifications -+- */
3739
3740
3741/**
3742 * Notification about a pending interrupt.
3743 *
3744 * @param pVM VM Handle.
3745 * @param u8Interrupt Interrupt
3746 * @thread The emulation thread.
3747 */
3748REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3749{
3750 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3751 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3752}
3753
3754/**
3755 * Notification about a pending interrupt.
3756 *
3757 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3758 * @param pVM VM Handle.
3759 * @thread The emulation thread.
3760 */
3761REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3762{
3763 return pVM->rem.s.u32PendingInterrupt;
3764}
3765
3766/**
3767 * Notification about the interrupt FF being set.
3768 *
3769 * @param pVM VM Handle.
3770 * @thread The emulation thread.
3771 */
3772REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3773{
3774 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3775 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3776 if (pVM->rem.s.fInREM)
3777 {
3778 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3779 CPU_INTERRUPT_EXTERNAL_HARD);
3780 }
3781}
3782
3783
3784/**
3785 * Notification about the interrupt FF being set.
3786 *
3787 * @param pVM VM Handle.
3788 * @thread Any.
3789 */
3790REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3791{
3792 LogFlow(("REMR3NotifyInterruptClear:\n"));
3793 if (pVM->rem.s.fInREM)
3794 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3795}
3796
3797
3798/**
3799 * Notification about pending timer(s).
3800 *
3801 * @param pVM VM Handle.
3802 * @thread Any.
3803 */
3804REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3805{
3806#ifndef DEBUG_bird
3807 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3808#endif
3809 if (pVM->rem.s.fInREM)
3810 {
3811 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3812 CPU_INTERRUPT_EXTERNAL_TIMER);
3813 }
3814}
3815
3816
3817/**
3818 * Notification about pending DMA transfers.
3819 *
3820 * @param pVM VM Handle.
3821 * @thread Any.
3822 */
3823REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3824{
3825 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3826 if (pVM->rem.s.fInREM)
3827 {
3828 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3829 CPU_INTERRUPT_EXTERNAL_DMA);
3830 }
3831}
3832
3833
3834/**
3835 * Notification about pending timer(s).
3836 *
3837 * @param pVM VM Handle.
3838 * @thread Any.
3839 */
3840REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3841{
3842 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3843 if (pVM->rem.s.fInREM)
3844 {
3845 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3846 CPU_INTERRUPT_EXTERNAL_EXIT);
3847 }
3848}
3849
3850
3851/**
3852 * Notification about pending FF set by an external thread.
3853 *
3854 * @param pVM VM handle.
3855 * @thread Any.
3856 */
3857REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3858{
3859 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3860 if (pVM->rem.s.fInREM)
3861 {
3862 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3863 CPU_INTERRUPT_EXTERNAL_EXIT);
3864 }
3865}
3866
3867
3868#ifdef VBOX_WITH_STATISTICS
3869void remR3ProfileStart(int statcode)
3870{
3871 STAMPROFILEADV *pStat;
3872 switch(statcode)
3873 {
3874 case STATS_EMULATE_SINGLE_INSTR:
3875 pStat = &gStatExecuteSingleInstr;
3876 break;
3877 case STATS_QEMU_COMPILATION:
3878 pStat = &gStatCompilationQEmu;
3879 break;
3880 case STATS_QEMU_RUN_EMULATED_CODE:
3881 pStat = &gStatRunCodeQEmu;
3882 break;
3883 case STATS_QEMU_TOTAL:
3884 pStat = &gStatTotalTimeQEmu;
3885 break;
3886 case STATS_QEMU_RUN_TIMERS:
3887 pStat = &gStatTimers;
3888 break;
3889 case STATS_TLB_LOOKUP:
3890 pStat= &gStatTBLookup;
3891 break;
3892 case STATS_IRQ_HANDLING:
3893 pStat= &gStatIRQ;
3894 break;
3895 case STATS_RAW_CHECK:
3896 pStat = &gStatRawCheck;
3897 break;
3898
3899 default:
3900 AssertMsgFailed(("unknown stat %d\n", statcode));
3901 return;
3902 }
3903 STAM_PROFILE_ADV_START(pStat, a);
3904}
3905
3906
3907void remR3ProfileStop(int statcode)
3908{
3909 STAMPROFILEADV *pStat;
3910 switch(statcode)
3911 {
3912 case STATS_EMULATE_SINGLE_INSTR:
3913 pStat = &gStatExecuteSingleInstr;
3914 break;
3915 case STATS_QEMU_COMPILATION:
3916 pStat = &gStatCompilationQEmu;
3917 break;
3918 case STATS_QEMU_RUN_EMULATED_CODE:
3919 pStat = &gStatRunCodeQEmu;
3920 break;
3921 case STATS_QEMU_TOTAL:
3922 pStat = &gStatTotalTimeQEmu;
3923 break;
3924 case STATS_QEMU_RUN_TIMERS:
3925 pStat = &gStatTimers;
3926 break;
3927 case STATS_TLB_LOOKUP:
3928 pStat= &gStatTBLookup;
3929 break;
3930 case STATS_IRQ_HANDLING:
3931 pStat= &gStatIRQ;
3932 break;
3933 case STATS_RAW_CHECK:
3934 pStat = &gStatRawCheck;
3935 break;
3936 default:
3937 AssertMsgFailed(("unknown stat %d\n", statcode));
3938 return;
3939 }
3940 STAM_PROFILE_ADV_STOP(pStat, a);
3941}
3942#endif
3943
/**
 * Raise an RC, force rem exit.
 *
 * @param pVM   VM handle.
 * @param rc    The VBox status code to propagate back out of the recompiler.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Stash the status code and interrupt the recompiler so it exits and picks it up. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
3958
3959
3960/* -+- timers -+- */
3961
/** Reads the guest TSC for the recompiler, delegating to the TM component. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
3967
3968
3969/* -+- interrupts -+- */
3970
/** Asserts ISA IRQ 13 (the legacy FERR# math coprocessor error line). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
3976
3977int cpu_get_pic_interrupt(CPUState *env)
3978{
3979 uint8_t u8Interrupt;
3980 int rc;
3981
3982 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
3983 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
3984 * with the (a)pic.
3985 */
3986 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
3987 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
3988 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
3989 * remove this kludge. */
3990 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
3991 {
3992 rc = VINF_SUCCESS;
3993 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
3994 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
3995 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
3996 }
3997 else
3998 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
3999
4000 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4001 if (RT_SUCCESS(rc))
4002 {
4003 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
4004 env->interrupt_request |= CPU_INTERRUPT_HARD;
4005 return u8Interrupt;
4006 }
4007 return -1;
4008}
4009
4010
4011/* -+- local apic -+- */
4012
/** Writes the guest APIC base MSR value via PDM. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4018
4019uint64_t cpu_get_apic_base(CPUX86State *env)
4020{
4021 uint64_t u64;
4022 int rc = PDMApicGetBase(env->pVM, &u64);
4023 if (RT_SUCCESS(rc))
4024 {
4025 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4026 return u64;
4027 }
4028 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4029 return 0;
4030}
4031
/** Writes the guest APIC task priority register via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4037
4038uint8_t cpu_get_apic_tpr(CPUX86State *env)
4039{
4040 uint8_t u8;
4041 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4042 if (RT_SUCCESS(rc))
4043 {
4044 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4045 return u8;
4046 }
4047 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4048 return 0;
4049}
4050
4051
4052uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4053{
4054 uint64_t value;
4055 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4056 if (RT_SUCCESS(rc))
4057 {
4058 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4059 return value;
4060 }
4061 /** @todo: exception ? */
4062 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4063 return value;
4064}
4065
/** Writes an APIC MSR for the recompiler via PDM; errors are only logged. */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4072
/** Reads a guest MSR for the recompiler, delegating to CPUM. */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4077
/** Writes a guest MSR for the recompiler, delegating to CPUM. */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4082
4083/* -+- I/O Ports -+- */
4084
4085#undef LOG_GROUP
4086#define LOG_GROUP LOG_GROUP_REM_IOPORT
4087
4088void cpu_outb(CPUState *env, int addr, int val)
4089{
4090 int rc;
4091
4092 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4093 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4094
4095 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4096 if (RT_LIKELY(rc == VINF_SUCCESS))
4097 return;
4098 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4099 {
4100 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4101 remR3RaiseRC(env->pVM, rc);
4102 return;
4103 }
4104 remAbort(rc, __FUNCTION__);
4105}
4106
4107void cpu_outw(CPUState *env, int addr, int val)
4108{
4109 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4110 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4111 if (RT_LIKELY(rc == VINF_SUCCESS))
4112 return;
4113 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4114 {
4115 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4116 remR3RaiseRC(env->pVM, rc);
4117 return;
4118 }
4119 remAbort(rc, __FUNCTION__);
4120}
4121
4122void cpu_outl(CPUState *env, int addr, int val)
4123{
4124 int rc;
4125 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4126 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4127 if (RT_LIKELY(rc == VINF_SUCCESS))
4128 return;
4129 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4130 {
4131 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4132 remR3RaiseRC(env->pVM, rc);
4133 return;
4134 }
4135 remAbort(rc, __FUNCTION__);
4136}
4137
4138int cpu_inb(CPUState *env, int addr)
4139{
4140 uint32_t u32 = 0;
4141 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4142 if (RT_LIKELY(rc == VINF_SUCCESS))
4143 {
4144 if (/*addr != 0x61 && */addr != 0x71)
4145 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4146 return (int)u32;
4147 }
4148 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4149 {
4150 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4151 remR3RaiseRC(env->pVM, rc);
4152 return (int)u32;
4153 }
4154 remAbort(rc, __FUNCTION__);
4155 return 0xff;
4156}
4157
4158int cpu_inw(CPUState *env, int addr)
4159{
4160 uint32_t u32 = 0;
4161 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4162 if (RT_LIKELY(rc == VINF_SUCCESS))
4163 {
4164 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4165 return (int)u32;
4166 }
4167 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4168 {
4169 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4170 remR3RaiseRC(env->pVM, rc);
4171 return (int)u32;
4172 }
4173 remAbort(rc, __FUNCTION__);
4174 return 0xffff;
4175}
4176
4177int cpu_inl(CPUState *env, int addr)
4178{
4179 uint32_t u32 = 0;
4180 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4181 if (RT_LIKELY(rc == VINF_SUCCESS))
4182 {
4183//if (addr==0x01f0 && u32 == 0x6b6d)
4184// loglevel = ~0;
4185 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4186 return (int)u32;
4187 }
4188 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4189 {
4190 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4191 remR3RaiseRC(env->pVM, rc);
4192 return (int)u32;
4193 }
4194 remAbort(rc, __FUNCTION__);
4195 return 0xffffffff;
4196}
4197
4198#undef LOG_GROUP
4199#define LOG_GROUP LOG_GROUP_REM
4200
4201
4202/* -+- helpers and misc other interfaces -+- */
4203
4204/**
4205 * Perform the CPUID instruction.
4206 *
4207 * ASMCpuId cannot be invoked from some source files where this is used because of global
4208 * register allocations.
4209 *
4210 * @param env Pointer to the recompiler CPU structure.
4211 * @param uOperator CPUID operation (eax).
4212 * @param pvEAX Where to store eax.
4213 * @param pvEBX Where to store ebx.
4214 * @param pvECX Where to store ecx.
4215 * @param pvEDX Where to store edx.
4216 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Delegate to CPUM, which holds the guest's configured CPUID leaves. */
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4221
4222
4223#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 * NOTE(review): currently compiled out (#if 0) — kept for reference only.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    /* Note: only the raw format string (not the formatted message) reaches this assertion. */
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4251#endif
4252
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * @param   env         The recompiler CPU state (unused here; cpu_single_env is used instead).
 * @param   pszFormat   printf-style message format followed by its arguments.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    /* Two va_start/va_end pairs: each consumer of the '%N' nested format needs a fresh va_list. */
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4286
4287
/**
 * Aborts the VM.
 *
 * @param rc        VBox error code.
 * @param pszTip    Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);    /* sync the recompiler state back to the VMM first */
    EMR3FatalError(pVM, rc);    /* does not return */
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4313
4314
4315/**
4316 * Dumps a linux system call.
4317 * @param pVM VM handle.
4318 */
4319void remR3DumpLnxSyscall(PVM pVM)
4320{
4321 static const char *apsz[] =
4322 {
4323 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4324 "sys_exit",
4325 "sys_fork",
4326 "sys_read",
4327 "sys_write",
4328 "sys_open", /* 5 */
4329 "sys_close",
4330 "sys_waitpid",
4331 "sys_creat",
4332 "sys_link",
4333 "sys_unlink", /* 10 */
4334 "sys_execve",
4335 "sys_chdir",
4336 "sys_time",
4337 "sys_mknod",
4338 "sys_chmod", /* 15 */
4339 "sys_lchown16",
4340 "sys_ni_syscall", /* old break syscall holder */
4341 "sys_stat",
4342 "sys_lseek",
4343 "sys_getpid", /* 20 */
4344 "sys_mount",
4345 "sys_oldumount",
4346 "sys_setuid16",
4347 "sys_getuid16",
4348 "sys_stime", /* 25 */
4349 "sys_ptrace",
4350 "sys_alarm",
4351 "sys_fstat",
4352 "sys_pause",
4353 "sys_utime", /* 30 */
4354 "sys_ni_syscall", /* old stty syscall holder */
4355 "sys_ni_syscall", /* old gtty syscall holder */
4356 "sys_access",
4357 "sys_nice",
4358 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4359 "sys_sync",
4360 "sys_kill",
4361 "sys_rename",
4362 "sys_mkdir",
4363 "sys_rmdir", /* 40 */
4364 "sys_dup",
4365 "sys_pipe",
4366 "sys_times",
4367 "sys_ni_syscall", /* old prof syscall holder */
4368 "sys_brk", /* 45 */
4369 "sys_setgid16",
4370 "sys_getgid16",
4371 "sys_signal",
4372 "sys_geteuid16",
4373 "sys_getegid16", /* 50 */
4374 "sys_acct",
4375 "sys_umount", /* recycled never used phys() */
4376 "sys_ni_syscall", /* old lock syscall holder */
4377 "sys_ioctl",
4378 "sys_fcntl", /* 55 */
4379 "sys_ni_syscall", /* old mpx syscall holder */
4380 "sys_setpgid",
4381 "sys_ni_syscall", /* old ulimit syscall holder */
4382 "sys_olduname",
4383 "sys_umask", /* 60 */
4384 "sys_chroot",
4385 "sys_ustat",
4386 "sys_dup2",
4387 "sys_getppid",
4388 "sys_getpgrp", /* 65 */
4389 "sys_setsid",
4390 "sys_sigaction",
4391 "sys_sgetmask",
4392 "sys_ssetmask",
4393 "sys_setreuid16", /* 70 */
4394 "sys_setregid16",
4395 "sys_sigsuspend",
4396 "sys_sigpending",
4397 "sys_sethostname",
4398 "sys_setrlimit", /* 75 */
4399 "sys_old_getrlimit",
4400 "sys_getrusage",
4401 "sys_gettimeofday",
4402 "sys_settimeofday",
4403 "sys_getgroups16", /* 80 */
4404 "sys_setgroups16",
4405 "old_select",
4406 "sys_symlink",
4407 "sys_lstat",
4408 "sys_readlink", /* 85 */
4409 "sys_uselib",
4410 "sys_swapon",
4411 "sys_reboot",
4412 "old_readdir",
4413 "old_mmap", /* 90 */
4414 "sys_munmap",
4415 "sys_truncate",
4416 "sys_ftruncate",
4417 "sys_fchmod",
4418 "sys_fchown16", /* 95 */
4419 "sys_getpriority",
4420 "sys_setpriority",
4421 "sys_ni_syscall", /* old profil syscall holder */
4422 "sys_statfs",
4423 "sys_fstatfs", /* 100 */
4424 "sys_ioperm",
4425 "sys_socketcall",
4426 "sys_syslog",
4427 "sys_setitimer",
4428 "sys_getitimer", /* 105 */
4429 "sys_newstat",
4430 "sys_newlstat",
4431 "sys_newfstat",
4432 "sys_uname",
4433 "sys_iopl", /* 110 */
4434 "sys_vhangup",
4435 "sys_ni_syscall", /* old "idle" system call */
4436 "sys_vm86old",
4437 "sys_wait4",
4438 "sys_swapoff", /* 115 */
4439 "sys_sysinfo",
4440 "sys_ipc",
4441 "sys_fsync",
4442 "sys_sigreturn",
4443 "sys_clone", /* 120 */
4444 "sys_setdomainname",
4445 "sys_newuname",
4446 "sys_modify_ldt",
4447 "sys_adjtimex",
4448 "sys_mprotect", /* 125 */
4449 "sys_sigprocmask",
4450 "sys_ni_syscall", /* old "create_module" */
4451 "sys_init_module",
4452 "sys_delete_module",
4453 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4454 "sys_quotactl",
4455 "sys_getpgid",
4456 "sys_fchdir",
4457 "sys_bdflush",
4458 "sys_sysfs", /* 135 */
4459 "sys_personality",
4460 "sys_ni_syscall", /* reserved for afs_syscall */
4461 "sys_setfsuid16",
4462 "sys_setfsgid16",
4463 "sys_llseek", /* 140 */
4464 "sys_getdents",
4465 "sys_select",
4466 "sys_flock",
4467 "sys_msync",
4468 "sys_readv", /* 145 */
4469 "sys_writev",
4470 "sys_getsid",
4471 "sys_fdatasync",
4472 "sys_sysctl",
4473 "sys_mlock", /* 150 */
4474 "sys_munlock",
4475 "sys_mlockall",
4476 "sys_munlockall",
4477 "sys_sched_setparam",
4478 "sys_sched_getparam", /* 155 */
4479 "sys_sched_setscheduler",
4480 "sys_sched_getscheduler",
4481 "sys_sched_yield",
4482 "sys_sched_get_priority_max",
4483 "sys_sched_get_priority_min", /* 160 */
4484 "sys_sched_rr_get_interval",
4485 "sys_nanosleep",
4486 "sys_mremap",
4487 "sys_setresuid16",
4488 "sys_getresuid16", /* 165 */
4489 "sys_vm86",
4490 "sys_ni_syscall", /* Old sys_query_module */
4491 "sys_poll",
4492 "sys_nfsservctl",
4493 "sys_setresgid16", /* 170 */
4494 "sys_getresgid16",
4495 "sys_prctl",
4496 "sys_rt_sigreturn",
4497 "sys_rt_sigaction",
4498 "sys_rt_sigprocmask", /* 175 */
4499 "sys_rt_sigpending",
4500 "sys_rt_sigtimedwait",
4501 "sys_rt_sigqueueinfo",
4502 "sys_rt_sigsuspend",
4503 "sys_pread64", /* 180 */
4504 "sys_pwrite64",
4505 "sys_chown16",
4506 "sys_getcwd",
4507 "sys_capget",
4508 "sys_capset", /* 185 */
4509 "sys_sigaltstack",
4510 "sys_sendfile",
4511 "sys_ni_syscall", /* reserved for streams1 */
4512 "sys_ni_syscall", /* reserved for streams2 */
4513 "sys_vfork", /* 190 */
4514 "sys_getrlimit",
4515 "sys_mmap2",
4516 "sys_truncate64",
4517 "sys_ftruncate64",
4518 "sys_stat64", /* 195 */
4519 "sys_lstat64",
4520 "sys_fstat64",
4521 "sys_lchown",
4522 "sys_getuid",
4523 "sys_getgid", /* 200 */
4524 "sys_geteuid",
4525 "sys_getegid",
4526 "sys_setreuid",
4527 "sys_setregid",
4528 "sys_getgroups", /* 205 */
4529 "sys_setgroups",
4530 "sys_fchown",
4531 "sys_setresuid",
4532 "sys_getresuid",
4533 "sys_setresgid", /* 210 */
4534 "sys_getresgid",
4535 "sys_chown",
4536 "sys_setuid",
4537 "sys_setgid",
4538 "sys_setfsuid", /* 215 */
4539 "sys_setfsgid",
4540 "sys_pivot_root",
4541 "sys_mincore",
4542 "sys_madvise",
4543 "sys_getdents64", /* 220 */
4544 "sys_fcntl64",
4545 "sys_ni_syscall", /* reserved for TUX */
4546 "sys_ni_syscall",
4547 "sys_gettid",
4548 "sys_readahead", /* 225 */
4549 "sys_setxattr",
4550 "sys_lsetxattr",
4551 "sys_fsetxattr",
4552 "sys_getxattr",
4553 "sys_lgetxattr", /* 230 */
4554 "sys_fgetxattr",
4555 "sys_listxattr",
4556 "sys_llistxattr",
4557 "sys_flistxattr",
4558 "sys_removexattr", /* 235 */
4559 "sys_lremovexattr",
4560 "sys_fremovexattr",
4561 "sys_tkill",
4562 "sys_sendfile64",
4563 "sys_futex", /* 240 */
4564 "sys_sched_setaffinity",
4565 "sys_sched_getaffinity",
4566 "sys_set_thread_area",
4567 "sys_get_thread_area",
4568 "sys_io_setup", /* 245 */
4569 "sys_io_destroy",
4570 "sys_io_getevents",
4571 "sys_io_submit",
4572 "sys_io_cancel",
4573 "sys_fadvise64", /* 250 */
4574 "sys_ni_syscall",
4575 "sys_exit_group",
4576 "sys_lookup_dcookie",
4577 "sys_epoll_create",
4578 "sys_epoll_ctl", /* 255 */
4579 "sys_epoll_wait",
4580 "sys_remap_file_pages",
4581 "sys_set_tid_address",
4582 "sys_timer_create",
4583 "sys_timer_settime", /* 260 */
4584 "sys_timer_gettime",
4585 "sys_timer_getoverrun",
4586 "sys_timer_delete",
4587 "sys_clock_settime",
4588 "sys_clock_gettime", /* 265 */
4589 "sys_clock_getres",
4590 "sys_clock_nanosleep",
4591 "sys_statfs64",
4592 "sys_fstatfs64",
4593 "sys_tgkill", /* 270 */
4594 "sys_utimes",
4595 "sys_fadvise64_64",
4596 "sys_ni_syscall" /* sys_vserver */
4597 };
4598
4599 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4600 switch (uEAX)
4601 {
4602 default:
4603 if (uEAX < RT_ELEMENTS(apsz))
4604 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4605 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4606 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4607 else
4608 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4609 break;
4610
4611 }
4612}
4613
4614
4615/**
4616 * Dumps an OpenBSD system call.
4617 * @param pVM VM handle.
4618 */
4619void remR3DumpOBsdSyscall(PVM pVM)
4620{
4621 static const char *apsz[] =
4622 {
4623 "SYS_syscall", //0
4624 "SYS_exit", //1
4625 "SYS_fork", //2
4626 "SYS_read", //3
4627 "SYS_write", //4
4628 "SYS_open", //5
4629 "SYS_close", //6
4630 "SYS_wait4", //7
4631 "SYS_8",
4632 "SYS_link", //9
4633 "SYS_unlink", //10
4634 "SYS_11",
4635 "SYS_chdir", //12
4636 "SYS_fchdir", //13
4637 "SYS_mknod", //14
4638 "SYS_chmod", //15
4639 "SYS_chown", //16
4640 "SYS_break", //17
4641 "SYS_18",
4642 "SYS_19",
4643 "SYS_getpid", //20
4644 "SYS_mount", //21
4645 "SYS_unmount", //22
4646 "SYS_setuid", //23
4647 "SYS_getuid", //24
4648 "SYS_geteuid", //25
4649 "SYS_ptrace", //26
4650 "SYS_recvmsg", //27
4651 "SYS_sendmsg", //28
4652 "SYS_recvfrom", //29
4653 "SYS_accept", //30
4654 "SYS_getpeername", //31
4655 "SYS_getsockname", //32
4656 "SYS_access", //33
4657 "SYS_chflags", //34
4658 "SYS_fchflags", //35
4659 "SYS_sync", //36
4660 "SYS_kill", //37
4661 "SYS_38",
4662 "SYS_getppid", //39
4663 "SYS_40",
4664 "SYS_dup", //41
4665 "SYS_opipe", //42
4666 "SYS_getegid", //43
4667 "SYS_profil", //44
4668 "SYS_ktrace", //45
4669 "SYS_sigaction", //46
4670 "SYS_getgid", //47
4671 "SYS_sigprocmask", //48
4672 "SYS_getlogin", //49
4673 "SYS_setlogin", //50
4674 "SYS_acct", //51
4675 "SYS_sigpending", //52
4676 "SYS_osigaltstack", //53
4677 "SYS_ioctl", //54
4678 "SYS_reboot", //55
4679 "SYS_revoke", //56
4680 "SYS_symlink", //57
4681 "SYS_readlink", //58
4682 "SYS_execve", //59
4683 "SYS_umask", //60
4684 "SYS_chroot", //61
4685 "SYS_62",
4686 "SYS_63",
4687 "SYS_64",
4688 "SYS_65",
4689 "SYS_vfork", //66
4690 "SYS_67",
4691 "SYS_68",
4692 "SYS_sbrk", //69
4693 "SYS_sstk", //70
4694 "SYS_61",
4695 "SYS_vadvise", //72
4696 "SYS_munmap", //73
4697 "SYS_mprotect", //74
4698 "SYS_madvise", //75
4699 "SYS_76",
4700 "SYS_77",
4701 "SYS_mincore", //78
4702 "SYS_getgroups", //79
4703 "SYS_setgroups", //80
4704 "SYS_getpgrp", //81
4705 "SYS_setpgid", //82
4706 "SYS_setitimer", //83
4707 "SYS_84",
4708 "SYS_85",
4709 "SYS_getitimer", //86
4710 "SYS_87",
4711 "SYS_88",
4712 "SYS_89",
4713 "SYS_dup2", //90
4714 "SYS_91",
4715 "SYS_fcntl", //92
4716 "SYS_select", //93
4717 "SYS_94",
4718 "SYS_fsync", //95
4719 "SYS_setpriority", //96
4720 "SYS_socket", //97
4721 "SYS_connect", //98
4722 "SYS_99",
4723 "SYS_getpriority", //100
4724 "SYS_101",
4725 "SYS_102",
4726 "SYS_sigreturn", //103
4727 "SYS_bind", //104
4728 "SYS_setsockopt", //105
4729 "SYS_listen", //106
4730 "SYS_107",
4731 "SYS_108",
4732 "SYS_109",
4733 "SYS_110",
4734 "SYS_sigsuspend", //111
4735 "SYS_112",
4736 "SYS_113",
4737 "SYS_114",
4738 "SYS_115",
4739 "SYS_gettimeofday", //116
4740 "SYS_getrusage", //117
4741 "SYS_getsockopt", //118
4742 "SYS_119",
4743 "SYS_readv", //120
4744 "SYS_writev", //121
4745 "SYS_settimeofday", //122
4746 "SYS_fchown", //123
4747 "SYS_fchmod", //124
4748 "SYS_125",
4749 "SYS_setreuid", //126
4750 "SYS_setregid", //127
4751 "SYS_rename", //128
4752 "SYS_129",
4753 "SYS_130",
4754 "SYS_flock", //131
4755 "SYS_mkfifo", //132
4756 "SYS_sendto", //133
4757 "SYS_shutdown", //134
4758 "SYS_socketpair", //135
4759 "SYS_mkdir", //136
4760 "SYS_rmdir", //137
4761 "SYS_utimes", //138
4762 "SYS_139",
4763 "SYS_adjtime", //140
4764 "SYS_141",
4765 "SYS_142",
4766 "SYS_143",
4767 "SYS_144",
4768 "SYS_145",
4769 "SYS_146",
4770 "SYS_setsid", //147
4771 "SYS_quotactl", //148
4772 "SYS_149",
4773 "SYS_150",
4774 "SYS_151",
4775 "SYS_152",
4776 "SYS_153",
4777 "SYS_154",
4778 "SYS_nfssvc", //155
4779 "SYS_156",
4780 "SYS_157",
4781 "SYS_158",
4782 "SYS_159",
4783 "SYS_160",
4784 "SYS_getfh", //161
4785 "SYS_162",
4786 "SYS_163",
4787 "SYS_164",
4788 "SYS_sysarch", //165
4789 "SYS_166",
4790 "SYS_167",
4791 "SYS_168",
4792 "SYS_169",
4793 "SYS_170",
4794 "SYS_171",
4795 "SYS_172",
4796 "SYS_pread", //173
4797 "SYS_pwrite", //174
4798 "SYS_175",
4799 "SYS_176",
4800 "SYS_177",
4801 "SYS_178",
4802 "SYS_179",
4803 "SYS_180",
4804 "SYS_setgid", //181
4805 "SYS_setegid", //182
4806 "SYS_seteuid", //183
4807 "SYS_lfs_bmapv", //184
4808 "SYS_lfs_markv", //185
4809 "SYS_lfs_segclean", //186
4810 "SYS_lfs_segwait", //187
4811 "SYS_188",
4812 "SYS_189",
4813 "SYS_190",
4814 "SYS_pathconf", //191
4815 "SYS_fpathconf", //192
4816 "SYS_swapctl", //193
4817 "SYS_getrlimit", //194
4818 "SYS_setrlimit", //195
4819 "SYS_getdirentries", //196
4820 "SYS_mmap", //197
4821 "SYS___syscall", //198
4822 "SYS_lseek", //199
4823 "SYS_truncate", //200
4824 "SYS_ftruncate", //201
4825 "SYS___sysctl", //202
4826 "SYS_mlock", //203
4827 "SYS_munlock", //204
4828 "SYS_205",
4829 "SYS_futimes", //206
4830 "SYS_getpgid", //207
4831 "SYS_xfspioctl", //208
4832 "SYS_209",
4833 "SYS_210",
4834 "SYS_211",
4835 "SYS_212",
4836 "SYS_213",
4837 "SYS_214",
4838 "SYS_215",
4839 "SYS_216",
4840 "SYS_217",
4841 "SYS_218",
4842 "SYS_219",
4843 "SYS_220",
4844 "SYS_semget", //221
4845 "SYS_222",
4846 "SYS_223",
4847 "SYS_224",
4848 "SYS_msgget", //225
4849 "SYS_msgsnd", //226
4850 "SYS_msgrcv", //227
4851 "SYS_shmat", //228
4852 "SYS_229",
4853 "SYS_shmdt", //230
4854 "SYS_231",
4855 "SYS_clock_gettime", //232
4856 "SYS_clock_settime", //233
4857 "SYS_clock_getres", //234
4858 "SYS_235",
4859 "SYS_236",
4860 "SYS_237",
4861 "SYS_238",
4862 "SYS_239",
4863 "SYS_nanosleep", //240
4864 "SYS_241",
4865 "SYS_242",
4866 "SYS_243",
4867 "SYS_244",
4868 "SYS_245",
4869 "SYS_246",
4870 "SYS_247",
4871 "SYS_248",
4872 "SYS_249",
4873 "SYS_minherit", //250
4874 "SYS_rfork", //251
4875 "SYS_poll", //252
4876 "SYS_issetugid", //253
4877 "SYS_lchown", //254
4878 "SYS_getsid", //255
4879 "SYS_msync", //256
4880 "SYS_257",
4881 "SYS_258",
4882 "SYS_259",
4883 "SYS_getfsstat", //260
4884 "SYS_statfs", //261
4885 "SYS_fstatfs", //262
4886 "SYS_pipe", //263
4887 "SYS_fhopen", //264
4888 "SYS_265",
4889 "SYS_fhstatfs", //266
4890 "SYS_preadv", //267
4891 "SYS_pwritev", //268
4892 "SYS_kqueue", //269
4893 "SYS_kevent", //270
4894 "SYS_mlockall", //271
4895 "SYS_munlockall", //272
4896 "SYS_getpeereid", //273
4897 "SYS_274",
4898 "SYS_275",
4899 "SYS_276",
4900 "SYS_277",
4901 "SYS_278",
4902 "SYS_279",
4903 "SYS_280",
4904 "SYS_getresuid", //281
4905 "SYS_setresuid", //282
4906 "SYS_getresgid", //283
4907 "SYS_setresgid", //284
4908 "SYS_285",
4909 "SYS_mquery", //286
4910 "SYS_closefrom", //287
4911 "SYS_sigaltstack", //288
4912 "SYS_shmget", //289
4913 "SYS_semop", //290
4914 "SYS_stat", //291
4915 "SYS_fstat", //292
4916 "SYS_lstat", //293
4917 "SYS_fhstat", //294
4918 "SYS___semctl", //295
4919 "SYS_shmctl", //296
4920 "SYS_msgctl", //297
4921 "SYS_MAXSYSCALL", //298
4922 //299
4923 //300
4924 };
4925 uint32_t uEAX;
4926 if (!LogIsEnabled())
4927 return;
4928 uEAX = CPUMGetGuestEAX(pVM);
4929 switch (uEAX)
4930 {
4931 default:
4932 if (uEAX < RT_ELEMENTS(apsz))
4933 {
4934 uint32_t au32Args[8] = {0};
4935 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
4936 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
4937 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
4938 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
4939 }
4940 else
4941 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
4942 break;
4943 }
4944}
4945
4946
4947#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 * Only built for the no-CRT x86 Windows configuration; presumably no CRT
 * initialization is needed in that build — TODO confirm.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
4955
/**
 * Minimal byte-wise memcpy replacement for the no-CRT Windows build.
 * Standard memcpy contract: regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (not written to — hence const pointer below).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* keep const: don't discard the src qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
4963
4964#endif
4965
/** QEMU callback on SMM state changes; intentionally empty here —
 *  presumably SMM is handled outside the recompiler, TODO confirm. */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette