VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 18720

Last change on this file since 18720 was 18662, checked in by vboxsync, 16 years ago

src/recompiler_new: Clean out the VBOX_WITH_NEW_PHYS_CODE #ifdefs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 153.6 KB
Line 
1/* $Id: VBoxRecompiler.c 18662 2009-04-02 18:35:32Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Expands to a single struct assignment; each macro argument is evaluated
 * exactly once, so side-effecting arguments are safe here.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145#endif
146/* in exec.c */
147extern uint32_t tlb_flush_count;
148extern uint32_t tb_flush_count;
149extern uint32_t tb_phys_invalidate_count;
150
151/*
152 * Global stuff.
153 */
154
/** MMIO read callbacks.
 * One entry per access width, in order: byte (U8), word (U16), dword (U32). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Same width ordering as g_apfnMMIORead: U8, U16, U32. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (for pages with access handlers registered).
 * Same width ordering: U8, U16, U32. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (for pages with access handlers registered).
 * Same width ordering: U8, U16, U32. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
186
187
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 * A single optional numeric/boolean argument (cTimesMin=0, cTimesMax=~0). */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once via DBGCRegisterCommands() from REMR3Init(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
219
220/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
221uint8_t *code_gen_prologue;
222
223
224/*******************************************************************************
225* Internal Functions *
226*******************************************************************************/
227void remAbort(int rc, const char *pszTip);
228extern int testmath(void);
229
230/* Put them here to avoid unused variable warning. */
231AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
232#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
233//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
234/* Why did this have to be identical?? */
235AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
236#else
237AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
238#endif
239
240
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment embedded in pVM->rem.s.Env,
 * allocates the code generation prologue and single-instruction code buffer,
 * registers the MMIO/handler memory types, the saved state unit, the
 * optional debugger commands and the statistics counters.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /* ctx. */
    pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
    /* REM must see every physical memory registration, so no RAM may exist yet. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    pVM->rem.s.fIgnoreAll = true;

    /* Prologue code must be in executable memory; the file-level comment says
       it has to sit in the lower 4G so generated code can jump to/from it. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's CPUID feature bits (std leaf 1 and ext leaf
       0x80000001) into the recompiler CPU state. */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* NOTE(review): the inner rc deliberately shadows the outer one -
           failing to register the debugger commands is non-fatal. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",         STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",   STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",       STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",     STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",        STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",   STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",        STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",     STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",        STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",   STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",           STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",    STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",       STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",    STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",         STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_REG(pVM, &tb_flush_count,           STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,          STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /* rc is the (successful) status of SSMR3RegisterInternal at this point. */
    return rc;
}
410
411
412/**
413 * Finalizes the REM initialization.
414 *
415 * This is called after all components, devices and drivers has
416 * been initialized. Its main purpose it to finish the RAM related
417 * initialization.
418 *
419 * @returns VBox status code.
420 *
421 * @param pVM The VM handle.
422 */
423REMR3DECL(int) REMR3InitFinalize(PVM pVM)
424{
425 int rc;
426
427 /*
428 * Ram size & dirty bit map.
429 */
430 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
431 pVM->rem.s.fGCPhysLastRamFixed = true;
432#ifdef RT_STRICT
433 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
434#else
435 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
436#endif
437 return rc;
438}
439
440
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map appears to use one byte per guest page (size = RAM size >>
 * PAGE_SHIFT).  When fGuarded is set, the map is placed so that it ends at a
 * page boundary followed by inaccessible guard memory, making overruns fault
 * immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* GCPhysLastRam is the last valid RAM address, so +1 yields the RAM size. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam, /* catches wrap-around of the +1 above */
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT; /* one dirty byte per page */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)            /* ensure there is guard space... */
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K) /* ...of at least 64KB. */
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail of the allocation inaccessible so overruns fault. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the start so the used part of the map ends exactly at the guard. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
495
496
497/**
498 * Terminates the REM.
499 *
500 * Termination means cleaning up and freeing all resources,
501 * the VM it self is at this point powered off or suspended.
502 *
503 * @returns VBox status code.
504 * @param pVM The VM to operate on.
505 */
506REMR3DECL(int) REMR3Term(PVM pVM)
507{
508 return VINF_SUCCESS;
509}
510
511
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     */
    pVM->rem.s.fIgnoreAll = true;   /* suppress notifications while resetting */
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
536
537
/**
 * Execute state save operation.
 *
 * Saved layout (must match remR3Load): hflags, ~0 separator, raw-ring-0
 * flag, pending interrupt, ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Only the last put's status is returned; NOTE(review): assumes earlier
       SSM put failures are sticky in the handle - confirm. */
    return SSMR3PutU32(pSSM, ~0);      /* terminator */
}
564
565
/**
 * Execute state load operation.
 *
 * Accepts the current saved state version and the 1.6 layout; the read order
 * must match what remR3Save wrote (plus the extra 1.6-only fields).
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         * (1.6 saved the invalidated-pages queue as well; newer versions do not.)
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the whole TLB so the loaded state starts from a clean slate.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
687
688
689
690#undef LOG_GROUP
691#define LOG_GROUP LOG_GROUP_REM_RUN
692
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;   /* saved and restored at the bottom */
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (fBp remembers whether a breakpoint was removed so it can be re-inserted below.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Single step completed; nudge the TSC and virtual clock forward. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the recompiler exit code to a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* Status was parked in pVM->rem.s.rc; consume and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
778
779
780/**
781 * Set a breakpoint using the REM facilities.
782 *
783 * @returns VBox status code.
784 * @param pVM The VM handle.
785 * @param Address The breakpoint address.
786 * @thread The emulation thread.
787 */
788REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
789{
790 VM_ASSERT_EMT(pVM);
791 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
792 {
793 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
794 return VINF_SUCCESS;
795 }
796 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
797 return VERR_REM_NO_MORE_BP_SLOTS;
798}
799
800
801/**
802 * Clears a breakpoint set by REMR3BreakpointSet().
803 *
804 * @returns VBox status code.
805 * @param pVM The VM handle.
806 * @param Address The breakpoint address.
807 * @thread The emulation thread.
808 */
809REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
810{
811 VM_ASSERT_EMT(pVM);
812 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
813 {
814 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
815 return VINF_SUCCESS;
816 }
817 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
818 return VERR_REM_BP_NOT_FOUND;
819}
820
821
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs;   /* restore the saved flush request so it is honored on the next full sync */
    if (RT_SUCCESS(rc))
    {
        /* Save interrupt_request so we can restore it after the single-instruction run;
           only the "external attention" bits are expected here. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        /* Translate the QEMU exit code (EXCP_*) into a VBox status code. */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint at that address we cannot tell the two apart;
             * scan the breakpoint table to disambiguate.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             * The stored rc is consumed and reset so stale codes cannot leak into later runs.
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
969
970
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);
    /* Map the QEMU exit code (EXCP_*) onto a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            /* Developer-only interactive debugging aid; compiled out. */
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the breakpoint table for the
               current linear PC to tell the two causes apart. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         * Consume the stored rc and reset it so it cannot be returned twice.
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1098
1099
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields that function inspects are filled in; the
         * segment attribute words are converted back from QEMU's
         * "flags" layout (attributes live in bits 8..23, hence the
         * >> 8 and the 0xF0FF mask).
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr                      = env->tr.selector;
        Ctx.trHid.u64Base           = env->tr.base;
        Ctx.trHid.u32Limit          = env->tr.limit;
        Ctx.trHid.Attr.u            = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt              = env->idt.limit;
        Ctx.idtr.pIdt               = env->idt.base;

        Ctx.gdtr.cbGdt              = env->gdt.limit;
        Ctx.gdtr.pGdt               = env->gdt.base;

        Ctx.rsp                     = env->regs[R_ESP];
        Ctx.rip                     = env->eip;

        Ctx.eflags.u32              = env->eflags;

        Ctx.cs                      = env->segs[R_CS].selector;
        Ctx.csHid.u64Base           = env->segs[R_CS].base;
        Ctx.csHid.u32Limit          = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u            = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds                      = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base           = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit          = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u            = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es                      = env->segs[R_ES].selector;
        Ctx.esHid.u64Base           = env->segs[R_ES].base;
        Ctx.esHid.u32Limit          = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u            = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs                      = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base           = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit          = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u            = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs                      = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base           = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit          = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u            = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss                      = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base           = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit          = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u            = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER                 = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    /* V86 mode is normally not eligible for raw execution. */
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Both paging and protected mode must be enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable when the guest CPUID actually advertises it. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always run in raw mode, overriding the checks below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1334
1335
1336/**
1337 * Fetches a code byte.
1338 *
1339 * @returns Success indicator (bool) for ease of use.
1340 * @param env The CPU environment structure.
1341 * @param GCPtrInstr Where to fetch code.
1342 * @param pu8Byte Where to store the byte on success
1343 */
1344bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1345{
1346 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1347 if (RT_SUCCESS(rc))
1348 return true;
1349 return false;
1350}
1351
1352
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env         Pointer to cpu environment.
 * @param   GCPtr       The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     * PGM needs the current CR0/CR3/CR4 values to resolve the mapping.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires the TSS to be resynced (affects V86 interrupt redirection). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    rc = PGMInvalidatePage(pVM, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Fall back to a full CR3 sync if the single-page invalidation failed. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1399
1400
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to an R3 pointer for the recompiler's
 * software TLB. On failure or write-monitored pages the low pointer bits are
 * used as tags (presumably consumed by the TLB access code — see return values).
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is not forwarded; a writable
       mapping is always requested and write-monitored pages are tagged with
       bit 1 below instead — confirm this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;   /* tag: access must be handled via handlers / unassigned memory */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);   /* tag: writes must be caught */
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1423
1424
1425/**
1426 * Called from tlb_protect_code in order to write monitor a code page.
1427 *
1428 * @param env Pointer to the CPU environment.
1429 * @param GCPtr Code page to monitor
1430 */
1431void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1432{
1433#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1434 Assert(env->pVM->rem.s.fInREM);
1435 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1436 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1437 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1438 && !(env->eflags & VM_MASK) /* no V86 mode */
1439 && !HWACCMIsEnabled(env->pVM))
1440 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1441#endif
1442}
1443
1444
1445/**
1446 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1447 *
1448 * @param env Pointer to the CPU environment.
1449 * @param GCPtr Code page to monitor
1450 */
1451void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1452{
1453 Assert(env->pVM->rem.s.fInREM);
1454#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1455 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1456 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1457 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1458 && !(env->eflags & VM_MASK) /* no V86 mode */
1459 && !HWACCMIsEnabled(env->pVM))
1460 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1461#endif
1462}
1463
1464
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires the TSS to be resynced (affects V86 interrupt redirection). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    PGMFlushTLB(pVM, env->cr[3], fGlobal);
}
1507
1508
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle requires the TSS to be resynced (affects V86 interrupt redirection). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    efer = 0;   /* no EFER on 32-bit-only targets */
#endif
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* An EM informational status: forward it to EMT via the pending-RC mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            /* Anything else is fatal at this point. */
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1557
1558
/**
 * Called from compiled code to run dma.
 *
 * The emulated-code profiling sample is paused around the PDM call so DMA
 * time is not accounted to recompiled code execution.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1570
1571
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * The emulated-code profiling sample is swapped for the timer sample around
 * the TM call so timer work is accounted separately.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1586
1587
/**
 * Record trap occurance
 *
 * Tracks repeated occurrences of the same exception (same vector, EIP and
 * CR2) so a guest stuck re-raising one trap can be detected and aborted
 * with VERR_REM_TOO_MANY_TRAPS.
 *
 * @returns VBox status code
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-vector trap counters. */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (vectors < 0x20) in protected non-V86 mode take part
       in the repeat-trap detection. */
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap vector repeating too often at the same spot -> give up. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Different vector or different location restarts the counter. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not a tracked exception class: just record it without counting. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1647
1648
1649/*
1650 * Clear current active trap
1651 *
1652 * @param pVM VM Handle.
1653 */
1654void remR3TrapClear(PVM pVM)
1655{
1656 pVM->rem.s.cPendingExceptions = 0;
1657 pVM->rem.s.uPendingException = 0;
1658 pVM->rem.s.uPendingExcptEIP = 0;
1659 pVM->rem.s.uPendingExcptCR2 = 0;
1660}
1661
1662
/**
 * Record previous call instruction addresses
 *
 * Forwards the current EIP to CSAM's call-address tracking.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1672
1673
1674/**
1675 * Syncs the internal REM state with the VM.
1676 *
1677 * This must be called before REMR3Run() is invoked whenever when the REM
1678 * state is not up to date. Calling it several times in a row is not
1679 * permitted.
1680 *
1681 * @returns VBox status code.
1682 *
1683 * @param pVM VM Handle.
1684 *
1685 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1686 * no do this since the majority of the callers don't want any unnecessary of events
1687 * pending that would immediatly interrupt execution.
1688 */
1689REMR3DECL(int) REMR3State(PVM pVM)
1690{
1691 register const CPUMCTX *pCtx;
1692 register unsigned fFlags;
1693 bool fHiddenSelRegsValid;
1694 unsigned i;
1695 TRPMEVENT enmType;
1696 uint8_t u8TrapNo;
1697 int rc;
1698
1699 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1700 Log2(("REMR3State:\n"));
1701
1702 pCtx = pVM->rem.s.pCtx;
1703 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1704
1705 Assert(!pVM->rem.s.fInREM);
1706 pVM->rem.s.fInStateSync = true;
1707
1708 /*
1709 * If we have to flush TBs, do that immediately.
1710 */
1711 if (pVM->rem.s.fFlushTBs)
1712 {
1713 STAM_COUNTER_INC(&gStatFlushTBs);
1714 tb_flush(&pVM->rem.s.Env);
1715 pVM->rem.s.fFlushTBs = false;
1716 }
1717
1718 /*
1719 * Copy the registers which require no special handling.
1720 */
1721#ifdef TARGET_X86_64
1722 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1723 Assert(R_EAX == 0);
1724 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1725 Assert(R_ECX == 1);
1726 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1727 Assert(R_EDX == 2);
1728 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1729 Assert(R_EBX == 3);
1730 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1731 Assert(R_ESP == 4);
1732 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1733 Assert(R_EBP == 5);
1734 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1735 Assert(R_ESI == 6);
1736 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1737 Assert(R_EDI == 7);
1738 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1739 pVM->rem.s.Env.regs[8] = pCtx->r8;
1740 pVM->rem.s.Env.regs[9] = pCtx->r9;
1741 pVM->rem.s.Env.regs[10] = pCtx->r10;
1742 pVM->rem.s.Env.regs[11] = pCtx->r11;
1743 pVM->rem.s.Env.regs[12] = pCtx->r12;
1744 pVM->rem.s.Env.regs[13] = pCtx->r13;
1745 pVM->rem.s.Env.regs[14] = pCtx->r14;
1746 pVM->rem.s.Env.regs[15] = pCtx->r15;
1747
1748 pVM->rem.s.Env.eip = pCtx->rip;
1749
1750 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1751#else
1752 Assert(R_EAX == 0);
1753 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1754 Assert(R_ECX == 1);
1755 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1756 Assert(R_EDX == 2);
1757 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1758 Assert(R_EBX == 3);
1759 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1760 Assert(R_ESP == 4);
1761 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1762 Assert(R_EBP == 5);
1763 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1764 Assert(R_ESI == 6);
1765 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1766 Assert(R_EDI == 7);
1767 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1768 pVM->rem.s.Env.eip = pCtx->eip;
1769
1770 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1771#endif
1772
1773 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1774
1775 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1776 for (i=0;i<8;i++)
1777 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1778
1779 /*
1780 * Clear the halted hidden flag (the interrupt waking up the CPU can
1781 * have been dispatched in raw mode).
1782 */
1783 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1784
1785 /*
1786 * Replay invlpg?
1787 */
1788 if (pVM->rem.s.cInvalidatedPages)
1789 {
1790 RTUINT i;
1791
1792 pVM->rem.s.fIgnoreInvlPg = true;
1793 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1794 {
1795 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1796 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1797 }
1798 pVM->rem.s.fIgnoreInvlPg = false;
1799 pVM->rem.s.cInvalidatedPages = 0;
1800 }
1801
1802 /* Replay notification changes? */
1803 if (pVM->rem.s.cHandlerNotifications)
1804 REMR3ReplayHandlerNotifications(pVM);
1805
1806 /* Update MSRs; before CRx registers! */
1807 pVM->rem.s.Env.efer = pCtx->msrEFER;
1808 pVM->rem.s.Env.star = pCtx->msrSTAR;
1809 pVM->rem.s.Env.pat = pCtx->msrPAT;
1810#ifdef TARGET_X86_64
1811 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1812 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1813 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1814 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1815
1816 /* Update the internal long mode activate flag according to the new EFER value. */
1817 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1818 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1819 else
1820 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1821#endif
1822
1823 /*
1824 * Registers which are rarely changed and require special handling / order when changed.
1825 */
1826 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1827 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1828 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1829 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1830 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1831 {
1832 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1833 {
1834 pVM->rem.s.fIgnoreCR3Load = true;
1835 tlb_flush(&pVM->rem.s.Env, true);
1836 pVM->rem.s.fIgnoreCR3Load = false;
1837 }
1838
1839 /* CR4 before CR0! */
1840 if (fFlags & CPUM_CHANGED_CR4)
1841 {
1842 pVM->rem.s.fIgnoreCR3Load = true;
1843 pVM->rem.s.fIgnoreCpuMode = true;
1844 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1845 pVM->rem.s.fIgnoreCpuMode = false;
1846 pVM->rem.s.fIgnoreCR3Load = false;
1847 }
1848
1849 if (fFlags & CPUM_CHANGED_CR0)
1850 {
1851 pVM->rem.s.fIgnoreCR3Load = true;
1852 pVM->rem.s.fIgnoreCpuMode = true;
1853 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1854 pVM->rem.s.fIgnoreCpuMode = false;
1855 pVM->rem.s.fIgnoreCR3Load = false;
1856 }
1857
1858 if (fFlags & CPUM_CHANGED_CR3)
1859 {
1860 pVM->rem.s.fIgnoreCR3Load = true;
1861 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1862 pVM->rem.s.fIgnoreCR3Load = false;
1863 }
1864
1865 if (fFlags & CPUM_CHANGED_GDTR)
1866 {
1867 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1868 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1869 }
1870
1871 if (fFlags & CPUM_CHANGED_IDTR)
1872 {
1873 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1874 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1875 }
1876
1877 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1878 {
1879 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1880 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1881 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1882 }
1883
1884 if (fFlags & CPUM_CHANGED_LDTR)
1885 {
1886 if (fHiddenSelRegsValid)
1887 {
1888 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1889 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1890 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1891 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1892 }
1893 else
1894 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1895 }
1896
1897 if (fFlags & CPUM_CHANGED_CPUID)
1898 {
1899 uint32_t u32Dummy;
1900
1901 /*
1902 * Get the CPUID features.
1903 */
1904 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1905 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1906 }
1907
1908 /* Sync FPU state after CR4, CPUID and EFER (!). */
1909 if (fFlags & CPUM_CHANGED_FPU_REM)
1910 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1911 }
1912
1913 /*
1914 * Sync TR unconditionally to make life simpler.
1915 */
1916 pVM->rem.s.Env.tr.selector = pCtx->tr;
1917 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1918 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1919 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1920 /* Note! do_interrupt will fault if the busy flag is still set... */
1921 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1922
1923 /*
1924 * Update selector registers.
1925 * This must be done *after* we've synced gdt, ldt and crX registers
1926 * since we're reading the GDT/LDT om sync_seg. This will happen with
1927 * saved state which takes a quick dip into rawmode for instance.
1928 */
1929 /*
1930 * Stack; Note first check this one as the CPL might have changed. The
1931 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1932 */
1933
1934 if (fHiddenSelRegsValid)
1935 {
1936 /* The hidden selector registers are valid in the CPU context. */
1937 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1938
1939 /* Set current CPL */
1940 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1941
1942 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1943 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1944 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1945 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1946 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1947 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1948 }
1949 else
1950 {
1951 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1952 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1953 {
1954 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1955
1956 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1957 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1958#ifdef VBOX_WITH_STATISTICS
1959 if (pVM->rem.s.Env.segs[R_SS].newselector)
1960 {
1961 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1962 }
1963#endif
1964 }
1965 else
1966 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1967
1968 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1969 {
1970 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1971 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1972#ifdef VBOX_WITH_STATISTICS
1973 if (pVM->rem.s.Env.segs[R_ES].newselector)
1974 {
1975 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1976 }
1977#endif
1978 }
1979 else
1980 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1981
1982 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1983 {
1984 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1985 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1986#ifdef VBOX_WITH_STATISTICS
1987 if (pVM->rem.s.Env.segs[R_CS].newselector)
1988 {
1989 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1990 }
1991#endif
1992 }
1993 else
1994 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1995
1996 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1997 {
1998 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1999 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2000#ifdef VBOX_WITH_STATISTICS
2001 if (pVM->rem.s.Env.segs[R_DS].newselector)
2002 {
2003 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2004 }
2005#endif
2006 }
2007 else
2008 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2009
2010 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2011 * be the same but not the base/limit. */
2012 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2013 {
2014 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2015 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2016#ifdef VBOX_WITH_STATISTICS
2017 if (pVM->rem.s.Env.segs[R_FS].newselector)
2018 {
2019 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2020 }
2021#endif
2022 }
2023 else
2024 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2025
2026 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2027 {
2028 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2029 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2030#ifdef VBOX_WITH_STATISTICS
2031 if (pVM->rem.s.Env.segs[R_GS].newselector)
2032 {
2033 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2034 }
2035#endif
2036 }
2037 else
2038 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2039 }
2040
2041 /*
2042 * Check for traps.
2043 */
2044 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2045 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2046 if (RT_SUCCESS(rc))
2047 {
2048#ifdef DEBUG
2049 if (u8TrapNo == 0x80)
2050 {
2051 remR3DumpLnxSyscall(pVM);
2052 remR3DumpOBsdSyscall(pVM);
2053 }
2054#endif
2055
2056 pVM->rem.s.Env.exception_index = u8TrapNo;
2057 if (enmType != TRPM_SOFTWARE_INT)
2058 {
2059 pVM->rem.s.Env.exception_is_int = 0;
2060 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2061 }
2062 else
2063 {
2064 /*
2065 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2066 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2067 * for int03 and into.
2068 */
2069 pVM->rem.s.Env.exception_is_int = 1;
2070 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2071 /* int 3 may be generated by one-byte 0xcc */
2072 if (u8TrapNo == 3)
2073 {
2074 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2075 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2076 }
2077 /* int 4 may be generated by one-byte 0xce */
2078 else if (u8TrapNo == 4)
2079 {
2080 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2081 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2082 }
2083 }
2084
2085 /* get error code and cr2 if needed. */
2086 switch (u8TrapNo)
2087 {
2088 case 0x0e:
2089 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2090 /* fallthru */
2091 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2092 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2093 break;
2094
2095 case 0x11: case 0x08:
2096 default:
2097 pVM->rem.s.Env.error_code = 0;
2098 break;
2099 }
2100
2101 /*
2102 * We can now reset the active trap since the recompiler is gonna have a go at it.
2103 */
2104 rc = TRPMResetTrap(pVM);
2105 AssertRC(rc);
2106 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2107 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2108 }
2109
2110 /*
2111 * Clear old interrupt request flags; Check for pending hardware interrupts.
2112 * (See @remark for why we don't check for other FFs.)
2113 */
2114 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2115 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2116 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2117 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2118
2119 /*
2120 * We're now in REM mode.
2121 */
2122 pVM->rem.s.fInREM = true;
2123 pVM->rem.s.fInStateSync = false;
2124 pVM->rem.s.cCanExecuteRaw = 0;
2125 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2126 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2127 return VINF_SUCCESS;
2128}
2129
2130
2131/**
 * Syncs back changes in the REM state to the VM state.
2133 *
2134 * This must be called after invoking REMR3Run().
2135 * Calling it several times in a row is not permitted.
2136 *
2137 * @returns VBox status code.
2138 *
2139 * @param pVM VM Handle.
2140 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that are still stale (newselector != 0) on the way back. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME change requires a TSS resync (VM_FF_SELM_SYNC_TSS), so check before updating. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* Descriptor table bases changing means the GDT/IDT shadows must be resynced. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    /* QEmu keeps the full 2nd descriptor dword in 'flags'; '>> 8 & 0xF0FF' extracts just the attribute bits. */
    if (    pCtx->ldtr              != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base   != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit  != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u    != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr                != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base     != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit    != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u      != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                        ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                        : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-set the busy flag here since REMR3State cleared it before entering REM. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * If the recompiler left an exception pending, hand it back to TRPM so the
     * other execution engines can deliver it.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Forward the error code / fault address for the exceptions that have one. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2376
2377
2378/**
2379 * This is called by the disassembler when it wants to update the cpu state
2380 * before for instance doing a register dump.
2381 */
2382static void remR3StateUpdate(PVM pVM)
2383{
2384 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2385 unsigned i;
2386
2387 Assert(pVM->rem.s.fInREM);
2388
2389 /*
2390 * Copy back the registers.
2391 * This is done in the order they are declared in the CPUMCTX structure.
2392 */
2393
2394 /** @todo FOP */
2395 /** @todo FPUIP */
2396 /** @todo CS */
2397 /** @todo FPUDP */
2398 /** @todo DS */
2399 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2400 pCtx->fpu.MXCSR = 0;
2401 pCtx->fpu.MXCSR_MASK = 0;
2402
2403 /** @todo check if FPU/XMM was actually used in the recompiler */
2404 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2405//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2406
2407#ifdef TARGET_X86_64
2408 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2409 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2410 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2411 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2412 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2413 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2414 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2415 pCtx->r8 = pVM->rem.s.Env.regs[8];
2416 pCtx->r9 = pVM->rem.s.Env.regs[9];
2417 pCtx->r10 = pVM->rem.s.Env.regs[10];
2418 pCtx->r11 = pVM->rem.s.Env.regs[11];
2419 pCtx->r12 = pVM->rem.s.Env.regs[12];
2420 pCtx->r13 = pVM->rem.s.Env.regs[13];
2421 pCtx->r14 = pVM->rem.s.Env.regs[14];
2422 pCtx->r15 = pVM->rem.s.Env.regs[15];
2423
2424 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2425#else
2426 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2427 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2428 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2429 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2430 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2431 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2432 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2433
2434 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2435#endif
2436
2437 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2438
2439 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2440 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2441 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2442 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2443 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2444
2445#ifdef TARGET_X86_64
2446 pCtx->rip = pVM->rem.s.Env.eip;
2447 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2448#else
2449 pCtx->eip = pVM->rem.s.Env.eip;
2450 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2451#endif
2452
2453 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2454 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2455 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2456 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2457 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2458 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2459
2460 for (i = 0; i < 8; i++)
2461 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2462
2463 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2464 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2465 {
2466 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2467 STAM_COUNTER_INC(&gStatREMGDTChange);
2468 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2469 }
2470
2471 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2472 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2473 {
2474 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2475 STAM_COUNTER_INC(&gStatREMIDTChange);
2476 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2477 }
2478
2479 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2480 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2481 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2482 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2483 {
2484 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2485 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2486 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2487 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2488 STAM_COUNTER_INC(&gStatREMLDTRChange);
2489 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2490 }
2491
2492 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2493 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2494 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2495 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2496 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2497 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2498 : 0) )
2499 {
2500 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2501 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2502 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2503 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2504 pCtx->tr = pVM->rem.s.Env.tr.selector;
2505 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2506 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2507 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2508 if (pCtx->trHid.Attr.u)
2509 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2510 STAM_COUNTER_INC(&gStatREMTRChange);
2511 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2512 }
2513
2514 /** @todo These values could still be out of sync! */
2515 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2516 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2517 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2518 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2519
2520 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2521 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2522 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2523
2524 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2525 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2526 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2527
2528 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2529 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2530 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2531
2532 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2533 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2534 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2535
2536 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2537 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2538 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2539
2540 /* Sysenter MSR */
2541 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2542 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2543 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2544
2545 /* System MSRs. */
2546 pCtx->msrEFER = pVM->rem.s.Env.efer;
2547 pCtx->msrSTAR = pVM->rem.s.Env.star;
2548 pCtx->msrPAT = pVM->rem.s.Env.pat;
2549#ifdef TARGET_X86_64
2550 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2551 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2552 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2553 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2554#endif
2555
2556}
2557
2558
2559/**
2560 * Update the VMM state information if we're currently in REM.
2561 *
2562 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2563 * we're currently executing in REM and the VMM state is invalid. This method will of
2564 * course check that we're executing in REM before syncing any data over to the VMM.
2565 *
2566 * @param pVM The VM handle.
2567 */
2568REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2569{
2570 if (pVM->rem.s.fInREM)
2571 remR3StateUpdate(pVM);
2572}
2573
2574
2575#undef LOG_GROUP
2576#define LOG_GROUP LOG_GROUP_REM
2577
2578
2579/**
2580 * Notify the recompiler about Address Gate 20 state change.
2581 *
2582 * This notification is required since A20 gate changes are
2583 * initialized from a device driver and the VM might just as
2584 * well be in REM mode as in RAW mode.
2585 *
2586 * @param pVM VM handle.
2587 * @param fEnable True if the gate should be enabled.
2588 * False if the gate should be disabled.
2589 */
2590REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2591{
2592 bool fSaved;
2593
2594 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2595 VM_ASSERT_EMT(pVM);
2596
2597 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2598 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2599
2600 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2601
2602 pVM->rem.s.fIgnoreAll = fSaved;
2603}
2604
2605
2606/**
2607 * Replays the invalidated recorded pages.
2608 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2609 *
2610 * @param pVM VM handle.
2611 */
2612REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2613{
2614 RTUINT i;
2615
2616 VM_ASSERT_EMT(pVM);
2617
2618 /*
2619 * Sync the required registers.
2620 */
2621 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2622 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2623 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2624 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2625
2626 /*
2627 * Replay the flushes.
2628 */
2629 pVM->rem.s.fIgnoreInvlPg = true;
2630 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2631 {
2632 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2633 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2634 }
2635 pVM->rem.s.fIgnoreInvlPg = false;
2636 pVM->rem.s.cInvalidatedPages = 0;
2637}
2638
2639
2640/**
2641 * Replays the handler notification changes
2642 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2643 *
2644 * @param pVM VM handle.
2645 */
2646REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2647{
2648 /*
2649 * Replay the flushes.
2650 */
2651 RTUINT i;
2652 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2653
2654 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2655 VM_ASSERT_EMT(pVM);
2656
2657 pVM->rem.s.cHandlerNotifications = 0;
2658 for (i = 0; i < c; i++)
2659 {
2660 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2661 switch (pRec->enmKind)
2662 {
2663 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2664 REMR3NotifyHandlerPhysicalRegister(pVM,
2665 pRec->u.PhysicalRegister.enmType,
2666 pRec->u.PhysicalRegister.GCPhys,
2667 pRec->u.PhysicalRegister.cb,
2668 pRec->u.PhysicalRegister.fHasHCHandler);
2669 break;
2670
2671 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2672 REMR3NotifyHandlerPhysicalDeregister(pVM,
2673 pRec->u.PhysicalDeregister.enmType,
2674 pRec->u.PhysicalDeregister.GCPhys,
2675 pRec->u.PhysicalDeregister.cb,
2676 pRec->u.PhysicalDeregister.fHasHCHandler,
2677 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2678 break;
2679
2680 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2681 REMR3NotifyHandlerPhysicalModify(pVM,
2682 pRec->u.PhysicalModify.enmType,
2683 pRec->u.PhysicalModify.GCPhysOld,
2684 pRec->u.PhysicalModify.GCPhysNew,
2685 pRec->u.PhysicalModify.cb,
2686 pRec->u.PhysicalModify.fHasHCHandler,
2687 pRec->u.PhysicalModify.fRestoreAsRAM);
2688 break;
2689
2690 default:
2691 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2692 break;
2693 }
2694 }
2695 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2696}
2697
2698
2699/**
2700 * Notify REM about changed code page.
2701 *
2702 * @returns VBox status code.
2703 * @param pVM VM handle.
2704 * @param pvCodePage Code page address
2705 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
{
    /* Note: compiles to a no-op returning VINF_SUCCESS unless
       VBOX_REM_PROTECT_PAGES_FROM_SMC is defined. */
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2736
2737
2738/**
2739 * Notification about a successful MMR3PhysRegister() call.
2740 *
2741 * @param pVM VM handle.
2742 * @param GCPhys The physical address the RAM.
2743 * @param cb Size of the memory.
2744 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2745 */
2746REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2747{
2748 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2749 VM_ASSERT_EMT(pVM);
2750
2751 /*
2752 * Validate input - we trust the caller.
2753 */
2754 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2755 Assert(cb);
2756 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2757 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2758
2759 /*
2760 * Base ram? Update GCPhysLastRam.
2761 */
2762 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2763 {
2764 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2765 {
2766 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2767 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2768 }
2769 }
2770
2771 /*
2772 * Register the ram.
2773 */
2774 Assert(!pVM->rem.s.fIgnoreAll);
2775 pVM->rem.s.fIgnoreAll = true;
2776
2777 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2778 Assert(pVM->rem.s.fIgnoreAll);
2779 pVM->rem.s.fIgnoreAll = false;
2780}
2781
2782
2783/**
2784 * Notification about a successful MMR3PhysRomRegister() call.
2785 *
2786 * @param pVM VM handle.
2787 * @param GCPhys The physical address of the ROM.
2788 * @param cb The size of the ROM.
2789 * @param pvCopy Pointer to the ROM copy.
2790 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2791 * This function will be called when ever the protection of the
2792 * shadow ROM changes (at reset and end of POST).
2793 */
2794REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2795{
2796 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2797 VM_ASSERT_EMT(pVM);
2798
2799 /*
2800 * Validate input - we trust the caller.
2801 */
2802 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2803 Assert(cb);
2804 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2805
2806 /*
2807 * Register the rom.
2808 */
2809 Assert(!pVM->rem.s.fIgnoreAll);
2810 pVM->rem.s.fIgnoreAll = true;
2811
2812 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2813
2814 Assert(pVM->rem.s.fIgnoreAll);
2815 pVM->rem.s.fIgnoreAll = false;
2816}
2817
2818
2819/**
2820 * Notification about a successful memory deregistration or reservation.
2821 *
2822 * @param pVM VM Handle.
2823 * @param GCPhys Start physical address.
2824 * @param cb The size of the range.
2825 */
2826REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2827{
2828 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2829 VM_ASSERT_EMT(pVM);
2830
2831 /*
2832 * Validate input - we trust the caller.
2833 */
2834 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2835 Assert(cb);
2836 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2837
2838 /*
2839 * Unassigning the memory.
2840 */
2841 Assert(!pVM->rem.s.fIgnoreAll);
2842 pVM->rem.s.fIgnoreAll = true;
2843
2844 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2845
2846 Assert(pVM->rem.s.fIgnoreAll);
2847 pVM->rem.s.fIgnoreAll = false;
2848}
2849
2850
2851/**
2852 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2853 *
2854 * @param pVM VM Handle.
2855 * @param enmType Handler type.
2856 * @param GCPhys Handler range address.
2857 * @param cb Size of the handler range.
2858 * @param fHasHCHandler Set if the handler has a HC callback function.
2859 *
2860 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2861 * Handler memory type to memory which has no HC handler.
2862 */
2863REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2864{
2865 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2866 enmType, GCPhys, cb, fHasHCHandler));
2867 VM_ASSERT_EMT(pVM);
2868 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2869 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2870
2871 if (pVM->rem.s.cHandlerNotifications)
2872 REMR3ReplayHandlerNotifications(pVM);
2873
2874 Assert(!pVM->rem.s.fIgnoreAll);
2875 pVM->rem.s.fIgnoreAll = true;
2876
2877 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2878 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2879 else if (fHasHCHandler)
2880 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2881
2882 Assert(pVM->rem.s.fIgnoreAll);
2883 pVM->rem.s.fIgnoreAll = false;
2884}
2885
2886
2887/**
2888 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2889 *
2890 * @param pVM VM Handle.
2891 * @param enmType Handler type.
2892 * @param GCPhys Handler range address.
2893 * @param cb Size of the handler range.
2894 * @param fHasHCHandler Set if the handler has a HC callback function.
2895 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2896 */
2897REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2898{
2899 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2900 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2901 VM_ASSERT_EMT(pVM);
2902
2903 if (pVM->rem.s.cHandlerNotifications)
2904 REMR3ReplayHandlerNotifications(pVM);
2905
2906 Assert(!pVM->rem.s.fIgnoreAll);
2907 pVM->rem.s.fIgnoreAll = true;
2908
2909/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2910 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2911 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2912 else if (fHasHCHandler)
2913 {
2914 if (!fRestoreAsRAM)
2915 {
2916 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2917 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2918 }
2919 else
2920 {
2921 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2922 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2923 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2924 }
2925 }
2926
2927 Assert(pVM->rem.s.fIgnoreAll);
2928 pVM->rem.s.fIgnoreAll = false;
2929}
2930
2931
2932/**
2933 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2934 *
2935 * @param pVM VM Handle.
2936 * @param enmType Handler type.
2937 * @param GCPhysOld Old handler range address.
2938 * @param GCPhysNew New handler range address.
2939 * @param cb Size of the handler range.
2940 * @param fHasHCHandler Set if the handler has a HC callback function.
2941 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2942 */
2943REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2944{
2945 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
2946 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
2947 VM_ASSERT_EMT(pVM);
2948 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
2949
2950 if (pVM->rem.s.cHandlerNotifications)
2951 REMR3ReplayHandlerNotifications(pVM);
2952
2953 if (fHasHCHandler)
2954 {
2955 Assert(!pVM->rem.s.fIgnoreAll);
2956 pVM->rem.s.fIgnoreAll = true;
2957
2958 /*
2959 * Reset the old page.
2960 */
2961 if (!fRestoreAsRAM)
2962 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
2963 else
2964 {
2965 /* This is not perfect, but it'll do for PD monitoring... */
2966 Assert(cb == PAGE_SIZE);
2967 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
2968 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
2969 }
2970
2971 /*
2972 * Update the new page.
2973 */
2974 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
2975 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2976 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
2977
2978 Assert(pVM->rem.s.fIgnoreAll);
2979 pVM->rem.s.fIgnoreAll = false;
2980 }
2981}
2982
2983
2984/**
2985 * Checks if we're handling access to this page or not.
2986 *
2987 * @returns true if we're trapping access.
2988 * @returns false if we aren't.
2989 * @param pVM The VM handle.
2990 * @param GCPhys The physical address.
2991 *
2992 * @remark This function will only work correctly in VBOX_STRICT builds!
2993 */
2994REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
2995{
2996#ifdef VBOX_STRICT
2997 unsigned long off;
2998 if (pVM->rem.s.cHandlerNotifications)
2999 REMR3ReplayHandlerNotifications(pVM);
3000
3001 off = get_phys_page_offset(GCPhys);
3002 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3003 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3004 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3005#else
3006 return false;
3007#endif
3008}
3009
3010
3011/**
3012 * Deals with a rare case in get_phys_addr_code where the code
3013 * is being monitored.
3014 *
3015 * It could also be an MMIO page, in which case we will raise a fatal error.
3016 *
3017 * @returns The physical address corresponding to addr.
3018 * @param env The cpu environment.
3019 * @param addr The virtual address.
3020 * @param pTLBEntry The TLB entry.
3021 */
3022target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3023 target_ulong addr,
3024 CPUTLBEntry* pTLBEntry,
3025 target_phys_addr_t ioTLBEntry)
3026{
3027 PVM pVM = env->pVM;
3028
3029 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3030 {
3031 /* If code memory is being monitored, appropriate IOTLB entry will have
3032 handler IO type, and addend will provide real physical address, no
3033 matter if we store VA in TLB or not, as handlers are always passed PA */
3034 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3035 return ret;
3036 }
3037 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3038 "*** handlers\n",
3039 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3040 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3041 LogRel(("*** mmio\n"));
3042 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3043 LogRel(("*** phys\n"));
3044 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3045 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3046 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3047 AssertFatalFailed();
3048}
3049
3050/**
3051 * Read guest RAM and ROM.
3052 *
3053 * @param SrcGCPhys The source address (guest physical).
3054 * @param pvDst The destination address.
3055 * @param cb Number of bytes
3056 */
3057void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3058{
3059 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3060 VBOX_CHECK_ADDR(SrcGCPhys);
3061 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3062#ifdef VBOX_DEBUG_PHYS
3063 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3064#endif
3065 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3066}
3067
3068
3069/**
3070 * Read guest RAM and ROM, unsigned 8-bit.
3071 *
3072 * @param SrcGCPhys The source address (guest physical).
3073 */
3074RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3075{
3076 uint8_t val;
3077 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3078 VBOX_CHECK_ADDR(SrcGCPhys);
3079 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3080 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3081#ifdef VBOX_DEBUG_PHYS
3082 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3083#endif
3084 return val;
3085}
3086
3087
3088/**
3089 * Read guest RAM and ROM, signed 8-bit.
3090 *
3091 * @param SrcGCPhys The source address (guest physical).
3092 */
3093RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3094{
3095 int8_t val;
3096 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3097 VBOX_CHECK_ADDR(SrcGCPhys);
3098 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3099 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3100#ifdef VBOX_DEBUG_PHYS
3101 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3102#endif
3103 return val;
3104}
3105
3106
3107/**
3108 * Read guest RAM and ROM, unsigned 16-bit.
3109 *
3110 * @param SrcGCPhys The source address (guest physical).
3111 */
3112RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3113{
3114 uint16_t val;
3115 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3116 VBOX_CHECK_ADDR(SrcGCPhys);
3117 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3118 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3119#ifdef VBOX_DEBUG_PHYS
3120 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3121#endif
3122 return val;
3123}
3124
3125
3126/**
3127 * Read guest RAM and ROM, signed 16-bit.
3128 *
3129 * @param SrcGCPhys The source address (guest physical).
3130 */
3131RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3132{
3133 int16_t val;
3134 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3135 VBOX_CHECK_ADDR(SrcGCPhys);
3136 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3137 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3138#ifdef VBOX_DEBUG_PHYS
3139 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3140#endif
3141 return val;
3142}
3143
3144
3145/**
3146 * Read guest RAM and ROM, unsigned 32-bit.
3147 *
3148 * @param SrcGCPhys The source address (guest physical).
3149 */
3150RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3151{
3152 uint32_t val;
3153 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3154 VBOX_CHECK_ADDR(SrcGCPhys);
3155 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3156 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3157#ifdef VBOX_DEBUG_PHYS
3158 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3159#endif
3160 return val;
3161}
3162
3163
3164/**
3165 * Read guest RAM and ROM, signed 32-bit.
3166 *
3167 * @param SrcGCPhys The source address (guest physical).
3168 */
3169RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3170{
3171 int32_t val;
3172 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3173 VBOX_CHECK_ADDR(SrcGCPhys);
3174 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3175 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3176#ifdef VBOX_DEBUG_PHYS
3177 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3178#endif
3179 return val;
3180}
3181
3182
3183/**
3184 * Read guest RAM and ROM, unsigned 64-bit.
3185 *
3186 * @param SrcGCPhys The source address (guest physical).
3187 */
3188uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3189{
3190 uint64_t val;
3191 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3192 VBOX_CHECK_ADDR(SrcGCPhys);
3193 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3194 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3195#ifdef VBOX_DEBUG_PHYS
3196 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3197#endif
3198 return val;
3199}
3200
3201
3202/**
3203 * Read guest RAM and ROM, signed 64-bit.
3204 *
3205 * @param SrcGCPhys The source address (guest physical).
3206 */
3207int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3208{
3209 int64_t val;
3210 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3211 VBOX_CHECK_ADDR(SrcGCPhys);
3212 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3213 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3214#ifdef VBOX_DEBUG_PHYS
3215 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3216#endif
3217 return val;
3218}
3219
3220
3221/**
3222 * Write guest RAM.
3223 *
3224 * @param DstGCPhys The destination address (guest physical).
3225 * @param pvSrc The source address.
3226 * @param cb Number of bytes to write
3227 */
3228void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3229{
3230 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3231 VBOX_CHECK_ADDR(DstGCPhys);
3232 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3233 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3234#ifdef VBOX_DEBUG_PHYS
3235 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3236#endif
3237}
3238
3239
3240/**
3241 * Write guest RAM, unsigned 8-bit.
3242 *
3243 * @param DstGCPhys The destination address (guest physical).
3244 * @param val Value
3245 */
3246void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3247{
3248 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3249 VBOX_CHECK_ADDR(DstGCPhys);
3250 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3251 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3252#ifdef VBOX_DEBUG_PHYS
3253 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3254#endif
3255}
3256
3257
3258/**
3259 * Write guest RAM, unsigned 8-bit.
3260 *
3261 * @param DstGCPhys The destination address (guest physical).
3262 * @param val Value
3263 */
3264void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3265{
3266 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3267 VBOX_CHECK_ADDR(DstGCPhys);
3268 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3269 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3270#ifdef VBOX_DEBUG_PHYS
3271 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3272#endif
3273}
3274
3275
3276/**
3277 * Write guest RAM, unsigned 32-bit.
3278 *
3279 * @param DstGCPhys The destination address (guest physical).
3280 * @param val Value
3281 */
3282void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3283{
3284 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3285 VBOX_CHECK_ADDR(DstGCPhys);
3286 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3287 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3288#ifdef VBOX_DEBUG_PHYS
3289 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3290#endif
3291}
3292
3293
3294/**
3295 * Write guest RAM, unsigned 64-bit.
3296 *
3297 * @param DstGCPhys The destination address (guest physical).
3298 * @param val Value
3299 */
3300void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3301{
3302 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3303 VBOX_CHECK_ADDR(DstGCPhys);
3304 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3305 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3306#ifdef VBOX_DEBUG_PHYS
3307 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3308#endif
3309}
3310
3311#undef LOG_GROUP
3312#define LOG_GROUP LOG_GROUP_REM_MMIO
3313
3314/** Read MMIO memory. */
3315static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3316{
3317 uint32_t u32 = 0;
3318 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3319 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3320 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3321 return u32;
3322}
3323
3324/** Read MMIO memory. */
3325static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3326{
3327 uint32_t u32 = 0;
3328 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3329 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3330 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3331 return u32;
3332}
3333
3334/** Read MMIO memory. */
3335static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3336{
3337 uint32_t u32 = 0;
3338 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3339 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3340 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3341 return u32;
3342}
3343
3344/** Write to MMIO memory. */
3345static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3346{
3347 int rc;
3348 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3349 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3350 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3351}
3352
3353/** Write to MMIO memory. */
3354static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3355{
3356 int rc;
3357 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3358 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3359 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3360}
3361
3362/** Write to MMIO memory. */
3363static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3364{
3365 int rc;
3366 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3367 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3368 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3369}
3370
3371
3372#undef LOG_GROUP
3373#define LOG_GROUP LOG_GROUP_REM_HANDLER
3374
3375/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3376
3377static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3378{
3379 uint8_t u8;
3380 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3381 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3382 return u8;
3383}
3384
3385static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3386{
3387 uint16_t u16;
3388 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3389 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3390 return u16;
3391}
3392
3393static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3394{
3395 uint32_t u32;
3396 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3397 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3398 return u32;
3399}
3400
/** Write a byte to memory covered by a physical access handler. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    /* NOTE(review): writes the first byte of u32 in host byte order; this equals
       the low byte only on little-endian hosts — confirm if ever ported. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3406
/** Write a word to memory covered by a physical access handler. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    /* NOTE(review): writes the first two bytes of u32 in host byte order; this
       equals the low word only on little-endian hosts — confirm if ever ported. */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3412
/** Write a dword to memory covered by a physical access handler. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3418
3419/* -+- disassembly -+- */
3420
3421#undef LOG_GROUP
3422#define LOG_GROUP LOG_GROUP_REM_DISAS
3423
3424
3425/**
3426 * Enables or disables singled stepped disassembly.
3427 *
3428 * @returns VBox status code.
3429 * @param pVM VM handle.
3430 * @param fEnable To enable set this flag, to disable clear it.
3431 */
3432static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3433{
3434 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3435 VM_ASSERT_EMT(pVM);
3436
3437 if (fEnable)
3438 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3439 else
3440 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3441 return VINF_SUCCESS;
3442}
3443
3444
3445/**
3446 * Enables or disables singled stepped disassembly.
3447 *
3448 * @returns VBox status code.
3449 * @param pVM VM handle.
3450 * @param fEnable To enable set this flag, to disable clear it.
3451 */
3452REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3453{
3454 PVMREQ pReq;
3455 int rc;
3456
3457 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3458 if (VM_IS_EMT(pVM))
3459 return remR3DisasEnableStepping(pVM, fEnable);
3460
3461 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3462 AssertRC(rc);
3463 if (RT_SUCCESS(rc))
3464 rc = pReq->iStatus;
3465 VMR3ReqFree(pReq);
3466 return rc;
3467}
3468
3469
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * Without arguments the current setting is printed; otherwise the first
 * argument is converted to a boolean and passed to REMR3DisasEnableStepping.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3494
3495
3496/**
3497 * Disassembles one instruction and prints it to the log.
3498 *
3499 * @returns Success indicator.
3500 * @param env Pointer to the recompiler CPU structure.
3501 * @param f32BitCode Indicates that whether or not the code should
3502 * be disassembled as 16 or 32 bit. If -1 the CS
3503 * selector will be inspected.
3504 * @param pszPrefix
3505 */
3506bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3507{
3508 PVM pVM = env->pVM;
3509 const bool fLog = LogIsEnabled();
3510 const bool fLog2 = LogIs2Enabled();
3511 int rc = VINF_SUCCESS;
3512
3513 /*
3514 * Don't bother if there ain't any log output to do.
3515 */
3516 if (!fLog && !fLog2)
3517 return true;
3518
3519 /*
3520 * Update the state so DBGF reads the correct register values.
3521 */
3522 remR3StateUpdate(pVM);
3523
3524 /*
3525 * Log registers if requested.
3526 */
3527 if (!fLog2)
3528 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3529
3530 /*
3531 * Disassemble to log.
3532 */
3533 if (fLog)
3534 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3535
3536 return RT_SUCCESS(rc);
3537}
3538
3539
3540/**
3541 * Disassemble recompiled code.
3542 *
3543 * @param phFileIgnored Ignored, logfile usually.
3544 * @param pvCode Pointer to the code block.
3545 * @param cb Size of the code block.
3546 */
3547void disas(FILE *phFile, void *pvCode, unsigned long cb)
3548{
3549#ifdef DEBUG_TMP_LOGGING
3550# define DISAS_PRINTF(x...) fprintf(phFile, x)
3551#else
3552# define DISAS_PRINTF(x...) RTLogPrintf(x)
3553 if (LogIs2Enabled())
3554#endif
3555 {
3556 unsigned off = 0;
3557 char szOutput[256];
3558 DISCPUSTATE Cpu;
3559
3560 memset(&Cpu, 0, sizeof(Cpu));
3561#ifdef RT_ARCH_X86
3562 Cpu.mode = CPUMODE_32BIT;
3563#else
3564 Cpu.mode = CPUMODE_64BIT;
3565#endif
3566
3567 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3568 while (off < cb)
3569 {
3570 uint32_t cbInstr;
3571 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3572 DISAS_PRINTF("%s", szOutput);
3573 else
3574 {
3575 DISAS_PRINTF("disas error\n");
3576 cbInstr = 1;
3577#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3578 break;
3579#endif
3580 }
3581 off += cbInstr;
3582 }
3583 }
3584
3585#undef DISAS_PRINTF
3586}
3587
3588
3589/**
3590 * Disassemble guest code.
3591 *
3592 * @param phFileIgnored Ignored, logfile usually.
3593 * @param uCode The guest address of the code to disassemble. (flat?)
3594 * @param cb Number of bytes to disassemble.
3595 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3596 */
3597void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3598{
3599#ifdef DEBUG_TMP_LOGGING
3600# define DISAS_PRINTF(x...) fprintf(phFile, x)
3601#else
3602# define DISAS_PRINTF(x...) RTLogPrintf(x)
3603 if (LogIs2Enabled())
3604#endif
3605 {
3606 PVM pVM = cpu_single_env->pVM;
3607 RTSEL cs;
3608 RTGCUINTPTR eip;
3609
3610 /*
3611 * Update the state so DBGF reads the correct register values (flags).
3612 */
3613 remR3StateUpdate(pVM);
3614
3615 /*
3616 * Do the disassembling.
3617 */
3618 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3619 cs = cpu_single_env->segs[R_CS].selector;
3620 eip = uCode - cpu_single_env->segs[R_CS].base;
3621 for (;;)
3622 {
3623 char szBuf[256];
3624 uint32_t cbInstr;
3625 int rc = DBGFR3DisasInstrEx(pVM,
3626 cs,
3627 eip,
3628 0,
3629 szBuf, sizeof(szBuf),
3630 &cbInstr);
3631 if (RT_SUCCESS(rc))
3632 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3633 else
3634 {
3635 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3636 cbInstr = 1;
3637 }
3638
3639 /* next */
3640 if (cb <= cbInstr)
3641 break;
3642 cb -= cbInstr;
3643 uCode += cbInstr;
3644 eip += cbInstr;
3645 }
3646 }
3647#undef DISAS_PRINTF
3648}
3649
3650
3651/**
3652 * Looks up a guest symbol.
3653 *
3654 * @returns Pointer to symbol name. This is a static buffer.
3655 * @param orig_addr The address in question.
3656 */
3657const char *lookup_symbol(target_ulong orig_addr)
3658{
3659 RTGCINTPTR off = 0;
3660 DBGFSYMBOL Sym;
3661 PVM pVM = cpu_single_env->pVM;
3662 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3663 if (RT_SUCCESS(rc))
3664 {
3665 static char szSym[sizeof(Sym.szName) + 48];
3666 if (!off)
3667 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3668 else if (off > 0)
3669 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3670 else
3671 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3672 return szSym;
3673 }
3674 return "<N/A>";
3675}
3676
3677
3678#undef LOG_GROUP
3679#define LOG_GROUP LOG_GROUP_REM
3680
3681
3682/* -+- FF notifications -+- */
3683
3684
3685/**
3686 * Notification about a pending interrupt.
3687 *
3688 * @param pVM VM Handle.
3689 * @param u8Interrupt Interrupt
3690 * @thread The emulation thread.
3691 */
3692REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3693{
3694 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3695 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3696}
3697
3698/**
3699 * Notification about a pending interrupt.
3700 *
3701 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3702 * @param pVM VM Handle.
3703 * @thread The emulation thread.
3704 */
3705REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3706{
3707 return pVM->rem.s.u32PendingInterrupt;
3708}
3709
3710/**
3711 * Notification about the interrupt FF being set.
3712 *
3713 * @param pVM VM Handle.
3714 * @thread The emulation thread.
3715 */
3716REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3717{
3718 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3719 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3720 if (pVM->rem.s.fInREM)
3721 {
3722 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3723 CPU_INTERRUPT_EXTERNAL_HARD);
3724 }
3725}
3726
3727
3728/**
3729 * Notification about the interrupt FF being set.
3730 *
3731 * @param pVM VM Handle.
3732 * @thread Any.
3733 */
3734REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3735{
3736 LogFlow(("REMR3NotifyInterruptClear:\n"));
3737 if (pVM->rem.s.fInREM)
3738 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3739}
3740
3741
3742/**
3743 * Notification about pending timer(s).
3744 *
3745 * @param pVM VM Handle.
3746 * @thread Any.
3747 */
3748REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3749{
3750#ifndef DEBUG_bird
3751 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3752#endif
3753 if (pVM->rem.s.fInREM)
3754 {
3755 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3756 CPU_INTERRUPT_EXTERNAL_TIMER);
3757 }
3758}
3759
3760
3761/**
3762 * Notification about pending DMA transfers.
3763 *
3764 * @param pVM VM Handle.
3765 * @thread Any.
3766 */
3767REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3768{
3769 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3770 if (pVM->rem.s.fInREM)
3771 {
3772 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3773 CPU_INTERRUPT_EXTERNAL_DMA);
3774 }
3775}
3776
3777
3778/**
3779 * Notification about pending timer(s).
3780 *
3781 * @param pVM VM Handle.
3782 * @thread Any.
3783 */
3784REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3785{
3786 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3787 if (pVM->rem.s.fInREM)
3788 {
3789 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3790 CPU_INTERRUPT_EXTERNAL_EXIT);
3791 }
3792}
3793
3794
3795/**
3796 * Notification about pending FF set by an external thread.
3797 *
3798 * @param pVM VM handle.
3799 * @thread Any.
3800 */
3801REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3802{
3803 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3804 if (pVM->rem.s.fInREM)
3805 {
3806 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3807 CPU_INTERRUPT_EXTERNAL_EXIT);
3808 }
3809}
3810
3811
#ifdef VBOX_WITH_STATISTICS

/**
 * Resolves a REM profiling stat code to its STAM profile sample.
 *
 * Shared by remR3ProfileStart and remR3ProfileStop; previously both carried
 * an identical 40-line switch which had already drifted in formatting.
 *
 * @returns Pointer to the profile sample, NULL if the code is unknown
 *          (asserted in strict builds).
 * @param   statcode    The STATS_* code to resolve.
 */
static STAMPROFILEADV *remR3ProfileGetStat(int statcode)
{
    switch (statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:    return &gStatExecuteSingleInstr;
        case STATS_QEMU_COMPILATION:        return &gStatCompilationQEmu;
        case STATS_QEMU_RUN_EMULATED_CODE:  return &gStatRunCodeQEmu;
        case STATS_QEMU_TOTAL:              return &gStatTotalTimeQEmu;
        case STATS_QEMU_RUN_TIMERS:         return &gStatTimers;
        case STATS_TLB_LOOKUP:              return &gStatTBLookup;
        case STATS_IRQ_HANDLING:            return &gStatIRQ;
        case STATS_RAW_CHECK:               return &gStatRawCheck;
        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return NULL;
    }
}


/**
 * Starts the advanced profile sample associated with a stat code.
 *
 * @param   statcode    The STATS_* code identifying the sample.
 */
void remR3ProfileStart(int statcode)
{
    STAMPROFILEADV *pStat = remR3ProfileGetStat(statcode);
    if (pStat)
        STAM_PROFILE_ADV_START(pStat, a);
}


/**
 * Stops the advanced profile sample associated with a stat code.
 *
 * @param   statcode    The STATS_* code identifying the sample.
 */
void remR3ProfileStop(int statcode)
{
    STAMPROFILEADV *pStat = remR3ProfileGetStat(statcode);
    if (pStat)
        STAM_PROFILE_ADV_STOP(pStat, a);
}
#endif
3887
3888/**
3889 * Raise an RC, force rem exit.
3890 *
3891 * @param pVM VM handle.
3892 * @param rc The rc.
3893 */
3894void remR3RaiseRC(PVM pVM, int rc)
3895{
3896 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
3897 Assert(pVM->rem.s.fInREM);
3898 VM_ASSERT_EMT(pVM);
3899 pVM->rem.s.rc = rc;
3900 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
3901}
3902
3903
3904/* -+- timers -+- */
3905
/** Returns the virtual CPU tick count for the TSC (qemu callback into TM). */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
3911
3912
3913/* -+- interrupts -+- */
3914
/** Raises IRQ 13 on the ISA bus to signal an FPU error (qemu callback). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
3920
/**
 * Gets the next pending interrupt vector for the recompiler.
 *
 * Prefers an interrupt explicitly handed to REM (u32PendingInterrupt) over
 * querying PDM, because in that case the (a)pic has already been acked.
 *
 * @returns The interrupt vector number, or -1 if nothing is pending.
 * @param   env     Pointer to the recompiler CPU structure.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);     /* must fit in a vector byte */
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;   /* consume it */
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts may still be pending; keep the hard-interrupt request up. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
3953
3954
3955/* -+- local apic -+- */
3956
/**
 * Sets the APIC base MSR via PDM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
3962
3963uint64_t cpu_get_apic_base(CPUX86State *env)
3964{
3965 uint64_t u64;
3966 int rc = PDMApicGetBase(env->pVM, &u64);
3967 if (RT_SUCCESS(rc))
3968 {
3969 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
3970 return u64;
3971 }
3972 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
3973 return 0;
3974}
3975
/**
 * Sets the APIC task priority register (TPR) via PDM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The new TPR value.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
3981
3982uint8_t cpu_get_apic_tpr(CPUX86State *env)
3983{
3984 uint8_t u8;
3985 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
3986 if (RT_SUCCESS(rc))
3987 {
3988 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
3989 return u8;
3990 }
3991 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
3992 return 0;
3993}
3994
3995
3996uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
3997{
3998 uint64_t value;
3999 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4000 if (RT_SUCCESS(rc))
4001 {
4002 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4003 return value;
4004 }
4005 /** @todo: exception ? */
4006 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4007 return value;
4008}
4009
/**
 * Writes an APIC MSR via PDM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4016
/**
 * Reads a guest machine specific register (MSR) from CPUM.
 *
 * @returns The MSR value.
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4021
/**
 * Writes a guest machine specific register (MSR) via CPUM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4026
4027/* -+- I/O Ports -+- */
4028
4029#undef LOG_GROUP
4030#define LOG_GROUP LOG_GROUP_REM_IOPORT
4031
4032void cpu_outb(CPUState *env, int addr, int val)
4033{
4034 int rc;
4035
4036 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4037 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4038
4039 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4040 if (RT_LIKELY(rc == VINF_SUCCESS))
4041 return;
4042 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4043 {
4044 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4045 remR3RaiseRC(env->pVM, rc);
4046 return;
4047 }
4048 remAbort(rc, __FUNCTION__);
4049}
4050
4051void cpu_outw(CPUState *env, int addr, int val)
4052{
4053 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4054 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4055 if (RT_LIKELY(rc == VINF_SUCCESS))
4056 return;
4057 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4058 {
4059 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4060 remR3RaiseRC(env->pVM, rc);
4061 return;
4062 }
4063 remAbort(rc, __FUNCTION__);
4064}
4065
4066void cpu_outl(CPUState *env, int addr, int val)
4067{
4068 int rc;
4069 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4070 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4071 if (RT_LIKELY(rc == VINF_SUCCESS))
4072 return;
4073 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4074 {
4075 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4076 remR3RaiseRC(env->pVM, rc);
4077 return;
4078 }
4079 remAbort(rc, __FUNCTION__);
4080}
4081
4082int cpu_inb(CPUState *env, int addr)
4083{
4084 uint32_t u32 = 0;
4085 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4086 if (RT_LIKELY(rc == VINF_SUCCESS))
4087 {
4088 if (/*addr != 0x61 && */addr != 0x71)
4089 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4090 return (int)u32;
4091 }
4092 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4093 {
4094 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4095 remR3RaiseRC(env->pVM, rc);
4096 return (int)u32;
4097 }
4098 remAbort(rc, __FUNCTION__);
4099 return 0xff;
4100}
4101
4102int cpu_inw(CPUState *env, int addr)
4103{
4104 uint32_t u32 = 0;
4105 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4106 if (RT_LIKELY(rc == VINF_SUCCESS))
4107 {
4108 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4109 return (int)u32;
4110 }
4111 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4112 {
4113 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4114 remR3RaiseRC(env->pVM, rc);
4115 return (int)u32;
4116 }
4117 remAbort(rc, __FUNCTION__);
4118 return 0xffff;
4119}
4120
4121int cpu_inl(CPUState *env, int addr)
4122{
4123 uint32_t u32 = 0;
4124 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4125 if (RT_LIKELY(rc == VINF_SUCCESS))
4126 {
4127//if (addr==0x01f0 && u32 == 0x6b6d)
4128// loglevel = ~0;
4129 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4130 return (int)u32;
4131 }
4132 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4133 {
4134 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4135 remR3RaiseRC(env->pVM, rc);
4136 return (int)u32;
4137 }
4138 remAbort(rc, __FUNCTION__);
4139 return 0xffffffff;
4140}
4141
4142#undef LOG_GROUP
4143#define LOG_GROUP LOG_GROUP_REM
4144
4145
4146/* -+- helpers and misc other interfaces -+- */
4147
4148/**
4149 * Perform the CPUID instruction.
4150 *
4151 * ASMCpuId cannot be invoked from some source files where this is used because of global
4152 * register allocations.
4153 *
4154 * @param env Pointer to the recompiler CPU structure.
4155 * @param uOperator CPUID operation (eax).
4156 * @param pvEAX Where to store eax.
4157 * @param pvEBX Where to store ebx.
4158 * @param pvECX Where to store ecx.
4159 * @param pvEDX Where to store edx.
4160 */
4161void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4162{
4163 CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4164}
4165
4166
4167#if 0 /* not used */
4168/**
4169 * Interface for qemu hardware to report back fatal errors.
4170 */
4171void hw_error(const char *pszFormat, ...)
4172{
4173 /*
4174 * Bitch about it.
4175 */
4176 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4177 * this in my Odin32 tree at home! */
4178 va_list args;
4179 va_start(args, pszFormat);
4180 RTLogPrintf("fatal error in virtual hardware:");
4181 RTLogPrintfV(pszFormat, args);
4182 va_end(args);
4183 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4184
4185 /*
4186 * If we're in REM context we'll sync back the state before 'jumping' to
4187 * the EMs failure handling.
4188 */
4189 PVM pVM = cpu_single_env->pVM;
4190 if (pVM->rem.s.fInREM)
4191 REMR3StateBack(pVM);
4192 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4193 AssertMsgFailed(("EMR3FatalError returned!\n"));
4194}
4195#endif
4196
4197/**
4198 * Interface for the qemu cpu to report unhandled situation
4199 * raising a fatal VM error.
4200 */
4201void cpu_abort(CPUState *env, const char *pszFormat, ...)
4202{
4203 va_list args;
4204 PVM pVM;
4205
4206 /*
4207 * Bitch about it.
4208 */
4209#ifndef _MSC_VER
4210 /** @todo: MSVC is right - it's not valid C */
4211 RTLogFlags(NULL, "nodisabled nobuffered");
4212#endif
4213 va_start(args, pszFormat);
4214 RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
4215 va_end(args);
4216 va_start(args, pszFormat);
4217 AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
4218 va_end(args);
4219
4220 /*
4221 * If we're in REM context we'll sync back the state before 'jumping' to
4222 * the EMs failure handling.
4223 */
4224 pVM = cpu_single_env->pVM;
4225 if (pVM->rem.s.fInREM)
4226 REMR3StateBack(pVM);
4227 EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
4228 AssertMsgFailed(("EMR3FatalError returned!\n"));
4229}
4230
4231
4232/**
4233 * Aborts the VM.
4234 *
4235 * @param rc VBox error code.
4236 * @param pszTip Hint about why/when this happend.
4237 */
4238void remAbort(int rc, const char *pszTip)
4239{
4240 PVM pVM;
4241
4242 /*
4243 * Bitch about it.
4244 */
4245 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4246 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4247
4248 /*
4249 * Jump back to where we entered the recompiler.
4250 */
4251 pVM = cpu_single_env->pVM;
4252 if (pVM->rem.s.fInREM)
4253 REMR3StateBack(pVM);
4254 EMR3FatalError(pVM, rc);
4255 AssertMsgFailed(("EMR3FatalError returned!\n"));
4256}
4257
4258
4259/**
4260 * Dumps a linux system call.
4261 * @param pVM VM handle.
4262 */
4263void remR3DumpLnxSyscall(PVM pVM)
4264{
4265 static const char *apsz[] =
4266 {
4267 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4268 "sys_exit",
4269 "sys_fork",
4270 "sys_read",
4271 "sys_write",
4272 "sys_open", /* 5 */
4273 "sys_close",
4274 "sys_waitpid",
4275 "sys_creat",
4276 "sys_link",
4277 "sys_unlink", /* 10 */
4278 "sys_execve",
4279 "sys_chdir",
4280 "sys_time",
4281 "sys_mknod",
4282 "sys_chmod", /* 15 */
4283 "sys_lchown16",
4284 "sys_ni_syscall", /* old break syscall holder */
4285 "sys_stat",
4286 "sys_lseek",
4287 "sys_getpid", /* 20 */
4288 "sys_mount",
4289 "sys_oldumount",
4290 "sys_setuid16",
4291 "sys_getuid16",
4292 "sys_stime", /* 25 */
4293 "sys_ptrace",
4294 "sys_alarm",
4295 "sys_fstat",
4296 "sys_pause",
4297 "sys_utime", /* 30 */
4298 "sys_ni_syscall", /* old stty syscall holder */
4299 "sys_ni_syscall", /* old gtty syscall holder */
4300 "sys_access",
4301 "sys_nice",
4302 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4303 "sys_sync",
4304 "sys_kill",
4305 "sys_rename",
4306 "sys_mkdir",
4307 "sys_rmdir", /* 40 */
4308 "sys_dup",
4309 "sys_pipe",
4310 "sys_times",
4311 "sys_ni_syscall", /* old prof syscall holder */
4312 "sys_brk", /* 45 */
4313 "sys_setgid16",
4314 "sys_getgid16",
4315 "sys_signal",
4316 "sys_geteuid16",
4317 "sys_getegid16", /* 50 */
4318 "sys_acct",
4319 "sys_umount", /* recycled never used phys() */
4320 "sys_ni_syscall", /* old lock syscall holder */
4321 "sys_ioctl",
4322 "sys_fcntl", /* 55 */
4323 "sys_ni_syscall", /* old mpx syscall holder */
4324 "sys_setpgid",
4325 "sys_ni_syscall", /* old ulimit syscall holder */
4326 "sys_olduname",
4327 "sys_umask", /* 60 */
4328 "sys_chroot",
4329 "sys_ustat",
4330 "sys_dup2",
4331 "sys_getppid",
4332 "sys_getpgrp", /* 65 */
4333 "sys_setsid",
4334 "sys_sigaction",
4335 "sys_sgetmask",
4336 "sys_ssetmask",
4337 "sys_setreuid16", /* 70 */
4338 "sys_setregid16",
4339 "sys_sigsuspend",
4340 "sys_sigpending",
4341 "sys_sethostname",
4342 "sys_setrlimit", /* 75 */
4343 "sys_old_getrlimit",
4344 "sys_getrusage",
4345 "sys_gettimeofday",
4346 "sys_settimeofday",
4347 "sys_getgroups16", /* 80 */
4348 "sys_setgroups16",
4349 "old_select",
4350 "sys_symlink",
4351 "sys_lstat",
4352 "sys_readlink", /* 85 */
4353 "sys_uselib",
4354 "sys_swapon",
4355 "sys_reboot",
4356 "old_readdir",
4357 "old_mmap", /* 90 */
4358 "sys_munmap",
4359 "sys_truncate",
4360 "sys_ftruncate",
4361 "sys_fchmod",
4362 "sys_fchown16", /* 95 */
4363 "sys_getpriority",
4364 "sys_setpriority",
4365 "sys_ni_syscall", /* old profil syscall holder */
4366 "sys_statfs",
4367 "sys_fstatfs", /* 100 */
4368 "sys_ioperm",
4369 "sys_socketcall",
4370 "sys_syslog",
4371 "sys_setitimer",
4372 "sys_getitimer", /* 105 */
4373 "sys_newstat",
4374 "sys_newlstat",
4375 "sys_newfstat",
4376 "sys_uname",
4377 "sys_iopl", /* 110 */
4378 "sys_vhangup",
4379 "sys_ni_syscall", /* old "idle" system call */
4380 "sys_vm86old",
4381 "sys_wait4",
4382 "sys_swapoff", /* 115 */
4383 "sys_sysinfo",
4384 "sys_ipc",
4385 "sys_fsync",
4386 "sys_sigreturn",
4387 "sys_clone", /* 120 */
4388 "sys_setdomainname",
4389 "sys_newuname",
4390 "sys_modify_ldt",
4391 "sys_adjtimex",
4392 "sys_mprotect", /* 125 */
4393 "sys_sigprocmask",
4394 "sys_ni_syscall", /* old "create_module" */
4395 "sys_init_module",
4396 "sys_delete_module",
4397 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4398 "sys_quotactl",
4399 "sys_getpgid",
4400 "sys_fchdir",
4401 "sys_bdflush",
4402 "sys_sysfs", /* 135 */
4403 "sys_personality",
4404 "sys_ni_syscall", /* reserved for afs_syscall */
4405 "sys_setfsuid16",
4406 "sys_setfsgid16",
4407 "sys_llseek", /* 140 */
4408 "sys_getdents",
4409 "sys_select",
4410 "sys_flock",
4411 "sys_msync",
4412 "sys_readv", /* 145 */
4413 "sys_writev",
4414 "sys_getsid",
4415 "sys_fdatasync",
4416 "sys_sysctl",
4417 "sys_mlock", /* 150 */
4418 "sys_munlock",
4419 "sys_mlockall",
4420 "sys_munlockall",
4421 "sys_sched_setparam",
4422 "sys_sched_getparam", /* 155 */
4423 "sys_sched_setscheduler",
4424 "sys_sched_getscheduler",
4425 "sys_sched_yield",
4426 "sys_sched_get_priority_max",
4427 "sys_sched_get_priority_min", /* 160 */
4428 "sys_sched_rr_get_interval",
4429 "sys_nanosleep",
4430 "sys_mremap",
4431 "sys_setresuid16",
4432 "sys_getresuid16", /* 165 */
4433 "sys_vm86",
4434 "sys_ni_syscall", /* Old sys_query_module */
4435 "sys_poll",
4436 "sys_nfsservctl",
4437 "sys_setresgid16", /* 170 */
4438 "sys_getresgid16",
4439 "sys_prctl",
4440 "sys_rt_sigreturn",
4441 "sys_rt_sigaction",
4442 "sys_rt_sigprocmask", /* 175 */
4443 "sys_rt_sigpending",
4444 "sys_rt_sigtimedwait",
4445 "sys_rt_sigqueueinfo",
4446 "sys_rt_sigsuspend",
4447 "sys_pread64", /* 180 */
4448 "sys_pwrite64",
4449 "sys_chown16",
4450 "sys_getcwd",
4451 "sys_capget",
4452 "sys_capset", /* 185 */
4453 "sys_sigaltstack",
4454 "sys_sendfile",
4455 "sys_ni_syscall", /* reserved for streams1 */
4456 "sys_ni_syscall", /* reserved for streams2 */
4457 "sys_vfork", /* 190 */
4458 "sys_getrlimit",
4459 "sys_mmap2",
4460 "sys_truncate64",
4461 "sys_ftruncate64",
4462 "sys_stat64", /* 195 */
4463 "sys_lstat64",
4464 "sys_fstat64",
4465 "sys_lchown",
4466 "sys_getuid",
4467 "sys_getgid", /* 200 */
4468 "sys_geteuid",
4469 "sys_getegid",
4470 "sys_setreuid",
4471 "sys_setregid",
4472 "sys_getgroups", /* 205 */
4473 "sys_setgroups",
4474 "sys_fchown",
4475 "sys_setresuid",
4476 "sys_getresuid",
4477 "sys_setresgid", /* 210 */
4478 "sys_getresgid",
4479 "sys_chown",
4480 "sys_setuid",
4481 "sys_setgid",
4482 "sys_setfsuid", /* 215 */
4483 "sys_setfsgid",
4484 "sys_pivot_root",
4485 "sys_mincore",
4486 "sys_madvise",
4487 "sys_getdents64", /* 220 */
4488 "sys_fcntl64",
4489 "sys_ni_syscall", /* reserved for TUX */
4490 "sys_ni_syscall",
4491 "sys_gettid",
4492 "sys_readahead", /* 225 */
4493 "sys_setxattr",
4494 "sys_lsetxattr",
4495 "sys_fsetxattr",
4496 "sys_getxattr",
4497 "sys_lgetxattr", /* 230 */
4498 "sys_fgetxattr",
4499 "sys_listxattr",
4500 "sys_llistxattr",
4501 "sys_flistxattr",
4502 "sys_removexattr", /* 235 */
4503 "sys_lremovexattr",
4504 "sys_fremovexattr",
4505 "sys_tkill",
4506 "sys_sendfile64",
4507 "sys_futex", /* 240 */
4508 "sys_sched_setaffinity",
4509 "sys_sched_getaffinity",
4510 "sys_set_thread_area",
4511 "sys_get_thread_area",
4512 "sys_io_setup", /* 245 */
4513 "sys_io_destroy",
4514 "sys_io_getevents",
4515 "sys_io_submit",
4516 "sys_io_cancel",
4517 "sys_fadvise64", /* 250 */
4518 "sys_ni_syscall",
4519 "sys_exit_group",
4520 "sys_lookup_dcookie",
4521 "sys_epoll_create",
4522 "sys_epoll_ctl", /* 255 */
4523 "sys_epoll_wait",
4524 "sys_remap_file_pages",
4525 "sys_set_tid_address",
4526 "sys_timer_create",
4527 "sys_timer_settime", /* 260 */
4528 "sys_timer_gettime",
4529 "sys_timer_getoverrun",
4530 "sys_timer_delete",
4531 "sys_clock_settime",
4532 "sys_clock_gettime", /* 265 */
4533 "sys_clock_getres",
4534 "sys_clock_nanosleep",
4535 "sys_statfs64",
4536 "sys_fstatfs64",
4537 "sys_tgkill", /* 270 */
4538 "sys_utimes",
4539 "sys_fadvise64_64",
4540 "sys_ni_syscall" /* sys_vserver */
4541 };
4542
4543 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4544 switch (uEAX)
4545 {
4546 default:
4547 if (uEAX < RT_ELEMENTS(apsz))
4548 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4549 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4550 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4551 else
4552 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4553 break;
4554
4555 }
4556}
4557
4558
4559/**
4560 * Dumps an OpenBSD system call.
4561 * @param pVM VM handle.
4562 */
4563void remR3DumpOBsdSyscall(PVM pVM)
4564{
4565 static const char *apsz[] =
4566 {
4567 "SYS_syscall", //0
4568 "SYS_exit", //1
4569 "SYS_fork", //2
4570 "SYS_read", //3
4571 "SYS_write", //4
4572 "SYS_open", //5
4573 "SYS_close", //6
4574 "SYS_wait4", //7
4575 "SYS_8",
4576 "SYS_link", //9
4577 "SYS_unlink", //10
4578 "SYS_11",
4579 "SYS_chdir", //12
4580 "SYS_fchdir", //13
4581 "SYS_mknod", //14
4582 "SYS_chmod", //15
4583 "SYS_chown", //16
4584 "SYS_break", //17
4585 "SYS_18",
4586 "SYS_19",
4587 "SYS_getpid", //20
4588 "SYS_mount", //21
4589 "SYS_unmount", //22
4590 "SYS_setuid", //23
4591 "SYS_getuid", //24
4592 "SYS_geteuid", //25
4593 "SYS_ptrace", //26
4594 "SYS_recvmsg", //27
4595 "SYS_sendmsg", //28
4596 "SYS_recvfrom", //29
4597 "SYS_accept", //30
4598 "SYS_getpeername", //31
4599 "SYS_getsockname", //32
4600 "SYS_access", //33
4601 "SYS_chflags", //34
4602 "SYS_fchflags", //35
4603 "SYS_sync", //36
4604 "SYS_kill", //37
4605 "SYS_38",
4606 "SYS_getppid", //39
4607 "SYS_40",
4608 "SYS_dup", //41
4609 "SYS_opipe", //42
4610 "SYS_getegid", //43
4611 "SYS_profil", //44
4612 "SYS_ktrace", //45
4613 "SYS_sigaction", //46
4614 "SYS_getgid", //47
4615 "SYS_sigprocmask", //48
4616 "SYS_getlogin", //49
4617 "SYS_setlogin", //50
4618 "SYS_acct", //51
4619 "SYS_sigpending", //52
4620 "SYS_osigaltstack", //53
4621 "SYS_ioctl", //54
4622 "SYS_reboot", //55
4623 "SYS_revoke", //56
4624 "SYS_symlink", //57
4625 "SYS_readlink", //58
4626 "SYS_execve", //59
4627 "SYS_umask", //60
4628 "SYS_chroot", //61
4629 "SYS_62",
4630 "SYS_63",
4631 "SYS_64",
4632 "SYS_65",
4633 "SYS_vfork", //66
4634 "SYS_67",
4635 "SYS_68",
4636 "SYS_sbrk", //69
4637 "SYS_sstk", //70
4638 "SYS_61",
4639 "SYS_vadvise", //72
4640 "SYS_munmap", //73
4641 "SYS_mprotect", //74
4642 "SYS_madvise", //75
4643 "SYS_76",
4644 "SYS_77",
4645 "SYS_mincore", //78
4646 "SYS_getgroups", //79
4647 "SYS_setgroups", //80
4648 "SYS_getpgrp", //81
4649 "SYS_setpgid", //82
4650 "SYS_setitimer", //83
4651 "SYS_84",
4652 "SYS_85",
4653 "SYS_getitimer", //86
4654 "SYS_87",
4655 "SYS_88",
4656 "SYS_89",
4657 "SYS_dup2", //90
4658 "SYS_91",
4659 "SYS_fcntl", //92
4660 "SYS_select", //93
4661 "SYS_94",
4662 "SYS_fsync", //95
4663 "SYS_setpriority", //96
4664 "SYS_socket", //97
4665 "SYS_connect", //98
4666 "SYS_99",
4667 "SYS_getpriority", //100
4668 "SYS_101",
4669 "SYS_102",
4670 "SYS_sigreturn", //103
4671 "SYS_bind", //104
4672 "SYS_setsockopt", //105
4673 "SYS_listen", //106
4674 "SYS_107",
4675 "SYS_108",
4676 "SYS_109",
4677 "SYS_110",
4678 "SYS_sigsuspend", //111
4679 "SYS_112",
4680 "SYS_113",
4681 "SYS_114",
4682 "SYS_115",
4683 "SYS_gettimeofday", //116
4684 "SYS_getrusage", //117
4685 "SYS_getsockopt", //118
4686 "SYS_119",
4687 "SYS_readv", //120
4688 "SYS_writev", //121
4689 "SYS_settimeofday", //122
4690 "SYS_fchown", //123
4691 "SYS_fchmod", //124
4692 "SYS_125",
4693 "SYS_setreuid", //126
4694 "SYS_setregid", //127
4695 "SYS_rename", //128
4696 "SYS_129",
4697 "SYS_130",
4698 "SYS_flock", //131
4699 "SYS_mkfifo", //132
4700 "SYS_sendto", //133
4701 "SYS_shutdown", //134
4702 "SYS_socketpair", //135
4703 "SYS_mkdir", //136
4704 "SYS_rmdir", //137
4705 "SYS_utimes", //138
4706 "SYS_139",
4707 "SYS_adjtime", //140
4708 "SYS_141",
4709 "SYS_142",
4710 "SYS_143",
4711 "SYS_144",
4712 "SYS_145",
4713 "SYS_146",
4714 "SYS_setsid", //147
4715 "SYS_quotactl", //148
4716 "SYS_149",
4717 "SYS_150",
4718 "SYS_151",
4719 "SYS_152",
4720 "SYS_153",
4721 "SYS_154",
4722 "SYS_nfssvc", //155
4723 "SYS_156",
4724 "SYS_157",
4725 "SYS_158",
4726 "SYS_159",
4727 "SYS_160",
4728 "SYS_getfh", //161
4729 "SYS_162",
4730 "SYS_163",
4731 "SYS_164",
4732 "SYS_sysarch", //165
4733 "SYS_166",
4734 "SYS_167",
4735 "SYS_168",
4736 "SYS_169",
4737 "SYS_170",
4738 "SYS_171",
4739 "SYS_172",
4740 "SYS_pread", //173
4741 "SYS_pwrite", //174
4742 "SYS_175",
4743 "SYS_176",
4744 "SYS_177",
4745 "SYS_178",
4746 "SYS_179",
4747 "SYS_180",
4748 "SYS_setgid", //181
4749 "SYS_setegid", //182
4750 "SYS_seteuid", //183
4751 "SYS_lfs_bmapv", //184
4752 "SYS_lfs_markv", //185
4753 "SYS_lfs_segclean", //186
4754 "SYS_lfs_segwait", //187
4755 "SYS_188",
4756 "SYS_189",
4757 "SYS_190",
4758 "SYS_pathconf", //191
4759 "SYS_fpathconf", //192
4760 "SYS_swapctl", //193
4761 "SYS_getrlimit", //194
4762 "SYS_setrlimit", //195
4763 "SYS_getdirentries", //196
4764 "SYS_mmap", //197
4765 "SYS___syscall", //198
4766 "SYS_lseek", //199
4767 "SYS_truncate", //200
4768 "SYS_ftruncate", //201
4769 "SYS___sysctl", //202
4770 "SYS_mlock", //203
4771 "SYS_munlock", //204
4772 "SYS_205",
4773 "SYS_futimes", //206
4774 "SYS_getpgid", //207
4775 "SYS_xfspioctl", //208
4776 "SYS_209",
4777 "SYS_210",
4778 "SYS_211",
4779 "SYS_212",
4780 "SYS_213",
4781 "SYS_214",
4782 "SYS_215",
4783 "SYS_216",
4784 "SYS_217",
4785 "SYS_218",
4786 "SYS_219",
4787 "SYS_220",
4788 "SYS_semget", //221
4789 "SYS_222",
4790 "SYS_223",
4791 "SYS_224",
4792 "SYS_msgget", //225
4793 "SYS_msgsnd", //226
4794 "SYS_msgrcv", //227
4795 "SYS_shmat", //228
4796 "SYS_229",
4797 "SYS_shmdt", //230
4798 "SYS_231",
4799 "SYS_clock_gettime", //232
4800 "SYS_clock_settime", //233
4801 "SYS_clock_getres", //234
4802 "SYS_235",
4803 "SYS_236",
4804 "SYS_237",
4805 "SYS_238",
4806 "SYS_239",
4807 "SYS_nanosleep", //240
4808 "SYS_241",
4809 "SYS_242",
4810 "SYS_243",
4811 "SYS_244",
4812 "SYS_245",
4813 "SYS_246",
4814 "SYS_247",
4815 "SYS_248",
4816 "SYS_249",
4817 "SYS_minherit", //250
4818 "SYS_rfork", //251
4819 "SYS_poll", //252
4820 "SYS_issetugid", //253
4821 "SYS_lchown", //254
4822 "SYS_getsid", //255
4823 "SYS_msync", //256
4824 "SYS_257",
4825 "SYS_258",
4826 "SYS_259",
4827 "SYS_getfsstat", //260
4828 "SYS_statfs", //261
4829 "SYS_fstatfs", //262
4830 "SYS_pipe", //263
4831 "SYS_fhopen", //264
4832 "SYS_265",
4833 "SYS_fhstatfs", //266
4834 "SYS_preadv", //267
4835 "SYS_pwritev", //268
4836 "SYS_kqueue", //269
4837 "SYS_kevent", //270
4838 "SYS_mlockall", //271
4839 "SYS_munlockall", //272
4840 "SYS_getpeereid", //273
4841 "SYS_274",
4842 "SYS_275",
4843 "SYS_276",
4844 "SYS_277",
4845 "SYS_278",
4846 "SYS_279",
4847 "SYS_280",
4848 "SYS_getresuid", //281
4849 "SYS_setresuid", //282
4850 "SYS_getresgid", //283
4851 "SYS_setresgid", //284
4852 "SYS_285",
4853 "SYS_mquery", //286
4854 "SYS_closefrom", //287
4855 "SYS_sigaltstack", //288
4856 "SYS_shmget", //289
4857 "SYS_semop", //290
4858 "SYS_stat", //291
4859 "SYS_fstat", //292
4860 "SYS_lstat", //293
4861 "SYS_fhstat", //294
4862 "SYS___semctl", //295
4863 "SYS_shmctl", //296
4864 "SYS_msgctl", //297
4865 "SYS_MAXSYSCALL", //298
4866 //299
4867 //300
4868 };
4869 uint32_t uEAX;
4870 if (!LogIsEnabled())
4871 return;
4872 uEAX = CPUMGetGuestEAX(pVM);
4873 switch (uEAX)
4874 {
4875 default:
4876 if (uEAX < RT_ELEMENTS(apsz))
4877 {
4878 uint32_t au32Args[8] = {0};
4879 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
4880 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
4881 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
4882 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
4883 }
4884 else
4885 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
4886 break;
4887 }
4888}
4889
4890
4891#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
4892/**
4893 * The Dll main entry point (stub).
4894 */
4895bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
4896{
4897 return true;
4898}
4899
/**
 * Minimal memcpy replacement for no-CRT x86 Windows builds.
 *
 * Copies @a size bytes from @a src to @a dst; the regions must not overlap.
 *
 * @returns dst.
 * @param   dst     The destination buffer.
 * @param   src     The source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;    /* keep the const qualifier; the old code dropped it */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
4907
4908#endif
4909
/** Recompiler callback for SMM state changes; intentionally a no-op here. */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette