VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 19688

Last change on this file since 19688 was 19660, checked in by vboxsync, 16 years ago

TM+affected: SMP changes in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 159.6 KB
Line 
1/* $Id: VBoxRecompiler.c 19660 2009-05-13 14:09:15Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145#endif
146/* in exec.c */
147extern uint32_t tlb_flush_count;
148extern uint32_t tb_flush_count;
149extern uint32_t tb_phys_invalidate_count;
150
151/*
152 * Global stuff.
153 */
154
155/** MMIO read callbacks. */
156CPUReadMemoryFunc *g_apfnMMIORead[3] =
157{
158 remR3MMIOReadU8,
159 remR3MMIOReadU16,
160 remR3MMIOReadU32
161};
162
163/** MMIO write callbacks. */
164CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
165{
166 remR3MMIOWriteU8,
167 remR3MMIOWriteU16,
168 remR3MMIOWriteU32
169};
170
171/** Handler read callbacks. */
172CPUReadMemoryFunc *g_apfnHandlerRead[3] =
173{
174 remR3HandlerReadU8,
175 remR3HandlerReadU16,
176 remR3HandlerReadU32
177};
178
179/** Handler write callbacks. */
180CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
181{
182 remR3HandlerWriteU8,
183 remR3HandlerWriteU16,
184 remR3HandlerWriteU32
185};
186
187
188#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
189/*
190 * Debugger commands.
191 */
192static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
193
194/** '.remstep' arguments. */
195static const DBGCVARDESC g_aArgRemStep[] =
196{
197 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
198 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
199};
200
201/** Command descriptors. */
202static const DBGCCMD g_aCmds[] =
203{
204 {
205 .pszCmd ="remstep",
206 .cArgsMin = 0,
207 .cArgsMax = 1,
208 .paArgDescs = &g_aArgRemStep[0],
209 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
210 .pResultDesc = NULL,
211 .fFlags = 0,
212 .pfnHandler = remR3CmdDisasEnableStepping,
213 .pszSyntax = "[on/off]",
214 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
215 "If no arguments show the current state."
216 }
217};
218#endif
219
220/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
221uint8_t *code_gen_prologue;
222
223
224/*******************************************************************************
225* Internal Functions *
226*******************************************************************************/
227void remAbort(int rc, const char *pszTip);
228extern int testmath(void);
229
230/* Put them here to avoid unused variable warning. */
231AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
232#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
233//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
234/* Why did this have to be identical?? */
235AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
236#else
237AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
238#endif
239
240
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU (QEMU env embedded in VM::rem), registers the
 * MMIO/handler memory types, the saved state unit and (optionally) debugger
 * commands and statistics.  Must run before any RAM registration notifications
 * arrive (asserted below).
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;  /* receptacle for CPUID leaves we don't care about */
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    /* Suppress REM notifications until the recompiler is fully set up. */
    pVM->rem.s.fIgnoreAll = true;

    /* Prologue code must sit in the low 4GB so generated code can jump to it. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature bits into the recompiler env. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* NOTE(review): this inner 'rc' shadows the outer one (-Wshadow);
         * harmless here since registration failure is deliberately ignored,
         * but consider renaming to rc2. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters exported from exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /* rc is VINF_SUCCESS here unless SSMR3RegisterInternal returned a warning status. */
    return rc;
}
411
412
413/**
414 * Finalizes the REM initialization.
415 *
416 * This is called after all components, devices and drivers has
417 * been initialized. Its main purpose it to finish the RAM related
418 * initialization.
419 *
420 * @returns VBox status code.
421 *
422 * @param pVM The VM handle.
423 */
424REMR3DECL(int) REMR3InitFinalize(PVM pVM)
425{
426 int rc;
427
428 /*
429 * Ram size & dirty bit map.
430 */
431 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
432 pVM->rem.s.fGCPhysLastRamFixed = true;
433#ifdef RT_STRICT
434 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
435#else
436 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
437#endif
438 return rc;
439}
440
441
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * Allocates the dirty page bitmap (one byte per page) either from the MM
 * heap or, when fGuarded, from page-aligned memory with an inaccessible
 * guard region placed right after the live part of the map so overruns
 * fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Overflow check: GCPhysLastRam + 1 must not wrap. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty byte per page; assert cb was page aligned (no pages lost to the shift). */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        /* cbBitmapAligned: live map rounded up to a whole page.
         * cbBitmapFull: total allocation incl. guard, rounded up to the map
         * size of a full 4GB of RAM (_4G >> PAGE_SHIFT bytes granularity). */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything after the aligned live map inaccessible (the guard). */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map pointer up so its END abuts the guard region - an
         * overrun of even one byte hits protected memory. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it. */
    /* All pages start out dirty (0xff = all dirty flags set). */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
496
497
498/**
499 * Terminates the REM.
500 *
501 * Termination means cleaning up and freeing all resources,
502 * the VM it self is at this point powered off or suspended.
503 *
504 * @returns VBox status code.
505 * @param pVM The VM to operate on.
506 */
507REMR3DECL(int) REMR3Term(PVM pVM)
508{
509#ifdef VBOX_WITH_STATISTICS
510 /*
511 * Statistics.
512 */
513 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
514 STAM_DEREG(pVM, &gStatCompilationQEmu);
515 STAM_DEREG(pVM, &gStatRunCodeQEmu);
516 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
517 STAM_DEREG(pVM, &gStatTimers);
518 STAM_DEREG(pVM, &gStatTBLookup);
519 STAM_DEREG(pVM, &gStatIRQ);
520 STAM_DEREG(pVM, &gStatRawCheck);
521 STAM_DEREG(pVM, &gStatMemRead);
522 STAM_DEREG(pVM, &gStatMemWrite);
523 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
524 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
525
526 STAM_DEREG(pVM, &gStatCpuGetTSC);
527
528 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
529 STAM_DEREG(pVM, &gStatRefuseVM86);
530 STAM_DEREG(pVM, &gStatRefusePaging);
531 STAM_DEREG(pVM, &gStatRefusePAE);
532 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
533 STAM_DEREG(pVM, &gStatRefuseIF0);
534 STAM_DEREG(pVM, &gStatRefuseCode16);
535 STAM_DEREG(pVM, &gStatRefuseWP0);
536 STAM_DEREG(pVM, &gStatRefuseRing1or2);
537 STAM_DEREG(pVM, &gStatRefuseCanExecute);
538 STAM_DEREG(pVM, &gStatFlushTBs);
539
540 STAM_DEREG(pVM, &gStatREMGDTChange);
541 STAM_DEREG(pVM, &gStatREMLDTRChange);
542 STAM_DEREG(pVM, &gStatREMIDTChange);
543 STAM_DEREG(pVM, &gStatREMTRChange);
544
545 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
546 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
547 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
548 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
549 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
550 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
551
552 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
553 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
554 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
555 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
556 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
557 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
558
559 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
560#endif /* VBOX_WITH_STATISTICS */
561
562 STAM_REL_DEREG(pVM, &tb_flush_count);
563 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
564 STAM_REL_DEREG(pVM, &tlb_flush_count);
565
566 return VINF_SUCCESS;
567}
568
569
570/**
571 * The VM is being reset.
572 *
573 * For the REM component this means to call the cpu_reset() and
574 * reinitialize some state variables.
575 *
576 * @param pVM VM handle.
577 */
578REMR3DECL(void) REMR3Reset(PVM pVM)
579{
580 /*
581 * Reset the REM cpu.
582 */
583 pVM->rem.s.fIgnoreAll = true;
584 cpu_reset(&pVM->rem.s.Env);
585 pVM->rem.s.cInvalidatedPages = 0;
586 pVM->rem.s.fIgnoreAll = false;
587
588 /* Clear raw ring 0 init state */
589 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
590
591 /* Flush the TBs the next time we execute code here. */
592 pVM->rem.s.fFlushTBs = true;
593}
594
595
596/**
597 * Execute state save operation.
598 *
599 * @returns VBox status code.
600 * @param pVM VM Handle.
601 * @param pSSM SSM operation handle.
602 */
603static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
604{
605 PREM pRem = &pVM->rem.s;
606
607 /*
608 * Save the required CPU Env bits.
609 * (Not much because we're never in REM when doing the save.)
610 */
611 LogFlow(("remR3Save:\n"));
612 Assert(!pRem->fInREM);
613 SSMR3PutU32(pSSM, pRem->Env.hflags);
614 SSMR3PutU32(pSSM, ~0); /* separator */
615
616 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
617 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
618 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
619
620 return SSMR3PutU32(pSSM, ~0); /* terminator */
621}
622
623
624/**
625 * Execute state load operation.
626 *
627 * @returns VBox status code.
628 * @param pVM VM Handle.
629 * @param pSSM SSM operation handle.
630 * @param u32Version Data layout version.
631 */
632static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
633{
634 uint32_t u32Dummy;
635 uint32_t fRawRing0 = false;
636 uint32_t u32Sep;
637 unsigned i;
638 int rc;
639 PREM pRem;
640 LogFlow(("remR3Load:\n"));
641
642 /*
643 * Validate version.
644 */
645 if ( u32Version != REM_SAVED_STATE_VERSION
646 && u32Version != REM_SAVED_STATE_VERSION_VER1_6)
647 {
648 AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
649 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
650 }
651
652 /*
653 * Do a reset to be on the safe side...
654 */
655 REMR3Reset(pVM);
656
657 /*
658 * Ignore all ignorable notifications.
659 * (Not doing this will cause serious trouble.)
660 */
661 pVM->rem.s.fIgnoreAll = true;
662
663 /*
664 * Load the required CPU Env bits.
665 * (Not much because we're never in REM when doing the save.)
666 */
667 pRem = &pVM->rem.s;
668 Assert(!pRem->fInREM);
669 SSMR3GetU32(pSSM, &pRem->Env.hflags);
670 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
671 {
672 /* Redundant REM CPU state has to be loaded, but can be ignored. */
673 CPUX86State_Ver16 temp;
674 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
675 }
676
677 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
678 if (RT_FAILURE(rc))
679 return rc;
680 if (u32Sep != ~0U)
681 {
682 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
683 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
684 }
685
686 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
687 SSMR3GetUInt(pSSM, &fRawRing0);
688 if (fRawRing0)
689 pRem->Env.state |= CPU_RAW_RING0;
690
691 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
692 {
693 /*
694 * Load the REM stuff.
695 */
696 rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
697 if (RT_FAILURE(rc))
698 return rc;
699 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
700 {
701 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
702 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
703 }
704 for (i = 0; i < pRem->cInvalidatedPages; i++)
705 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
706 }
707
708 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
709 if (RT_FAILURE(rc))
710 return rc;
711
712 /* check the terminator. */
713 rc = SSMR3GetU32(pSSM, &u32Sep);
714 if (RT_FAILURE(rc))
715 return rc;
716 if (u32Sep != ~0U)
717 {
718 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
719 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
720 }
721
722 /*
723 * Get the CPUID features.
724 */
725 PVMCPU pVCpu = VMMGetCpu(pVM);
726 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
727 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
728
729 /*
730 * Sync the Load Flush the TLB
731 */
732 tlb_flush(&pRem->Env, 1);
733
734 /*
735 * Stop ignoring ignornable notifications.
736 */
737 pVM->rem.s.fIgnoreAll = false;
738
739 /*
740 * Sync the whole CPU state when executing code in the recompiler.
741 */
742 for (i=0;i<pVM->cCPUs;i++)
743 {
744 PVMCPU pVCpu = &pVM->aCpus[i];
745
746 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
747 }
748 return VINF_SUCCESS;
749}
750
751
752
753#undef LOG_GROUP
754#define LOG_GROUP LOG_GROUP_REM_RUN
755
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;       /* was there already a breakpoint armed at the current PC? */

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;   /* saved; restored before returning */
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     */
    /* Flat PC = EIP + CS base. */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    /* cpu_breakpoint_remove returns 0 when a breakpoint was found and removed. */
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Tick the clocks once (resume+pause) so time moves for the stepped instruction. */
        TMCpuTickResume(pVCpu);
        TMCpuTickPause(pVCpu);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the QEMU exit code to a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was latched in rem.s.rc; fetch and consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
842
843
844/**
845 * Set a breakpoint using the REM facilities.
846 *
847 * @returns VBox status code.
848 * @param pVM The VM handle.
849 * @param Address The breakpoint address.
850 * @thread The emulation thread.
851 */
852REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
853{
854 VM_ASSERT_EMT(pVM);
855 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
856 {
857 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
858 return VINF_SUCCESS;
859 }
860 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
861 return VERR_REM_NO_MORE_BP_SLOTS;
862}
863
864
865/**
866 * Clears a breakpoint set by REMR3BreakpointSet().
867 *
868 * @returns VBox status code.
869 * @param pVM The VM handle.
870 * @param Address The breakpoint address.
871 * @thread The emulation thread.
872 */
873REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
874{
875 VM_ASSERT_EMT(pVM);
876 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
877 {
878 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
879 return VINF_SUCCESS;
880 }
881 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
882 return VERR_REM_BP_NOT_FOUND;
883}
884
885
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * The single-instruction semantics are implemented by temporarily
 * replacing the interrupt request mask with CPU_INTERRUPT_SINGLE_INSTR
 * around the cpu_exec() call; the original mask is restored afterwards.
 *
 * @returns VBox status code (VINF_EM_RESCHEDULE*, VINF_EM_HALT,
 *          VINF_EM_DBG_*, or the rc raised via remR3RaiseRC).
 * @param pVM VM handle.
 * @param pVCpu VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    /* Restore the caller's flush request regardless of the sync outcome. */
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt request mask; it is restored before returning. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        /* Map the QEMU EXCP_* exit reason onto a VBox status code. */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the breakpoint table for the current PC. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                /* Fetch the rc stashed by remR3RaiseRC and reset the slot. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1034
1035
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code translated from the QEMU EXCP_* exit reason.
 *
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Bracket the execution so TM accounts guest run time correctly. */
    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Translate the QEMU exit code into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the breakpoint table for the current PC. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            /* Fetch the rc stashed by remR3RaiseRC and reset the slot. */
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1164
1165
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param env The CPU env struct.
 * @param eip The EIP to check this for (might differ from env->eip).
 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. Also consulted at the end for the first-call special case. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields consulted by that function are filled in;
         * the rest of Ctx is left uninitialized on purpose.
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    /* Breakpoints are only honoured by the recompiler, so refuse raw mode. */
    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Both paging and protected mode must be enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE is only acceptable if the (virtual) CPU reports the feature. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 (or ring 1/2, which are refused below) code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* PATM patch code is always run in raw ring-0 mode. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1400
1401
1402/**
1403 * Fetches a code byte.
1404 *
1405 * @returns Success indicator (bool) for ease of use.
1406 * @param env The CPU environment structure.
1407 * @param GCPtrInstr Where to fetch code.
1408 * @param pu8Byte Where to store the byte on success
1409 */
1410bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1411{
1412 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1413 if (RT_SUCCESS(rc))
1414 return true;
1415 return false;
1416}
1417
1418
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param env Pointer to cpu environment.
 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     * PGM works off the CPUM context, so it has to be current.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Fall back to a full CR3 sync on the next occasion. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1467
1468
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Encodes the PGM result in the low bits of the returned pointer:
 *  - failure  -> (void *)1
 *  - write-monitored page -> pv | 2
 *  - otherwise the plain mapping address.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored here; the call hard-codes
     * true. Confirm this is intentional and not a leftover — TODO. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1491
1492
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * Only active when VBOX_REM_PROTECT_PAGES_FROM_SMC is defined; otherwise a no-op.
 *
 * @param env Pointer to the CPU environment.
 * @param GCPtr Code page to monitor
 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (     (env->cr[0] & X86_CR0_PG)                 /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)   /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                   /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1511
1512
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * Counterpart of remR3ProtectCode(); same VBOX_REM_PROTECT_PAGES_FROM_SMC gating
 * and the same set of conditions, so monitor/unmonitor calls stay balanced.
 *
 * @param env Pointer to the CPU environment.
 * @param GCPtr Code page to monitor
 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (     (env->cr[0] & X86_CR0_PG)                 /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)   /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                   /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1531
1532
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param env Pointer to the CPU environment.
 * @param fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     * PGM works off the CPUM context, so it has to be current.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1577
1578
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    /* No EFER in 32-bit target builds. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM status codes are forwarded to EM via remR3RaiseRC. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1629
1630
/**
 * Called from compiled code to run dma.
 *
 * Suspends the emulated-code profiling while the PDM DMA controllers run,
 * so DMA time isn't accounted as guest execution time.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1642
1643
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Swaps the active profile counter from emulated-code to timer work for the
 * duration of the timer-queue processing.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1659
1660
1661/**
1662 * Record trap occurance
1663 *
1664 * @returns VBox status code
1665 * @param env Pointer to the CPU environment.
1666 * @param uTrap Trap nr
1667 * @param uErrorCode Error code
1668 * @param pvNextEIP Next EIP
1669 */
1670int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1671{
1672 PVM pVM = env->pVM;
1673#ifdef VBOX_WITH_STATISTICS
1674 static STAMCOUNTER s_aStatTrap[255];
1675 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1676#endif
1677
1678#ifdef VBOX_WITH_STATISTICS
1679 if (uTrap < 255)
1680 {
1681 if (!s_aRegisters[uTrap])
1682 {
1683 char szStatName[64];
1684 s_aRegisters[uTrap] = true;
1685 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1686 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1687 }
1688 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1689 }
1690#endif
1691 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1692 if( uTrap < 0x20
1693 && (env->cr[0] & X86_CR0_PE)
1694 && !(env->eflags & X86_EFL_VM))
1695 {
1696#ifdef DEBUG
1697 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1698#endif
1699 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1700 {
1701 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1702 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1703 return VERR_REM_TOO_MANY_TRAPS;
1704 }
1705 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1706 pVM->rem.s.cPendingExceptions = 1;
1707 pVM->rem.s.uPendingException = uTrap;
1708 pVM->rem.s.uPendingExcptEIP = env->eip;
1709 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1710 }
1711 else
1712 {
1713 pVM->rem.s.cPendingExceptions = 0;
1714 pVM->rem.s.uPendingException = uTrap;
1715 pVM->rem.s.uPendingExcptEIP = env->eip;
1716 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1717 }
1718 return VINF_SUCCESS;
1719}
1720
1721
1722/*
1723 * Clear current active trap
1724 *
1725 * @param pVM VM Handle.
1726 */
1727void remR3TrapClear(PVM pVM)
1728{
1729 pVM->rem.s.cPendingExceptions = 0;
1730 pVM->rem.s.uPendingException = 0;
1731 pVM->rem.s.uPendingExcptEIP = 0;
1732 pVM->rem.s.uPendingExcptCR2 = 0;
1733}
1734
1735
/**
 * Record previous call instruction addresses.
 *
 * Feeds the current EIP into CSAM's call-address history.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1745
1746
1747/**
1748 * Syncs the internal REM state with the VM.
1749 *
1750 * This must be called before REMR3Run() is invoked whenever when the REM
1751 * state is not up to date. Calling it several times in a row is not
1752 * permitted.
1753 *
1754 * @returns VBox status code.
1755 *
1756 * @param pVM VM Handle.
1757 * @param pVCpu VMCPU Handle.
1758 *
1759 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1760 * no do this since the majority of the callers don't want any unnecessary of events
1761 * pending that would immediatly interrupt execution.
1762 */
1763REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1764{
1765 register const CPUMCTX *pCtx;
1766 register unsigned fFlags;
1767 bool fHiddenSelRegsValid;
1768 unsigned i;
1769 TRPMEVENT enmType;
1770 uint8_t u8TrapNo;
1771 int rc;
1772
1773 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1774 Log2(("REMR3State:\n"));
1775
1776 pVM->rem.s.Env.pVCpu = pVCpu;
1777 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1778 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1779
1780 Assert(!pVM->rem.s.fInREM);
1781 pVM->rem.s.fInStateSync = true;
1782
1783 /*
1784 * If we have to flush TBs, do that immediately.
1785 */
1786 if (pVM->rem.s.fFlushTBs)
1787 {
1788 STAM_COUNTER_INC(&gStatFlushTBs);
1789 tb_flush(&pVM->rem.s.Env);
1790 pVM->rem.s.fFlushTBs = false;
1791 }
1792
1793 /*
1794 * Copy the registers which require no special handling.
1795 */
1796#ifdef TARGET_X86_64
1797 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1798 Assert(R_EAX == 0);
1799 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1800 Assert(R_ECX == 1);
1801 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1802 Assert(R_EDX == 2);
1803 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1804 Assert(R_EBX == 3);
1805 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1806 Assert(R_ESP == 4);
1807 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1808 Assert(R_EBP == 5);
1809 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1810 Assert(R_ESI == 6);
1811 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1812 Assert(R_EDI == 7);
1813 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1814 pVM->rem.s.Env.regs[8] = pCtx->r8;
1815 pVM->rem.s.Env.regs[9] = pCtx->r9;
1816 pVM->rem.s.Env.regs[10] = pCtx->r10;
1817 pVM->rem.s.Env.regs[11] = pCtx->r11;
1818 pVM->rem.s.Env.regs[12] = pCtx->r12;
1819 pVM->rem.s.Env.regs[13] = pCtx->r13;
1820 pVM->rem.s.Env.regs[14] = pCtx->r14;
1821 pVM->rem.s.Env.regs[15] = pCtx->r15;
1822
1823 pVM->rem.s.Env.eip = pCtx->rip;
1824
1825 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1826#else
1827 Assert(R_EAX == 0);
1828 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1829 Assert(R_ECX == 1);
1830 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1831 Assert(R_EDX == 2);
1832 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1833 Assert(R_EBX == 3);
1834 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1835 Assert(R_ESP == 4);
1836 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1837 Assert(R_EBP == 5);
1838 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1839 Assert(R_ESI == 6);
1840 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1841 Assert(R_EDI == 7);
1842 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1843 pVM->rem.s.Env.eip = pCtx->eip;
1844
1845 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1846#endif
1847
1848 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1849
1850 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1851 for (i=0;i<8;i++)
1852 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1853
1854 /*
1855 * Clear the halted hidden flag (the interrupt waking up the CPU can
1856 * have been dispatched in raw mode).
1857 */
1858 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1859
1860 /*
1861 * Replay invlpg?
1862 */
1863 if (pVM->rem.s.cInvalidatedPages)
1864 {
1865 RTUINT i;
1866
1867 pVM->rem.s.fIgnoreInvlPg = true;
1868 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1869 {
1870 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1871 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1872 }
1873 pVM->rem.s.fIgnoreInvlPg = false;
1874 pVM->rem.s.cInvalidatedPages = 0;
1875 }
1876
1877 /* Replay notification changes? */
1878 if (pVM->rem.s.cHandlerNotifications)
1879 REMR3ReplayHandlerNotifications(pVM);
1880
1881 /* Update MSRs; before CRx registers! */
1882 pVM->rem.s.Env.efer = pCtx->msrEFER;
1883 pVM->rem.s.Env.star = pCtx->msrSTAR;
1884 pVM->rem.s.Env.pat = pCtx->msrPAT;
1885#ifdef TARGET_X86_64
1886 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1887 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1888 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1889 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1890
1891 /* Update the internal long mode activate flag according to the new EFER value. */
1892 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1893 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1894 else
1895 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1896#endif
1897
1898 /*
1899 * Registers which are rarely changed and require special handling / order when changed.
1900 */
1901 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1902 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1903 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1904 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1905 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1906 {
1907 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1908 {
1909 pVM->rem.s.fIgnoreCR3Load = true;
1910 tlb_flush(&pVM->rem.s.Env, true);
1911 pVM->rem.s.fIgnoreCR3Load = false;
1912 }
1913
1914 /* CR4 before CR0! */
1915 if (fFlags & CPUM_CHANGED_CR4)
1916 {
1917 pVM->rem.s.fIgnoreCR3Load = true;
1918 pVM->rem.s.fIgnoreCpuMode = true;
1919 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1920 pVM->rem.s.fIgnoreCpuMode = false;
1921 pVM->rem.s.fIgnoreCR3Load = false;
1922 }
1923
1924 if (fFlags & CPUM_CHANGED_CR0)
1925 {
1926 pVM->rem.s.fIgnoreCR3Load = true;
1927 pVM->rem.s.fIgnoreCpuMode = true;
1928 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1929 pVM->rem.s.fIgnoreCpuMode = false;
1930 pVM->rem.s.fIgnoreCR3Load = false;
1931 }
1932
1933 if (fFlags & CPUM_CHANGED_CR3)
1934 {
1935 pVM->rem.s.fIgnoreCR3Load = true;
1936 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1937 pVM->rem.s.fIgnoreCR3Load = false;
1938 }
1939
1940 if (fFlags & CPUM_CHANGED_GDTR)
1941 {
1942 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1943 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1944 }
1945
1946 if (fFlags & CPUM_CHANGED_IDTR)
1947 {
1948 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1949 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1950 }
1951
1952 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1953 {
1954 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1955 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1956 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1957 }
1958
1959 if (fFlags & CPUM_CHANGED_LDTR)
1960 {
1961 if (fHiddenSelRegsValid)
1962 {
1963 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1964 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1965 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1966 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1967 }
1968 else
1969 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1970 }
1971
1972 if (fFlags & CPUM_CHANGED_CPUID)
1973 {
1974 uint32_t u32Dummy;
1975
1976 /*
1977 * Get the CPUID features.
1978 */
1979 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1980 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1981 }
1982
1983 /* Sync FPU state after CR4, CPUID and EFER (!). */
1984 if (fFlags & CPUM_CHANGED_FPU_REM)
1985 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1986 }
1987
1988 /*
1989 * Sync TR unconditionally to make life simpler.
1990 */
1991 pVM->rem.s.Env.tr.selector = pCtx->tr;
1992 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1993 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1994 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1995 /* Note! do_interrupt will fault if the busy flag is still set... */
1996 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1997
1998 /*
1999 * Update selector registers.
2000 * This must be done *after* we've synced gdt, ldt and crX registers
2001 * since we're reading the GDT/LDT om sync_seg. This will happen with
2002 * saved state which takes a quick dip into rawmode for instance.
2003 */
2004 /*
2005 * Stack; Note first check this one as the CPL might have changed. The
2006 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2007 */
2008
2009 if (fHiddenSelRegsValid)
2010 {
2011 /* The hidden selector registers are valid in the CPU context. */
2012 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2013
2014 /* Set current CPL */
2015 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2016
2017 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2018 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2019 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2020 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2021 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2022 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2023 }
2024 else
2025 {
2026 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2027 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2028 {
2029 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2030
2031 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2032 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2033#ifdef VBOX_WITH_STATISTICS
2034 if (pVM->rem.s.Env.segs[R_SS].newselector)
2035 {
2036 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2037 }
2038#endif
2039 }
2040 else
2041 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2042
2043 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2044 {
2045 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2046 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2047#ifdef VBOX_WITH_STATISTICS
2048 if (pVM->rem.s.Env.segs[R_ES].newselector)
2049 {
2050 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2051 }
2052#endif
2053 }
2054 else
2055 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2056
2057 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2058 {
2059 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2060 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2061#ifdef VBOX_WITH_STATISTICS
2062 if (pVM->rem.s.Env.segs[R_CS].newselector)
2063 {
2064 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2065 }
2066#endif
2067 }
2068 else
2069 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2070
2071 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2072 {
2073 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2074 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2075#ifdef VBOX_WITH_STATISTICS
2076 if (pVM->rem.s.Env.segs[R_DS].newselector)
2077 {
2078 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2079 }
2080#endif
2081 }
2082 else
2083 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2084
2085 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2086 * be the same but not the base/limit. */
2087 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2088 {
2089 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2090 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2091#ifdef VBOX_WITH_STATISTICS
2092 if (pVM->rem.s.Env.segs[R_FS].newselector)
2093 {
2094 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2095 }
2096#endif
2097 }
2098 else
2099 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2100
2101 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2102 {
2103 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2104 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2105#ifdef VBOX_WITH_STATISTICS
2106 if (pVM->rem.s.Env.segs[R_GS].newselector)
2107 {
2108 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2109 }
2110#endif
2111 }
2112 else
2113 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2114 }
2115
2116 /*
2117 * Check for traps.
2118 */
2119 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2120 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2121 if (RT_SUCCESS(rc))
2122 {
2123#ifdef DEBUG
2124 if (u8TrapNo == 0x80)
2125 {
2126 remR3DumpLnxSyscall(pVCpu);
2127 remR3DumpOBsdSyscall(pVCpu);
2128 }
2129#endif
2130
2131 pVM->rem.s.Env.exception_index = u8TrapNo;
2132 if (enmType != TRPM_SOFTWARE_INT)
2133 {
2134 pVM->rem.s.Env.exception_is_int = 0;
2135 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2136 }
2137 else
2138 {
2139 /*
2140 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2141 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2142 * for int03 and into.
2143 */
2144 pVM->rem.s.Env.exception_is_int = 1;
2145 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2146 /* int 3 may be generated by one-byte 0xcc */
2147 if (u8TrapNo == 3)
2148 {
2149 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2150 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2151 }
2152 /* int 4 may be generated by one-byte 0xce */
2153 else if (u8TrapNo == 4)
2154 {
2155 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2156 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2157 }
2158 }
2159
2160 /* get error code and cr2 if needed. */
2161 switch (u8TrapNo)
2162 {
2163 case 0x0e:
2164 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2165 /* fallthru */
2166 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2167 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2168 break;
2169
2170 case 0x11: case 0x08:
2171 default:
2172 pVM->rem.s.Env.error_code = 0;
2173 break;
2174 }
2175
2176 /*
2177 * We can now reset the active trap since the recompiler is gonna have a go at it.
2178 */
2179 rc = TRPMResetTrap(pVCpu);
2180 AssertRC(rc);
2181 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2182 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2183 }
2184
2185 /*
2186 * Clear old interrupt request flags; Check for pending hardware interrupts.
2187 * (See @remark for why we don't check for other FFs.)
2188 */
2189 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2190 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2191 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2192 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2193
2194 /*
2195 * We're now in REM mode.
2196 */
2197 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2198 pVM->rem.s.fInREM = true;
2199 pVM->rem.s.fInStateSync = false;
2200 pVM->rem.s.cCanExecuteRaw = 0;
2201 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2202 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2203 return VINF_SUCCESS;
2204}
2205
2206
2207/**
2208 * Syncs back changes in the REM state to the the VM state.
2209 *
2210 * This must be called after invoking REMR3Run().
2211 * Calling it several times in a row is not permitted.
2212 *
2213 * @returns VBox status code.
2214 *
2215 * @param pVM VM Handle.
2216 * @param pVCpu VMCPU Handle.
2217 */
2218REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2219{
2220 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2221 Assert(pCtx);
2222 unsigned i;
2223
2224 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2225 Log2(("REMR3StateBack:\n"));
2226 Assert(pVM->rem.s.fInREM);
2227
2228 /*
2229 * Copy back the registers.
2230 * This is done in the order they are declared in the CPUMCTX structure.
2231 */
2232
2233 /** @todo FOP */
2234 /** @todo FPUIP */
2235 /** @todo CS */
2236 /** @todo FPUDP */
2237 /** @todo DS */
2238
2239 /** @todo check if FPU/XMM was actually used in the recompiler */
2240 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2241//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2242
2243#ifdef TARGET_X86_64
2244 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2245 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2246 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2247 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2248 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2249 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2250 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2251 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2252 pCtx->r8 = pVM->rem.s.Env.regs[8];
2253 pCtx->r9 = pVM->rem.s.Env.regs[9];
2254 pCtx->r10 = pVM->rem.s.Env.regs[10];
2255 pCtx->r11 = pVM->rem.s.Env.regs[11];
2256 pCtx->r12 = pVM->rem.s.Env.regs[12];
2257 pCtx->r13 = pVM->rem.s.Env.regs[13];
2258 pCtx->r14 = pVM->rem.s.Env.regs[14];
2259 pCtx->r15 = pVM->rem.s.Env.regs[15];
2260
2261 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2262
2263#else
2264 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2265 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2266 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2267 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2268 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2269 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2270 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2271
2272 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2273#endif
2274
2275 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2276
2277#ifdef VBOX_WITH_STATISTICS
2278 if (pVM->rem.s.Env.segs[R_SS].newselector)
2279 {
2280 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2281 }
2282 if (pVM->rem.s.Env.segs[R_GS].newselector)
2283 {
2284 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2285 }
2286 if (pVM->rem.s.Env.segs[R_FS].newselector)
2287 {
2288 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2289 }
2290 if (pVM->rem.s.Env.segs[R_ES].newselector)
2291 {
2292 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2293 }
2294 if (pVM->rem.s.Env.segs[R_DS].newselector)
2295 {
2296 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2297 }
2298 if (pVM->rem.s.Env.segs[R_CS].newselector)
2299 {
2300 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2301 }
2302#endif
2303 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2304 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2305 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2306 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2307 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2308
2309#ifdef TARGET_X86_64
2310 pCtx->rip = pVM->rem.s.Env.eip;
2311 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2312#else
2313 pCtx->eip = pVM->rem.s.Env.eip;
2314 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2315#endif
2316
2317 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2318 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2319 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2320 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2321 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2322 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2323
2324 for (i = 0; i < 8; i++)
2325 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2326
2327 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2328 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2329 {
2330 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2331 STAM_COUNTER_INC(&gStatREMGDTChange);
2332 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2333 }
2334
2335 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2336 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2337 {
2338 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2339 STAM_COUNTER_INC(&gStatREMIDTChange);
2340 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2341 }
2342
2343 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2344 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2345 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2346 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2347 {
2348 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2349 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2350 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2351 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2352 STAM_COUNTER_INC(&gStatREMLDTRChange);
2353 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2354 }
2355
2356 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2357 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2358 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2359 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2360 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2361 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2362 : 0) )
2363 {
2364 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2365 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2366 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2367 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2368 pCtx->tr = pVM->rem.s.Env.tr.selector;
2369 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2370 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2371 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2372 if (pCtx->trHid.Attr.u)
2373 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2374 STAM_COUNTER_INC(&gStatREMTRChange);
2375 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2376 }
2377
2378 /** @todo These values could still be out of sync! */
2379 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2380 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2381 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2382 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2383
2384 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2385 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2386 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2387
2388 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2389 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2390 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2391
2392 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2393 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2394 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2395
2396 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2397 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2398 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2399
2400 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2401 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2402 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2403
2404 /* Sysenter MSR */
2405 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2406 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2407 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2408
2409 /* System MSRs. */
2410 pCtx->msrEFER = pVM->rem.s.Env.efer;
2411 pCtx->msrSTAR = pVM->rem.s.Env.star;
2412 pCtx->msrPAT = pVM->rem.s.Env.pat;
2413#ifdef TARGET_X86_64
2414 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2415 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2416 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2417 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2418#endif
2419
2420 remR3TrapClear(pVM);
2421
2422 /*
2423 * Check for traps.
2424 */
2425 if ( pVM->rem.s.Env.exception_index >= 0
2426 && pVM->rem.s.Env.exception_index < 256)
2427 {
2428 int rc;
2429
2430 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2431 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2432 AssertRC(rc);
2433 switch (pVM->rem.s.Env.exception_index)
2434 {
2435 case 0x0e:
2436 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2437 /* fallthru */
2438 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2439 case 0x11: case 0x08: /* 0 */
2440 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2441 break;
2442 }
2443
2444 }
2445
2446 /*
2447 * We're not longer in REM mode.
2448 */
2449 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2450 pVM->rem.s.fInREM = false;
2451 pVM->rem.s.pCtx = NULL;
2452 pVM->rem.s.Env.pVCpu = NULL;
2453 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2454 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2455 return VINF_SUCCESS;
2456}
2457
2458
2459/**
2460 * This is called by the disassembler when it wants to update the cpu state
2461 * before for instance doing a register dump.
2462 */
2463static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2464{
2465 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2466 unsigned i;
2467
2468 Assert(pVM->rem.s.fInREM);
2469
2470 /*
2471 * Copy back the registers.
2472 * This is done in the order they are declared in the CPUMCTX structure.
2473 */
2474
2475 /** @todo FOP */
2476 /** @todo FPUIP */
2477 /** @todo CS */
2478 /** @todo FPUDP */
2479 /** @todo DS */
2480 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2481 pCtx->fpu.MXCSR = 0;
2482 pCtx->fpu.MXCSR_MASK = 0;
2483
2484 /** @todo check if FPU/XMM was actually used in the recompiler */
2485 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2486//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2487
2488#ifdef TARGET_X86_64
2489 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2490 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2491 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2492 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2493 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2494 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2495 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2496 pCtx->r8 = pVM->rem.s.Env.regs[8];
2497 pCtx->r9 = pVM->rem.s.Env.regs[9];
2498 pCtx->r10 = pVM->rem.s.Env.regs[10];
2499 pCtx->r11 = pVM->rem.s.Env.regs[11];
2500 pCtx->r12 = pVM->rem.s.Env.regs[12];
2501 pCtx->r13 = pVM->rem.s.Env.regs[13];
2502 pCtx->r14 = pVM->rem.s.Env.regs[14];
2503 pCtx->r15 = pVM->rem.s.Env.regs[15];
2504
2505 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2506#else
2507 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2508 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2509 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2510 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2511 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2512 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2513 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2514
2515 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2516#endif
2517
2518 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2519
2520 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2521 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2522 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2523 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2524 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2525
2526#ifdef TARGET_X86_64
2527 pCtx->rip = pVM->rem.s.Env.eip;
2528 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2529#else
2530 pCtx->eip = pVM->rem.s.Env.eip;
2531 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2532#endif
2533
2534 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2535 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2536 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2537 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2538 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2539 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2540
2541 for (i = 0; i < 8; i++)
2542 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2543
2544 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2545 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2546 {
2547 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2548 STAM_COUNTER_INC(&gStatREMGDTChange);
2549 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2550 }
2551
2552 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2553 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2554 {
2555 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2556 STAM_COUNTER_INC(&gStatREMIDTChange);
2557 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2558 }
2559
2560 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2561 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2562 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2563 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2564 {
2565 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2566 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2567 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2568 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2569 STAM_COUNTER_INC(&gStatREMLDTRChange);
2570 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2571 }
2572
2573 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2574 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2575 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2576 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2577 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2578 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2579 : 0) )
2580 {
2581 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2582 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2583 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2584 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2585 pCtx->tr = pVM->rem.s.Env.tr.selector;
2586 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2587 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2588 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2589 if (pCtx->trHid.Attr.u)
2590 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2591 STAM_COUNTER_INC(&gStatREMTRChange);
2592 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2593 }
2594
2595 /** @todo These values could still be out of sync! */
2596 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2597 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2598 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2599 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2600
2601 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2602 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2603 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2604
2605 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2606 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2607 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2608
2609 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2610 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2611 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2612
2613 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2614 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2615 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2616
2617 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2618 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2619 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2620
2621 /* Sysenter MSR */
2622 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2623 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2624 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2625
2626 /* System MSRs. */
2627 pCtx->msrEFER = pVM->rem.s.Env.efer;
2628 pCtx->msrSTAR = pVM->rem.s.Env.star;
2629 pCtx->msrPAT = pVM->rem.s.Env.pat;
2630#ifdef TARGET_X86_64
2631 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2632 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2633 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2634 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2635#endif
2636
2637}
2638
2639
2640/**
2641 * Update the VMM state information if we're currently in REM.
2642 *
2643 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2644 * we're currently executing in REM and the VMM state is invalid. This method will of
2645 * course check that we're executing in REM before syncing any data over to the VMM.
2646 *
2647 * @param pVM The VM handle.
2648 * @param pVCpu The VMCPU handle.
2649 */
2650REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2651{
2652 if (pVM->rem.s.fInREM)
2653 remR3StateUpdate(pVM, pVCpu);
2654}
2655
2656
2657#undef LOG_GROUP
2658#define LOG_GROUP LOG_GROUP_REM
2659
2660
2661/**
2662 * Notify the recompiler about Address Gate 20 state change.
2663 *
2664 * This notification is required since A20 gate changes are
2665 * initialized from a device driver and the VM might just as
2666 * well be in REM mode as in RAW mode.
2667 *
2668 * @param pVM VM handle.
2669 * @param pVCpu VMCPU handle.
2670 * @param fEnable True if the gate should be enabled.
2671 * False if the gate should be disabled.
2672 */
2673REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2674{
2675 bool fSaved;
2676
2677 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2678 VM_ASSERT_EMT(pVM);
2679
2680 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2681 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2682
2683 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2684
2685 pVM->rem.s.fIgnoreAll = fSaved;
2686}
2687
2688
2689/**
2690 * Replays the invalidated recorded pages.
2691 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2692 *
2693 * @param pVM VM handle.
2694 * @param pVCpu VMCPU handle.
2695 */
2696REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM, PVMCPU pVCpu)
2697{
2698 RTUINT i;
2699
2700 VM_ASSERT_EMT(pVM);
2701
2702 /*
2703 * Sync the required registers.
2704 */
2705 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2706 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2707 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2708 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2709
2710 /*
2711 * Replay the flushes.
2712 */
2713 pVM->rem.s.fIgnoreInvlPg = true;
2714 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2715 {
2716 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2717 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2718 }
2719 pVM->rem.s.fIgnoreInvlPg = false;
2720 pVM->rem.s.cInvalidatedPages = 0;
2721}
2722
2723
2724/**
2725 * Replays the handler notification changes
2726 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2727 *
2728 * @param pVM VM handle.
2729 */
2730REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2731{
2732 /*
2733 * Replay the flushes.
2734 */
2735 RTUINT i;
2736 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2737
2738 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2739 VM_ASSERT_EMT(pVM);
2740
2741 pVM->rem.s.cHandlerNotifications = 0;
2742 for (i = 0; i < c; i++)
2743 {
2744 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2745 switch (pRec->enmKind)
2746 {
2747 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2748 REMR3NotifyHandlerPhysicalRegister(pVM,
2749 pRec->u.PhysicalRegister.enmType,
2750 pRec->u.PhysicalRegister.GCPhys,
2751 pRec->u.PhysicalRegister.cb,
2752 pRec->u.PhysicalRegister.fHasHCHandler);
2753 break;
2754
2755 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2756 REMR3NotifyHandlerPhysicalDeregister(pVM,
2757 pRec->u.PhysicalDeregister.enmType,
2758 pRec->u.PhysicalDeregister.GCPhys,
2759 pRec->u.PhysicalDeregister.cb,
2760 pRec->u.PhysicalDeregister.fHasHCHandler,
2761 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2762 break;
2763
2764 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2765 REMR3NotifyHandlerPhysicalModify(pVM,
2766 pRec->u.PhysicalModify.enmType,
2767 pRec->u.PhysicalModify.GCPhysOld,
2768 pRec->u.PhysicalModify.GCPhysNew,
2769 pRec->u.PhysicalModify.cb,
2770 pRec->u.PhysicalModify.fHasHCHandler,
2771 pRec->u.PhysicalModify.fRestoreAsRAM);
2772 break;
2773
2774 default:
2775 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2776 break;
2777 }
2778 }
2779 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2780}
2781
2782
2783/**
2784 * Notify REM about changed code page.
2785 *
2786 * @returns VBox status code.
2787 * @param pVM VM handle.
2788 * @param pVCpu VMCPU handle.
2789 * @param pvCodePage Code page address
2790 */
2791REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2792{
2793#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2794 int rc;
2795 RTGCPHYS PhysGC;
2796 uint64_t flags;
2797
2798 VM_ASSERT_EMT(pVM);
2799
2800 /*
2801 * Get the physical page address.
2802 */
2803 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2804 if (rc == VINF_SUCCESS)
2805 {
2806 /*
2807 * Sync the required registers and flush the whole page.
2808 * (Easier to do the whole page than notifying it about each physical
2809 * byte that was changed.
2810 */
2811 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2812 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2813 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2814 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2815
2816 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2817 }
2818#endif
2819 return VINF_SUCCESS;
2820}
2821
2822
2823/**
2824 * Notification about a successful MMR3PhysRegister() call.
2825 *
2826 * @param pVM VM handle.
2827 * @param GCPhys The physical address the RAM.
2828 * @param cb Size of the memory.
2829 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2830 */
2831REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2832{
2833 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2834 VM_ASSERT_EMT(pVM);
2835
2836 /*
2837 * Validate input - we trust the caller.
2838 */
2839 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2840 Assert(cb);
2841 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2842 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2843
2844 /*
2845 * Base ram? Update GCPhysLastRam.
2846 */
2847 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2848 {
2849 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2850 {
2851 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2852 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2853 }
2854 }
2855
2856 /*
2857 * Register the ram.
2858 */
2859 Assert(!pVM->rem.s.fIgnoreAll);
2860 pVM->rem.s.fIgnoreAll = true;
2861
2862 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2863 Assert(pVM->rem.s.fIgnoreAll);
2864 pVM->rem.s.fIgnoreAll = false;
2865}
2866
2867
2868/**
2869 * Notification about a successful MMR3PhysRomRegister() call.
2870 *
2871 * @param pVM VM handle.
2872 * @param GCPhys The physical address of the ROM.
2873 * @param cb The size of the ROM.
2874 * @param pvCopy Pointer to the ROM copy.
2875 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2876 * This function will be called when ever the protection of the
2877 * shadow ROM changes (at reset and end of POST).
2878 */
2879REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2880{
2881 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2882 VM_ASSERT_EMT(pVM);
2883
2884 /*
2885 * Validate input - we trust the caller.
2886 */
2887 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2888 Assert(cb);
2889 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2890
2891 /*
2892 * Register the rom.
2893 */
2894 Assert(!pVM->rem.s.fIgnoreAll);
2895 pVM->rem.s.fIgnoreAll = true;
2896
2897 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2898
2899 Assert(pVM->rem.s.fIgnoreAll);
2900 pVM->rem.s.fIgnoreAll = false;
2901}
2902
2903
2904/**
2905 * Notification about a successful memory deregistration or reservation.
2906 *
2907 * @param pVM VM Handle.
2908 * @param GCPhys Start physical address.
2909 * @param cb The size of the range.
2910 */
2911REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2912{
2913 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2914 VM_ASSERT_EMT(pVM);
2915
2916 /*
2917 * Validate input - we trust the caller.
2918 */
2919 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2920 Assert(cb);
2921 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2922
2923 /*
2924 * Unassigning the memory.
2925 */
2926 Assert(!pVM->rem.s.fIgnoreAll);
2927 pVM->rem.s.fIgnoreAll = true;
2928
2929 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2930
2931 Assert(pVM->rem.s.fIgnoreAll);
2932 pVM->rem.s.fIgnoreAll = false;
2933}
2934
2935
2936/**
2937 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2938 *
2939 * @param pVM VM Handle.
2940 * @param enmType Handler type.
2941 * @param GCPhys Handler range address.
2942 * @param cb Size of the handler range.
2943 * @param fHasHCHandler Set if the handler has a HC callback function.
2944 *
2945 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2946 * Handler memory type to memory which has no HC handler.
2947 */
2948REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2949{
2950 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2951 enmType, GCPhys, cb, fHasHCHandler));
2952 VM_ASSERT_EMT(pVM);
2953 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2954 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2955
2956 if (pVM->rem.s.cHandlerNotifications)
2957 REMR3ReplayHandlerNotifications(pVM);
2958
2959 Assert(!pVM->rem.s.fIgnoreAll);
2960 pVM->rem.s.fIgnoreAll = true;
2961
2962 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2963 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2964 else if (fHasHCHandler)
2965 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2966
2967 Assert(pVM->rem.s.fIgnoreAll);
2968 pVM->rem.s.fIgnoreAll = false;
2969}
2970
2971
2972/**
2973 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2974 *
2975 * @param pVM VM Handle.
2976 * @param enmType Handler type.
2977 * @param GCPhys Handler range address.
2978 * @param cb Size of the handler range.
2979 * @param fHasHCHandler Set if the handler has a HC callback function.
2980 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2981 */
2982REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2983{
2984 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2985 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2986 VM_ASSERT_EMT(pVM);
2987
2988 if (pVM->rem.s.cHandlerNotifications)
2989 REMR3ReplayHandlerNotifications(pVM);
2990
2991 Assert(!pVM->rem.s.fIgnoreAll);
2992 pVM->rem.s.fIgnoreAll = true;
2993
2994/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2995 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2996 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2997 else if (fHasHCHandler)
2998 {
2999 if (!fRestoreAsRAM)
3000 {
3001 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3002 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3003 }
3004 else
3005 {
3006 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3007 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3008 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3009 }
3010 }
3011
3012 Assert(pVM->rem.s.fIgnoreAll);
3013 pVM->rem.s.fIgnoreAll = false;
3014}
3015
3016
3017/**
3018 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3019 *
3020 * @param pVM VM Handle.
3021 * @param enmType Handler type.
3022 * @param GCPhysOld Old handler range address.
3023 * @param GCPhysNew New handler range address.
3024 * @param cb Size of the handler range.
3025 * @param fHasHCHandler Set if the handler has a HC callback function.
3026 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3027 */
3028REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3029{
3030 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3031 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3032 VM_ASSERT_EMT(pVM);
3033 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3034
3035 if (pVM->rem.s.cHandlerNotifications)
3036 REMR3ReplayHandlerNotifications(pVM);
3037
3038 if (fHasHCHandler)
3039 {
3040 Assert(!pVM->rem.s.fIgnoreAll);
3041 pVM->rem.s.fIgnoreAll = true;
3042
3043 /*
3044 * Reset the old page.
3045 */
3046 if (!fRestoreAsRAM)
3047 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3048 else
3049 {
3050 /* This is not perfect, but it'll do for PD monitoring... */
3051 Assert(cb == PAGE_SIZE);
3052 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3053 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3054 }
3055
3056 /*
3057 * Update the new page.
3058 */
3059 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3060 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3061 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3062
3063 Assert(pVM->rem.s.fIgnoreAll);
3064 pVM->rem.s.fIgnoreAll = false;
3065 }
3066}
3067
3068
3069/**
3070 * Checks if we're handling access to this page or not.
3071 *
3072 * @returns true if we're trapping access.
3073 * @returns false if we aren't.
3074 * @param pVM The VM handle.
3075 * @param GCPhys The physical address.
3076 *
3077 * @remark This function will only work correctly in VBOX_STRICT builds!
3078 */
3079REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3080{
3081#ifdef VBOX_STRICT
3082 unsigned long off;
3083 if (pVM->rem.s.cHandlerNotifications)
3084 REMR3ReplayHandlerNotifications(pVM);
3085
3086 off = get_phys_page_offset(GCPhys);
3087 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3088 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3089 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3090#else
3091 return false;
3092#endif
3093}
3094
3095
3096/**
3097 * Deals with a rare case in get_phys_addr_code where the code
3098 * is being monitored.
3099 *
3100 * It could also be an MMIO page, in which case we will raise a fatal error.
3101 *
3102 * @returns The physical address corresponding to addr.
3103 * @param env The cpu environment.
3104 * @param addr The virtual address.
3105 * @param pTLBEntry The TLB entry.
3106 */
3107target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3108 target_ulong addr,
3109 CPUTLBEntry* pTLBEntry,
3110 target_phys_addr_t ioTLBEntry)
3111{
3112 PVM pVM = env->pVM;
3113
3114 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3115 {
3116 /* If code memory is being monitored, appropriate IOTLB entry will have
3117 handler IO type, and addend will provide real physical address, no
3118 matter if we store VA in TLB or not, as handlers are always passed PA */
3119 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3120 return ret;
3121 }
3122 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3123 "*** handlers\n",
3124 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3125 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3126 LogRel(("*** mmio\n"));
3127 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3128 LogRel(("*** phys\n"));
3129 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3130 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3131 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3132 AssertFatalFailed();
3133}
3134
3135/**
3136 * Read guest RAM and ROM.
3137 *
3138 * @param SrcGCPhys The source address (guest physical).
3139 * @param pvDst The destination address.
3140 * @param cb Number of bytes
3141 */
3142void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3143{
3144 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3145 VBOX_CHECK_ADDR(SrcGCPhys);
3146 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3147#ifdef VBOX_DEBUG_PHYS
3148 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3149#endif
3150 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3151}
3152
3153
3154/**
3155 * Read guest RAM and ROM, unsigned 8-bit.
3156 *
3157 * @param SrcGCPhys The source address (guest physical).
3158 */
3159RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3160{
3161 uint8_t val;
3162 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3163 VBOX_CHECK_ADDR(SrcGCPhys);
3164 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3165 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3166#ifdef VBOX_DEBUG_PHYS
3167 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3168#endif
3169 return val;
3170}
3171
3172
3173/**
3174 * Read guest RAM and ROM, signed 8-bit.
3175 *
3176 * @param SrcGCPhys The source address (guest physical).
3177 */
3178RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3179{
3180 int8_t val;
3181 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3182 VBOX_CHECK_ADDR(SrcGCPhys);
3183 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3184 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3185#ifdef VBOX_DEBUG_PHYS
3186 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3187#endif
3188 return val;
3189}
3190
3191
3192/**
3193 * Read guest RAM and ROM, unsigned 16-bit.
3194 *
3195 * @param SrcGCPhys The source address (guest physical).
3196 */
3197RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3198{
3199 uint16_t val;
3200 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3201 VBOX_CHECK_ADDR(SrcGCPhys);
3202 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3203 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3204#ifdef VBOX_DEBUG_PHYS
3205 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3206#endif
3207 return val;
3208}
3209
3210
3211/**
3212 * Read guest RAM and ROM, signed 16-bit.
3213 *
3214 * @param SrcGCPhys The source address (guest physical).
3215 */
3216RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3217{
3218 int16_t val;
3219 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3220 VBOX_CHECK_ADDR(SrcGCPhys);
3221 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3222 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3223#ifdef VBOX_DEBUG_PHYS
3224 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3225#endif
3226 return val;
3227}
3228
3229
3230/**
3231 * Read guest RAM and ROM, unsigned 32-bit.
3232 *
3233 * @param SrcGCPhys The source address (guest physical).
3234 */
3235RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3236{
3237 uint32_t val;
3238 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3239 VBOX_CHECK_ADDR(SrcGCPhys);
3240 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3241 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3242#ifdef VBOX_DEBUG_PHYS
3243 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3244#endif
3245 return val;
3246}
3247
3248
3249/**
3250 * Read guest RAM and ROM, signed 32-bit.
3251 *
3252 * @param SrcGCPhys The source address (guest physical).
3253 */
3254RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3255{
3256 int32_t val;
3257 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3258 VBOX_CHECK_ADDR(SrcGCPhys);
3259 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3260 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3261#ifdef VBOX_DEBUG_PHYS
3262 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3263#endif
3264 return val;
3265}
3266
3267
3268/**
3269 * Read guest RAM and ROM, unsigned 64-bit.
3270 *
3271 * @param SrcGCPhys The source address (guest physical).
3272 */
3273uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3274{
3275 uint64_t val;
3276 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3277 VBOX_CHECK_ADDR(SrcGCPhys);
3278 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3279 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3280#ifdef VBOX_DEBUG_PHYS
3281 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3282#endif
3283 return val;
3284}
3285
3286
3287/**
3288 * Read guest RAM and ROM, signed 64-bit.
3289 *
3290 * @param SrcGCPhys The source address (guest physical).
3291 */
3292int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3293{
3294 int64_t val;
3295 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3296 VBOX_CHECK_ADDR(SrcGCPhys);
3297 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3298 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3299#ifdef VBOX_DEBUG_PHYS
3300 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3301#endif
3302 return val;
3303}
3304
3305
3306/**
3307 * Write guest RAM.
3308 *
3309 * @param DstGCPhys The destination address (guest physical).
3310 * @param pvSrc The source address.
3311 * @param cb Number of bytes to write
3312 */
3313void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3314{
3315 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3316 VBOX_CHECK_ADDR(DstGCPhys);
3317 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3318 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3319#ifdef VBOX_DEBUG_PHYS
3320 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3321#endif
3322}
3323
3324
3325/**
3326 * Write guest RAM, unsigned 8-bit.
3327 *
3328 * @param DstGCPhys The destination address (guest physical).
3329 * @param val Value
3330 */
3331void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3332{
3333 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3334 VBOX_CHECK_ADDR(DstGCPhys);
3335 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3336 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3337#ifdef VBOX_DEBUG_PHYS
3338 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3339#endif
3340}
3341
3342
3343/**
3344 * Write guest RAM, unsigned 8-bit.
3345 *
3346 * @param DstGCPhys The destination address (guest physical).
3347 * @param val Value
3348 */
3349void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3350{
3351 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3352 VBOX_CHECK_ADDR(DstGCPhys);
3353 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3354 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3355#ifdef VBOX_DEBUG_PHYS
3356 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3357#endif
3358}
3359
3360
3361/**
3362 * Write guest RAM, unsigned 32-bit.
3363 *
3364 * @param DstGCPhys The destination address (guest physical).
3365 * @param val Value
3366 */
3367void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3368{
3369 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3370 VBOX_CHECK_ADDR(DstGCPhys);
3371 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3372 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3373#ifdef VBOX_DEBUG_PHYS
3374 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3375#endif
3376}
3377
3378
3379/**
3380 * Write guest RAM, unsigned 64-bit.
3381 *
3382 * @param DstGCPhys The destination address (guest physical).
3383 * @param val Value
3384 */
3385void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3386{
3387 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3388 VBOX_CHECK_ADDR(DstGCPhys);
3389 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3390 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3391#ifdef VBOX_DEBUG_PHYS
3392 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3393#endif
3394}
3395
3396#undef LOG_GROUP
3397#define LOG_GROUP LOG_GROUP_REM_MMIO
3398
3399/** Read MMIO memory. */
3400static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3401{
3402 uint32_t u32 = 0;
3403 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3404 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3405 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3406 return u32;
3407}
3408
3409/** Read MMIO memory. */
3410static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3411{
3412 uint32_t u32 = 0;
3413 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3414 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3415 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3416 return u32;
3417}
3418
3419/** Read MMIO memory. */
3420static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3421{
3422 uint32_t u32 = 0;
3423 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3424 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3425 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3426 return u32;
3427}
3428
3429/** Write to MMIO memory. */
3430static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3431{
3432 int rc;
3433 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3434 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3435 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3436}
3437
3438/** Write to MMIO memory. */
3439static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3440{
3441 int rc;
3442 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3443 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3444 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3445}
3446
3447/** Write to MMIO memory. */
3448static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3449{
3450 int rc;
3451 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3452 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3453 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3454}
3455
3456
3457#undef LOG_GROUP
3458#define LOG_GROUP LOG_GROUP_REM_HANDLER
3459
3460/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3461
3462static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3463{
3464 uint8_t u8;
3465 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3466 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3467 return u8;
3468}
3469
3470static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3471{
3472 uint16_t u16;
3473 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3474 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3475 return u16;
3476}
3477
3478static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3479{
3480 uint32_t u32;
3481 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3482 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3483 return u32;
3484}
3485
/** Handler-memory write, 8-bit: goes through PGM so the handler fires.
 * NOTE(review): writes the first sizeof(uint8_t) byte of the 32-bit u32 in
 * host memory order - correct only on little-endian hosts; presumably fine
 * for the x86-only REM, but confirm before reusing elsewhere. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3491
/** Handler-memory write, 16-bit: goes through PGM so the handler fires.
 * NOTE(review): writes the low two bytes of u32 in host memory order -
 * little-endian host assumption, as in remR3HandlerWriteU8. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3497
3498static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3499{
3500 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3501 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3502}
3503
3504/* -+- disassembly -+- */
3505
3506#undef LOG_GROUP
3507#define LOG_GROUP LOG_GROUP_REM_DISAS
3508
3509
3510/**
3511 * Enables or disables singled stepped disassembly.
3512 *
3513 * @returns VBox status code.
3514 * @param pVM VM handle.
3515 * @param fEnable To enable set this flag, to disable clear it.
3516 */
3517static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3518{
3519 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3520 VM_ASSERT_EMT(pVM);
3521
3522 if (fEnable)
3523 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3524 else
3525 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3526 return VINF_SUCCESS;
3527}
3528
3529
3530/**
3531 * Enables or disables singled stepped disassembly.
3532 *
3533 * @returns VBox status code.
3534 * @param pVM VM handle.
3535 * @param fEnable To enable set this flag, to disable clear it.
3536 */
3537REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3538{
3539 PVMREQ pReq;
3540 int rc;
3541
3542 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3543 if (VM_IS_EMT(pVM))
3544 return remR3DisasEnableStepping(pVM, fEnable);
3545
3546 rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3547 AssertRC(rc);
3548 if (RT_SUCCESS(rc))
3549 rc = pReq->iStatus;
3550 VMR3ReqFree(pReq);
3551 return rc;
3552}
3553
3554
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument, prints the current stepping state; with one boolean
 * argument, enables/disables single stepped disassembly.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3579
3580
3581/**
3582 * Disassembles one instruction and prints it to the log.
3583 *
3584 * @returns Success indicator.
3585 * @param env Pointer to the recompiler CPU structure.
3586 * @param f32BitCode Indicates that whether or not the code should
3587 * be disassembled as 16 or 32 bit. If -1 the CS
3588 * selector will be inspected.
3589 * @param pszPrefix
3590 */
3591bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3592{
3593 PVM pVM = env->pVM;
3594 const bool fLog = LogIsEnabled();
3595 const bool fLog2 = LogIs2Enabled();
3596 int rc = VINF_SUCCESS;
3597
3598 /*
3599 * Don't bother if there ain't any log output to do.
3600 */
3601 if (!fLog && !fLog2)
3602 return true;
3603
3604 /*
3605 * Update the state so DBGF reads the correct register values.
3606 */
3607 remR3StateUpdate(pVM, env->pVCpu);
3608
3609 /*
3610 * Log registers if requested.
3611 */
3612 if (!fLog2)
3613 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3614
3615 /*
3616 * Disassemble to log.
3617 */
3618 if (fLog)
3619 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3620
3621 return RT_SUCCESS(rc);
3622}
3623
3624
3625/**
3626 * Disassemble recompiled code.
3627 *
3628 * @param phFileIgnored Ignored, logfile usually.
3629 * @param pvCode Pointer to the code block.
3630 * @param cb Size of the code block.
3631 */
3632void disas(FILE *phFile, void *pvCode, unsigned long cb)
3633{
3634#ifdef DEBUG_TMP_LOGGING
3635# define DISAS_PRINTF(x...) fprintf(phFile, x)
3636#else
3637# define DISAS_PRINTF(x...) RTLogPrintf(x)
3638 if (LogIs2Enabled())
3639#endif
3640 {
3641 unsigned off = 0;
3642 char szOutput[256];
3643 DISCPUSTATE Cpu;
3644
3645 memset(&Cpu, 0, sizeof(Cpu));
3646#ifdef RT_ARCH_X86
3647 Cpu.mode = CPUMODE_32BIT;
3648#else
3649 Cpu.mode = CPUMODE_64BIT;
3650#endif
3651
3652 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3653 while (off < cb)
3654 {
3655 uint32_t cbInstr;
3656 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3657 DISAS_PRINTF("%s", szOutput);
3658 else
3659 {
3660 DISAS_PRINTF("disas error\n");
3661 cbInstr = 1;
3662#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3663 break;
3664#endif
3665 }
3666 off += cbInstr;
3667 }
3668 }
3669
3670#undef DISAS_PRINTF
3671}
3672
3673
3674/**
3675 * Disassemble guest code.
3676 *
3677 * @param phFileIgnored Ignored, logfile usually.
3678 * @param uCode The guest address of the code to disassemble. (flat?)
3679 * @param cb Number of bytes to disassemble.
3680 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3681 */
3682void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3683{
3684#ifdef DEBUG_TMP_LOGGING
3685# define DISAS_PRINTF(x...) fprintf(phFile, x)
3686#else
3687# define DISAS_PRINTF(x...) RTLogPrintf(x)
3688 if (LogIs2Enabled())
3689#endif
3690 {
3691 PVM pVM = cpu_single_env->pVM;
3692 PVMCPU pVCpu = cpu_single_env->pVCpu;
3693 RTSEL cs;
3694 RTGCUINTPTR eip;
3695
3696 Assert(pVCpu);
3697
3698 /*
3699 * Update the state so DBGF reads the correct register values (flags).
3700 */
3701 remR3StateUpdate(pVM, pVCpu);
3702
3703 /*
3704 * Do the disassembling.
3705 */
3706 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3707 cs = cpu_single_env->segs[R_CS].selector;
3708 eip = uCode - cpu_single_env->segs[R_CS].base;
3709 for (;;)
3710 {
3711 char szBuf[256];
3712 uint32_t cbInstr;
3713 int rc = DBGFR3DisasInstrEx(pVM,
3714 pVCpu->idCpu,
3715 cs,
3716 eip,
3717 0,
3718 szBuf, sizeof(szBuf),
3719 &cbInstr);
3720 if (RT_SUCCESS(rc))
3721 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3722 else
3723 {
3724 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3725 cbInstr = 1;
3726 }
3727
3728 /* next */
3729 if (cb <= cbInstr)
3730 break;
3731 cb -= cbInstr;
3732 uCode += cbInstr;
3733 eip += cbInstr;
3734 }
3735 }
3736#undef DISAS_PRINTF
3737}
3738
3739
3740/**
3741 * Looks up a guest symbol.
3742 *
3743 * @returns Pointer to symbol name. This is a static buffer.
3744 * @param orig_addr The address in question.
3745 */
3746const char *lookup_symbol(target_ulong orig_addr)
3747{
3748 RTGCINTPTR off = 0;
3749 DBGFSYMBOL Sym;
3750 PVM pVM = cpu_single_env->pVM;
3751 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3752 if (RT_SUCCESS(rc))
3753 {
3754 static char szSym[sizeof(Sym.szName) + 48];
3755 if (!off)
3756 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3757 else if (off > 0)
3758 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3759 else
3760 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3761 return szSym;
3762 }
3763 return "<N/A>";
3764}
3765
3766
3767#undef LOG_GROUP
3768#define LOG_GROUP LOG_GROUP_REM
3769
3770
3771/* -+- FF notifications -+- */
3772
3773
3774/**
3775 * Notification about a pending interrupt.
3776 *
3777 * @param pVM VM Handle.
3778 * @param pVCpu VMCPU Handle.
3779 * @param u8Interrupt Interrupt
3780 * @thread The emulation thread.
3781 */
3782REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3783{
3784 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3785 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3786}
3787
3788/**
3789 * Notification about a pending interrupt.
3790 *
3791 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3792 * @param pVM VM Handle.
3793 * @param pVCpu VMCPU Handle.
3794 * @thread The emulation thread.
3795 */
3796REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3797{
3798 return pVM->rem.s.u32PendingInterrupt;
3799}
3800
3801/**
3802 * Notification about the interrupt FF being set.
3803 *
3804 * @param pVM VM Handle.
3805 * @param pVCpu VMCPU Handle.
3806 * @thread The emulation thread.
3807 */
3808REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3809{
3810 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3811 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3812 if (pVM->rem.s.fInREM)
3813 {
3814 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3815 CPU_INTERRUPT_EXTERNAL_HARD);
3816 }
3817}
3818
3819
3820/**
3821 * Notification about the interrupt FF being set.
3822 *
3823 * @param pVM VM Handle.
3824 * @param pVCpu VMCPU Handle.
3825 * @thread Any.
3826 */
3827REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3828{
3829 LogFlow(("REMR3NotifyInterruptClear:\n"));
3830 if (pVM->rem.s.fInREM)
3831 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3832}
3833
3834
3835/**
3836 * Notification about pending timer(s).
3837 *
3838 * @param pVM VM Handle.
3839 * @param pVCpuDst The target cpu for this notification.
3840 * TM will not broadcast pending timer events, but use
3841 * a decidated EMT for them. So, only interrupt REM
3842 * execution if the given CPU is executing in REM.
3843 * @thread Any.
3844 */
3845REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3846{
3847#ifndef DEBUG_bird
3848 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3849#endif
3850 if (pVM->rem.s.fInREM)
3851 {
3852 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3853 {
3854 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3855 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3856 CPU_INTERRUPT_EXTERNAL_TIMER);
3857 }
3858 else
3859 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3860 }
3861 else
3862 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3863}
3864
3865
3866/**
3867 * Notification about pending DMA transfers.
3868 *
3869 * @param pVM VM Handle.
3870 * @thread Any.
3871 */
3872REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3873{
3874 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3875 if (pVM->rem.s.fInREM)
3876 {
3877 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3878 CPU_INTERRUPT_EXTERNAL_DMA);
3879 }
3880}
3881
3882
3883/**
3884 * Notification about pending timer(s).
3885 *
3886 * @param pVM VM Handle.
3887 * @thread Any.
3888 */
3889REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3890{
3891 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3892 if (pVM->rem.s.fInREM)
3893 {
3894 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3895 CPU_INTERRUPT_EXTERNAL_EXIT);
3896 }
3897}
3898
3899
3900/**
3901 * Notification about pending FF set by an external thread.
3902 *
3903 * @param pVM VM handle.
3904 * @thread Any.
3905 */
3906REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3907{
3908 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3909 if (pVM->rem.s.fInREM)
3910 {
3911 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3912 CPU_INTERRUPT_EXTERNAL_EXIT);
3913 }
3914}
3915
3916
3917#ifdef VBOX_WITH_STATISTICS
3918void remR3ProfileStart(int statcode)
3919{
3920 STAMPROFILEADV *pStat;
3921 switch(statcode)
3922 {
3923 case STATS_EMULATE_SINGLE_INSTR:
3924 pStat = &gStatExecuteSingleInstr;
3925 break;
3926 case STATS_QEMU_COMPILATION:
3927 pStat = &gStatCompilationQEmu;
3928 break;
3929 case STATS_QEMU_RUN_EMULATED_CODE:
3930 pStat = &gStatRunCodeQEmu;
3931 break;
3932 case STATS_QEMU_TOTAL:
3933 pStat = &gStatTotalTimeQEmu;
3934 break;
3935 case STATS_QEMU_RUN_TIMERS:
3936 pStat = &gStatTimers;
3937 break;
3938 case STATS_TLB_LOOKUP:
3939 pStat= &gStatTBLookup;
3940 break;
3941 case STATS_IRQ_HANDLING:
3942 pStat= &gStatIRQ;
3943 break;
3944 case STATS_RAW_CHECK:
3945 pStat = &gStatRawCheck;
3946 break;
3947
3948 default:
3949 AssertMsgFailed(("unknown stat %d\n", statcode));
3950 return;
3951 }
3952 STAM_PROFILE_ADV_START(pStat, a);
3953}
3954
3955
3956void remR3ProfileStop(int statcode)
3957{
3958 STAMPROFILEADV *pStat;
3959 switch(statcode)
3960 {
3961 case STATS_EMULATE_SINGLE_INSTR:
3962 pStat = &gStatExecuteSingleInstr;
3963 break;
3964 case STATS_QEMU_COMPILATION:
3965 pStat = &gStatCompilationQEmu;
3966 break;
3967 case STATS_QEMU_RUN_EMULATED_CODE:
3968 pStat = &gStatRunCodeQEmu;
3969 break;
3970 case STATS_QEMU_TOTAL:
3971 pStat = &gStatTotalTimeQEmu;
3972 break;
3973 case STATS_QEMU_RUN_TIMERS:
3974 pStat = &gStatTimers;
3975 break;
3976 case STATS_TLB_LOOKUP:
3977 pStat= &gStatTBLookup;
3978 break;
3979 case STATS_IRQ_HANDLING:
3980 pStat= &gStatIRQ;
3981 break;
3982 case STATS_RAW_CHECK:
3983 pStat = &gStatRawCheck;
3984 break;
3985 default:
3986 AssertMsgFailed(("unknown stat %d\n", statcode));
3987 return;
3988 }
3989 STAM_PROFILE_ADV_STOP(pStat, a);
3990}
3991#endif
3992
3993/**
3994 * Raise an RC, force rem exit.
3995 *
3996 * @param pVM VM handle.
3997 * @param rc The rc.
3998 */
3999void remR3RaiseRC(PVM pVM, int rc)
4000{
4001 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4002 Assert(pVM->rem.s.fInREM);
4003 VM_ASSERT_EMT(pVM);
4004 pVM->rem.s.rc = rc;
4005 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4006}
4007
4008
4009/* -+- timers -+- */
4010
/**
 * Reads the guest CPU's time stamp counter via TM.
 *
 * @returns The current TSC value.
 * @param   env     The recompiler CPU state.
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4016
4017
4018/* -+- interrupts -+- */
4019
/**
 * Asserts the FPU error line (ISA IRQ 13).
 *
 * @param   env     The recompiler CPU state.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4025
/**
 * Gets the next pending interrupt vector for delivery by the recompiler.
 *
 * Prefers the interrupt parked by REMR3NotifyPendingInterrupt() (raw-mode
 * fallback) over querying PDM.
 *
 * @returns The interrupt vector, or -1 if nothing is pending.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the parked interrupt; it is delivered exactly once. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard interrupt request up while more PIC/APIC interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4058
4059
4060/* -+- local apic -+- */
4061
/**
 * Sets the APIC base MSR via PDM.
 *
 * @param   env     The recompiler CPU state.
 * @param   val     The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4067
4068uint64_t cpu_get_apic_base(CPUX86State *env)
4069{
4070 uint64_t u64;
4071 int rc = PDMApicGetBase(env->pVM, &u64);
4072 if (RT_SUCCESS(rc))
4073 {
4074 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4075 return u64;
4076 }
4077 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4078 return 0;
4079}
4080
/**
 * Sets the APIC task priority register via PDM.
 *
 * @param   env     The recompiler CPU state.
 * @param   val     The new TPR value.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4086
4087uint8_t cpu_get_apic_tpr(CPUX86State *env)
4088{
4089 uint8_t u8;
4090 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4091 if (RT_SUCCESS(rc))
4092 {
4093 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4094 return u8;
4095 }
4096 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4097 return 0;
4098}
4099
4100
4101uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4102{
4103 uint64_t value;
4104 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4105 if (RT_SUCCESS(rc))
4106 {
4107 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4108 return value;
4109 }
4110 /** @todo: exception ? */
4111 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4112 return value;
4113}
4114
/**
 * Writes an APIC MSR via PDM.
 *
 * @param   env     The recompiler CPU state.
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4121
/**
 * Reads a guest MSR via CPUM.
 *
 * @returns The MSR value.
 * @param   env     The recompiler CPU state.
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    Assert(env->pVCpu);
    return CPUMGetGuestMsr(env->pVCpu, msr);
}
4127
/**
 * Writes a guest MSR via CPUM.
 *
 * @param   env     The recompiler CPU state.
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    Assert(env->pVCpu);
    CPUMSetGuestMsr(env->pVCpu, msr, val);
}
4133
4134/* -+- I/O Ports -+- */
4135
4136#undef LOG_GROUP
4137#define LOG_GROUP LOG_GROUP_REM_IOPORT
4138
/**
 * Writes a byte to an I/O port via IOM.
 *
 * @param   env     The recompiler CPU state.
 * @param   addr    The port address.
 * @param   val     The value to write.
 */
void cpu_outb(CPUState *env, int addr, int val)
{
    int rc;

    /* Skip logging a few very chatty ports to keep the log readable. */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM status code: exit the recompiler and hand the status to EM. */
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4157
4158void cpu_outw(CPUState *env, int addr, int val)
4159{
4160 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4161 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4162 if (RT_LIKELY(rc == VINF_SUCCESS))
4163 return;
4164 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4165 {
4166 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4167 remR3RaiseRC(env->pVM, rc);
4168 return;
4169 }
4170 remAbort(rc, __FUNCTION__);
4171}
4172
/**
 * Writes a dword to an I/O port via IOM.
 *
 * @param   env     The recompiler CPU state.
 * @param   addr    The port address.
 * @param   val     The value to write.
 */
void cpu_outl(CPUState *env, int addr, int val)
{
    int rc;
    Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM status code: exit the recompiler and hand the status to EM. */
        Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4188
/**
 * Reads a byte from an I/O port via IOM.
 *
 * @returns The byte read, or 0xff if the read failed fatally.
 * @param   env     The recompiler CPU state.
 * @param   addr    The port address.
 */
int cpu_inb(CPUState *env, int addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Port 0x71 (CMOS data) is skipped to keep the log readable. */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (int)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM status code: exit the recompiler and hand the status to EM. */
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (int)u32;
    }
    remAbort(rc, __FUNCTION__);
    return 0xff;
}
4208
4209int cpu_inw(CPUState *env, int addr)
4210{
4211 uint32_t u32 = 0;
4212 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4213 if (RT_LIKELY(rc == VINF_SUCCESS))
4214 {
4215 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4216 return (int)u32;
4217 }
4218 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4219 {
4220 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4221 remR3RaiseRC(env->pVM, rc);
4222 return (int)u32;
4223 }
4224 remAbort(rc, __FUNCTION__);
4225 return 0xffff;
4226}
4227
/**
 * Reads a dword from an I/O port via IOM.
 *
 * @returns The dword read, or 0xffffffff if the read failed fatally.
 * @param   env     The recompiler CPU state.
 * @param   addr    The port address.
 */
int cpu_inl(CPUState *env, int addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
//if (addr==0x01f0 && u32 == 0x6b6d)
// loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return (int)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM status code: exit the recompiler and hand the status to EM. */
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (int)u32;
    }
    remAbort(rc, __FUNCTION__);
    return 0xffffffff;
}
4248
4249#undef LOG_GROUP
4250#define LOG_GROUP LOG_GROUP_REM
4251
4252
4253/* -+- helpers and misc other interfaces -+- */
4254
4255/**
4256 * Perform the CPUID instruction.
4257 *
4258 * ASMCpuId cannot be invoked from some source files where this is used because of global
4259 * register allocations.
4260 *
4261 * @param env Pointer to the recompiler CPU structure.
4262 * @param uOperator CPUID operation (eax).
4263 * @param pvEAX Where to store eax.
4264 * @param pvEBX Where to store ebx.
4265 * @param pvECX Where to store ecx.
4266 * @param pvEDX Where to store edx.
4267 */
4268void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4269{
4270 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4271}
4272
4273
4274#if 0 /* not used */
4275/**
4276 * Interface for qemu hardware to report back fatal errors.
4277 */
4278void hw_error(const char *pszFormat, ...)
4279{
4280 /*
4281 * Bitch about it.
4282 */
4283 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4284 * this in my Odin32 tree at home! */
4285 va_list args;
4286 va_start(args, pszFormat);
4287 RTLogPrintf("fatal error in virtual hardware:");
4288 RTLogPrintfV(pszFormat, args);
4289 va_end(args);
4290 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4291
4292 /*
4293 * If we're in REM context we'll sync back the state before 'jumping' to
4294 * the EMs failure handling.
4295 */
4296 PVM pVM = cpu_single_env->pVM;
4297 if (pVM->rem.s.fInREM)
4298 REMR3StateBack(pVM);
4299 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4300 AssertMsgFailed(("EMR3FatalError returned!\n"));
4301}
4302#endif
4303
4304/**
4305 * Interface for the qemu cpu to report unhandled situation
4306 * raising a fatal VM error.
4307 */
4308void cpu_abort(CPUState *env, const char *pszFormat, ...)
4309{
4310 va_list va;
4311 PVM pVM;
4312 PVMCPU pVCpu;
4313 char szMsg[256];
4314
4315 /*
4316 * Bitch about it.
4317 */
4318 RTLogFlags(NULL, "nodisabled nobuffered");
4319 RTLogFlush(NULL);
4320
4321 va_start(va, pszFormat);
4322#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4323 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4324 unsigned cArgs = 0;
4325 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4326 const char *psz = strchr(pszFormat, '%');
4327 while (psz && cArgs < 6)
4328 {
4329 auArgs[cArgs++] = va_arg(va, uintptr_t);
4330 psz = strchr(psz + 1, '%');
4331 }
4332 switch (cArgs)
4333 {
4334 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4335 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4336 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4337 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4338 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4339 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4340 default:
4341 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4342 }
4343#else
4344 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4345#endif
4346 va_end(va);
4347
4348 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4349 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4350
4351 /*
4352 * If we're in REM context we'll sync back the state before 'jumping' to
4353 * the EMs failure handling.
4354 */
4355 pVM = cpu_single_env->pVM;
4356 pVCpu = cpu_single_env->pVCpu;
4357 Assert(pVCpu);
4358
4359 if (pVM->rem.s.fInREM)
4360 REMR3StateBack(pVM, pVCpu);
4361 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4362 AssertMsgFailed(("EMR3FatalError returned!\n"));
4363}
4364
4365
4366/**
4367 * Aborts the VM.
4368 *
4369 * @param rc VBox error code.
4370 * @param pszTip Hint about why/when this happend.
4371 */
4372void remAbort(int rc, const char *pszTip)
4373{
4374 PVM pVM;
4375 PVMCPU pVCpu;
4376
4377 /*
4378 * Bitch about it.
4379 */
4380 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4381 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4382
4383 /*
4384 * Jump back to where we entered the recompiler.
4385 */
4386 pVM = cpu_single_env->pVM;
4387 pVCpu = cpu_single_env->pVCpu;
4388 Assert(pVCpu);
4389
4390 if (pVM->rem.s.fInREM)
4391 REMR3StateBack(pVM, pVCpu);
4392
4393 EMR3FatalError(pVCpu, rc);
4394 AssertMsgFailed(("EMR3FatalError returned!\n"));
4395}
4396
4397
4398/**
4399 * Dumps a linux system call.
4400 * @param pVCpu VMCPU handle.
4401 */
4402void remR3DumpLnxSyscall(PVMCPU pVCpu)
4403{
4404 static const char *apsz[] =
4405 {
4406 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4407 "sys_exit",
4408 "sys_fork",
4409 "sys_read",
4410 "sys_write",
4411 "sys_open", /* 5 */
4412 "sys_close",
4413 "sys_waitpid",
4414 "sys_creat",
4415 "sys_link",
4416 "sys_unlink", /* 10 */
4417 "sys_execve",
4418 "sys_chdir",
4419 "sys_time",
4420 "sys_mknod",
4421 "sys_chmod", /* 15 */
4422 "sys_lchown16",
4423 "sys_ni_syscall", /* old break syscall holder */
4424 "sys_stat",
4425 "sys_lseek",
4426 "sys_getpid", /* 20 */
4427 "sys_mount",
4428 "sys_oldumount",
4429 "sys_setuid16",
4430 "sys_getuid16",
4431 "sys_stime", /* 25 */
4432 "sys_ptrace",
4433 "sys_alarm",
4434 "sys_fstat",
4435 "sys_pause",
4436 "sys_utime", /* 30 */
4437 "sys_ni_syscall", /* old stty syscall holder */
4438 "sys_ni_syscall", /* old gtty syscall holder */
4439 "sys_access",
4440 "sys_nice",
4441 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4442 "sys_sync",
4443 "sys_kill",
4444 "sys_rename",
4445 "sys_mkdir",
4446 "sys_rmdir", /* 40 */
4447 "sys_dup",
4448 "sys_pipe",
4449 "sys_times",
4450 "sys_ni_syscall", /* old prof syscall holder */
4451 "sys_brk", /* 45 */
4452 "sys_setgid16",
4453 "sys_getgid16",
4454 "sys_signal",
4455 "sys_geteuid16",
4456 "sys_getegid16", /* 50 */
4457 "sys_acct",
4458 "sys_umount", /* recycled never used phys() */
4459 "sys_ni_syscall", /* old lock syscall holder */
4460 "sys_ioctl",
4461 "sys_fcntl", /* 55 */
4462 "sys_ni_syscall", /* old mpx syscall holder */
4463 "sys_setpgid",
4464 "sys_ni_syscall", /* old ulimit syscall holder */
4465 "sys_olduname",
4466 "sys_umask", /* 60 */
4467 "sys_chroot",
4468 "sys_ustat",
4469 "sys_dup2",
4470 "sys_getppid",
4471 "sys_getpgrp", /* 65 */
4472 "sys_setsid",
4473 "sys_sigaction",
4474 "sys_sgetmask",
4475 "sys_ssetmask",
4476 "sys_setreuid16", /* 70 */
4477 "sys_setregid16",
4478 "sys_sigsuspend",
4479 "sys_sigpending",
4480 "sys_sethostname",
4481 "sys_setrlimit", /* 75 */
4482 "sys_old_getrlimit",
4483 "sys_getrusage",
4484 "sys_gettimeofday",
4485 "sys_settimeofday",
4486 "sys_getgroups16", /* 80 */
4487 "sys_setgroups16",
4488 "old_select",
4489 "sys_symlink",
4490 "sys_lstat",
4491 "sys_readlink", /* 85 */
4492 "sys_uselib",
4493 "sys_swapon",
4494 "sys_reboot",
4495 "old_readdir",
4496 "old_mmap", /* 90 */
4497 "sys_munmap",
4498 "sys_truncate",
4499 "sys_ftruncate",
4500 "sys_fchmod",
4501 "sys_fchown16", /* 95 */
4502 "sys_getpriority",
4503 "sys_setpriority",
4504 "sys_ni_syscall", /* old profil syscall holder */
4505 "sys_statfs",
4506 "sys_fstatfs", /* 100 */
4507 "sys_ioperm",
4508 "sys_socketcall",
4509 "sys_syslog",
4510 "sys_setitimer",
4511 "sys_getitimer", /* 105 */
4512 "sys_newstat",
4513 "sys_newlstat",
4514 "sys_newfstat",
4515 "sys_uname",
4516 "sys_iopl", /* 110 */
4517 "sys_vhangup",
4518 "sys_ni_syscall", /* old "idle" system call */
4519 "sys_vm86old",
4520 "sys_wait4",
4521 "sys_swapoff", /* 115 */
4522 "sys_sysinfo",
4523 "sys_ipc",
4524 "sys_fsync",
4525 "sys_sigreturn",
4526 "sys_clone", /* 120 */
4527 "sys_setdomainname",
4528 "sys_newuname",
4529 "sys_modify_ldt",
4530 "sys_adjtimex",
4531 "sys_mprotect", /* 125 */
4532 "sys_sigprocmask",
4533 "sys_ni_syscall", /* old "create_module" */
4534 "sys_init_module",
4535 "sys_delete_module",
4536 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4537 "sys_quotactl",
4538 "sys_getpgid",
4539 "sys_fchdir",
4540 "sys_bdflush",
4541 "sys_sysfs", /* 135 */
4542 "sys_personality",
4543 "sys_ni_syscall", /* reserved for afs_syscall */
4544 "sys_setfsuid16",
4545 "sys_setfsgid16",
4546 "sys_llseek", /* 140 */
4547 "sys_getdents",
4548 "sys_select",
4549 "sys_flock",
4550 "sys_msync",
4551 "sys_readv", /* 145 */
4552 "sys_writev",
4553 "sys_getsid",
4554 "sys_fdatasync",
4555 "sys_sysctl",
4556 "sys_mlock", /* 150 */
4557 "sys_munlock",
4558 "sys_mlockall",
4559 "sys_munlockall",
4560 "sys_sched_setparam",
4561 "sys_sched_getparam", /* 155 */
4562 "sys_sched_setscheduler",
4563 "sys_sched_getscheduler",
4564 "sys_sched_yield",
4565 "sys_sched_get_priority_max",
4566 "sys_sched_get_priority_min", /* 160 */
4567 "sys_sched_rr_get_interval",
4568 "sys_nanosleep",
4569 "sys_mremap",
4570 "sys_setresuid16",
4571 "sys_getresuid16", /* 165 */
4572 "sys_vm86",
4573 "sys_ni_syscall", /* Old sys_query_module */
4574 "sys_poll",
4575 "sys_nfsservctl",
4576 "sys_setresgid16", /* 170 */
4577 "sys_getresgid16",
4578 "sys_prctl",
4579 "sys_rt_sigreturn",
4580 "sys_rt_sigaction",
4581 "sys_rt_sigprocmask", /* 175 */
4582 "sys_rt_sigpending",
4583 "sys_rt_sigtimedwait",
4584 "sys_rt_sigqueueinfo",
4585 "sys_rt_sigsuspend",
4586 "sys_pread64", /* 180 */
4587 "sys_pwrite64",
4588 "sys_chown16",
4589 "sys_getcwd",
4590 "sys_capget",
4591 "sys_capset", /* 185 */
4592 "sys_sigaltstack",
4593 "sys_sendfile",
4594 "sys_ni_syscall", /* reserved for streams1 */
4595 "sys_ni_syscall", /* reserved for streams2 */
4596 "sys_vfork", /* 190 */
4597 "sys_getrlimit",
4598 "sys_mmap2",
4599 "sys_truncate64",
4600 "sys_ftruncate64",
4601 "sys_stat64", /* 195 */
4602 "sys_lstat64",
4603 "sys_fstat64",
4604 "sys_lchown",
4605 "sys_getuid",
4606 "sys_getgid", /* 200 */
4607 "sys_geteuid",
4608 "sys_getegid",
4609 "sys_setreuid",
4610 "sys_setregid",
4611 "sys_getgroups", /* 205 */
4612 "sys_setgroups",
4613 "sys_fchown",
4614 "sys_setresuid",
4615 "sys_getresuid",
4616 "sys_setresgid", /* 210 */
4617 "sys_getresgid",
4618 "sys_chown",
4619 "sys_setuid",
4620 "sys_setgid",
4621 "sys_setfsuid", /* 215 */
4622 "sys_setfsgid",
4623 "sys_pivot_root",
4624 "sys_mincore",
4625 "sys_madvise",
4626 "sys_getdents64", /* 220 */
4627 "sys_fcntl64",
4628 "sys_ni_syscall", /* reserved for TUX */
4629 "sys_ni_syscall",
4630 "sys_gettid",
4631 "sys_readahead", /* 225 */
4632 "sys_setxattr",
4633 "sys_lsetxattr",
4634 "sys_fsetxattr",
4635 "sys_getxattr",
4636 "sys_lgetxattr", /* 230 */
4637 "sys_fgetxattr",
4638 "sys_listxattr",
4639 "sys_llistxattr",
4640 "sys_flistxattr",
4641 "sys_removexattr", /* 235 */
4642 "sys_lremovexattr",
4643 "sys_fremovexattr",
4644 "sys_tkill",
4645 "sys_sendfile64",
4646 "sys_futex", /* 240 */
4647 "sys_sched_setaffinity",
4648 "sys_sched_getaffinity",
4649 "sys_set_thread_area",
4650 "sys_get_thread_area",
4651 "sys_io_setup", /* 245 */
4652 "sys_io_destroy",
4653 "sys_io_getevents",
4654 "sys_io_submit",
4655 "sys_io_cancel",
4656 "sys_fadvise64", /* 250 */
4657 "sys_ni_syscall",
4658 "sys_exit_group",
4659 "sys_lookup_dcookie",
4660 "sys_epoll_create",
4661 "sys_epoll_ctl", /* 255 */
4662 "sys_epoll_wait",
4663 "sys_remap_file_pages",
4664 "sys_set_tid_address",
4665 "sys_timer_create",
4666 "sys_timer_settime", /* 260 */
4667 "sys_timer_gettime",
4668 "sys_timer_getoverrun",
4669 "sys_timer_delete",
4670 "sys_clock_settime",
4671 "sys_clock_gettime", /* 265 */
4672 "sys_clock_getres",
4673 "sys_clock_nanosleep",
4674 "sys_statfs64",
4675 "sys_fstatfs64",
4676 "sys_tgkill", /* 270 */
4677 "sys_utimes",
4678 "sys_fadvise64_64",
4679 "sys_ni_syscall" /* sys_vserver */
4680 };
4681
4682 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
4683 switch (uEAX)
4684 {
4685 default:
4686 if (uEAX < RT_ELEMENTS(apsz))
4687 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4688 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
4689 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
4690 else
4691 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
4692 break;
4693
4694 }
4695}
4696
4697
4698/**
4699 * Dumps an OpenBSD system call.
4700 * @param pVCpu VMCPU handle.
4701 */
4702void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4703{
4704 static const char *apsz[] =
4705 {
4706 "SYS_syscall", //0
4707 "SYS_exit", //1
4708 "SYS_fork", //2
4709 "SYS_read", //3
4710 "SYS_write", //4
4711 "SYS_open", //5
4712 "SYS_close", //6
4713 "SYS_wait4", //7
4714 "SYS_8",
4715 "SYS_link", //9
4716 "SYS_unlink", //10
4717 "SYS_11",
4718 "SYS_chdir", //12
4719 "SYS_fchdir", //13
4720 "SYS_mknod", //14
4721 "SYS_chmod", //15
4722 "SYS_chown", //16
4723 "SYS_break", //17
4724 "SYS_18",
4725 "SYS_19",
4726 "SYS_getpid", //20
4727 "SYS_mount", //21
4728 "SYS_unmount", //22
4729 "SYS_setuid", //23
4730 "SYS_getuid", //24
4731 "SYS_geteuid", //25
4732 "SYS_ptrace", //26
4733 "SYS_recvmsg", //27
4734 "SYS_sendmsg", //28
4735 "SYS_recvfrom", //29
4736 "SYS_accept", //30
4737 "SYS_getpeername", //31
4738 "SYS_getsockname", //32
4739 "SYS_access", //33
4740 "SYS_chflags", //34
4741 "SYS_fchflags", //35
4742 "SYS_sync", //36
4743 "SYS_kill", //37
4744 "SYS_38",
4745 "SYS_getppid", //39
4746 "SYS_40",
4747 "SYS_dup", //41
4748 "SYS_opipe", //42
4749 "SYS_getegid", //43
4750 "SYS_profil", //44
4751 "SYS_ktrace", //45
4752 "SYS_sigaction", //46
4753 "SYS_getgid", //47
4754 "SYS_sigprocmask", //48
4755 "SYS_getlogin", //49
4756 "SYS_setlogin", //50
4757 "SYS_acct", //51
4758 "SYS_sigpending", //52
4759 "SYS_osigaltstack", //53
4760 "SYS_ioctl", //54
4761 "SYS_reboot", //55
4762 "SYS_revoke", //56
4763 "SYS_symlink", //57
4764 "SYS_readlink", //58
4765 "SYS_execve", //59
4766 "SYS_umask", //60
4767 "SYS_chroot", //61
4768 "SYS_62",
4769 "SYS_63",
4770 "SYS_64",
4771 "SYS_65",
4772 "SYS_vfork", //66
4773 "SYS_67",
4774 "SYS_68",
4775 "SYS_sbrk", //69
4776 "SYS_sstk", //70
4777 "SYS_61",
4778 "SYS_vadvise", //72
4779 "SYS_munmap", //73
4780 "SYS_mprotect", //74
4781 "SYS_madvise", //75
4782 "SYS_76",
4783 "SYS_77",
4784 "SYS_mincore", //78
4785 "SYS_getgroups", //79
4786 "SYS_setgroups", //80
4787 "SYS_getpgrp", //81
4788 "SYS_setpgid", //82
4789 "SYS_setitimer", //83
4790 "SYS_84",
4791 "SYS_85",
4792 "SYS_getitimer", //86
4793 "SYS_87",
4794 "SYS_88",
4795 "SYS_89",
4796 "SYS_dup2", //90
4797 "SYS_91",
4798 "SYS_fcntl", //92
4799 "SYS_select", //93
4800 "SYS_94",
4801 "SYS_fsync", //95
4802 "SYS_setpriority", //96
4803 "SYS_socket", //97
4804 "SYS_connect", //98
4805 "SYS_99",
4806 "SYS_getpriority", //100
4807 "SYS_101",
4808 "SYS_102",
4809 "SYS_sigreturn", //103
4810 "SYS_bind", //104
4811 "SYS_setsockopt", //105
4812 "SYS_listen", //106
4813 "SYS_107",
4814 "SYS_108",
4815 "SYS_109",
4816 "SYS_110",
4817 "SYS_sigsuspend", //111
4818 "SYS_112",
4819 "SYS_113",
4820 "SYS_114",
4821 "SYS_115",
4822 "SYS_gettimeofday", //116
4823 "SYS_getrusage", //117
4824 "SYS_getsockopt", //118
4825 "SYS_119",
4826 "SYS_readv", //120
4827 "SYS_writev", //121
4828 "SYS_settimeofday", //122
4829 "SYS_fchown", //123
4830 "SYS_fchmod", //124
4831 "SYS_125",
4832 "SYS_setreuid", //126
4833 "SYS_setregid", //127
4834 "SYS_rename", //128
4835 "SYS_129",
4836 "SYS_130",
4837 "SYS_flock", //131
4838 "SYS_mkfifo", //132
4839 "SYS_sendto", //133
4840 "SYS_shutdown", //134
4841 "SYS_socketpair", //135
4842 "SYS_mkdir", //136
4843 "SYS_rmdir", //137
4844 "SYS_utimes", //138
4845 "SYS_139",
4846 "SYS_adjtime", //140
4847 "SYS_141",
4848 "SYS_142",
4849 "SYS_143",
4850 "SYS_144",
4851 "SYS_145",
4852 "SYS_146",
4853 "SYS_setsid", //147
4854 "SYS_quotactl", //148
4855 "SYS_149",
4856 "SYS_150",
4857 "SYS_151",
4858 "SYS_152",
4859 "SYS_153",
4860 "SYS_154",
4861 "SYS_nfssvc", //155
4862 "SYS_156",
4863 "SYS_157",
4864 "SYS_158",
4865 "SYS_159",
4866 "SYS_160",
4867 "SYS_getfh", //161
4868 "SYS_162",
4869 "SYS_163",
4870 "SYS_164",
4871 "SYS_sysarch", //165
4872 "SYS_166",
4873 "SYS_167",
4874 "SYS_168",
4875 "SYS_169",
4876 "SYS_170",
4877 "SYS_171",
4878 "SYS_172",
4879 "SYS_pread", //173
4880 "SYS_pwrite", //174
4881 "SYS_175",
4882 "SYS_176",
4883 "SYS_177",
4884 "SYS_178",
4885 "SYS_179",
4886 "SYS_180",
4887 "SYS_setgid", //181
4888 "SYS_setegid", //182
4889 "SYS_seteuid", //183
4890 "SYS_lfs_bmapv", //184
4891 "SYS_lfs_markv", //185
4892 "SYS_lfs_segclean", //186
4893 "SYS_lfs_segwait", //187
4894 "SYS_188",
4895 "SYS_189",
4896 "SYS_190",
4897 "SYS_pathconf", //191
4898 "SYS_fpathconf", //192
4899 "SYS_swapctl", //193
4900 "SYS_getrlimit", //194
4901 "SYS_setrlimit", //195
4902 "SYS_getdirentries", //196
4903 "SYS_mmap", //197
4904 "SYS___syscall", //198
4905 "SYS_lseek", //199
4906 "SYS_truncate", //200
4907 "SYS_ftruncate", //201
4908 "SYS___sysctl", //202
4909 "SYS_mlock", //203
4910 "SYS_munlock", //204
4911 "SYS_205",
4912 "SYS_futimes", //206
4913 "SYS_getpgid", //207
4914 "SYS_xfspioctl", //208
4915 "SYS_209",
4916 "SYS_210",
4917 "SYS_211",
4918 "SYS_212",
4919 "SYS_213",
4920 "SYS_214",
4921 "SYS_215",
4922 "SYS_216",
4923 "SYS_217",
4924 "SYS_218",
4925 "SYS_219",
4926 "SYS_220",
4927 "SYS_semget", //221
4928 "SYS_222",
4929 "SYS_223",
4930 "SYS_224",
4931 "SYS_msgget", //225
4932 "SYS_msgsnd", //226
4933 "SYS_msgrcv", //227
4934 "SYS_shmat", //228
4935 "SYS_229",
4936 "SYS_shmdt", //230
4937 "SYS_231",
4938 "SYS_clock_gettime", //232
4939 "SYS_clock_settime", //233
4940 "SYS_clock_getres", //234
4941 "SYS_235",
4942 "SYS_236",
4943 "SYS_237",
4944 "SYS_238",
4945 "SYS_239",
4946 "SYS_nanosleep", //240
4947 "SYS_241",
4948 "SYS_242",
4949 "SYS_243",
4950 "SYS_244",
4951 "SYS_245",
4952 "SYS_246",
4953 "SYS_247",
4954 "SYS_248",
4955 "SYS_249",
4956 "SYS_minherit", //250
4957 "SYS_rfork", //251
4958 "SYS_poll", //252
4959 "SYS_issetugid", //253
4960 "SYS_lchown", //254
4961 "SYS_getsid", //255
4962 "SYS_msync", //256
4963 "SYS_257",
4964 "SYS_258",
4965 "SYS_259",
4966 "SYS_getfsstat", //260
4967 "SYS_statfs", //261
4968 "SYS_fstatfs", //262
4969 "SYS_pipe", //263
4970 "SYS_fhopen", //264
4971 "SYS_265",
4972 "SYS_fhstatfs", //266
4973 "SYS_preadv", //267
4974 "SYS_pwritev", //268
4975 "SYS_kqueue", //269
4976 "SYS_kevent", //270
4977 "SYS_mlockall", //271
4978 "SYS_munlockall", //272
4979 "SYS_getpeereid", //273
4980 "SYS_274",
4981 "SYS_275",
4982 "SYS_276",
4983 "SYS_277",
4984 "SYS_278",
4985 "SYS_279",
4986 "SYS_280",
4987 "SYS_getresuid", //281
4988 "SYS_setresuid", //282
4989 "SYS_getresgid", //283
4990 "SYS_setresgid", //284
4991 "SYS_285",
4992 "SYS_mquery", //286
4993 "SYS_closefrom", //287
4994 "SYS_sigaltstack", //288
4995 "SYS_shmget", //289
4996 "SYS_semop", //290
4997 "SYS_stat", //291
4998 "SYS_fstat", //292
4999 "SYS_lstat", //293
5000 "SYS_fhstat", //294
5001 "SYS___semctl", //295
5002 "SYS_shmctl", //296
5003 "SYS_msgctl", //297
5004 "SYS_MAXSYSCALL", //298
5005 //299
5006 //300
5007 };
5008 uint32_t uEAX;
5009 if (!LogIsEnabled())
5010 return;
5011 uEAX = CPUMGetGuestEAX(pVCpu);
5012 switch (uEAX)
5013 {
5014 default:
5015 if (uEAX < RT_ELEMENTS(apsz))
5016 {
5017 uint32_t au32Args[8] = {0};
5018 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5019 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5020 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5021 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5022 }
5023 else
5024 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5025 break;
5026 }
5027}
5028
5029
5030#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5031/**
5032 * The Dll main entry point (stub).
5033 */
5034bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5035{
5036 return true;
5037}
5038
5039void *memcpy(void *dst, const void *src, size_t size)
5040{
5041 uint8_t*pbDst = dst, *pbSrc = src;
5042 while (size-- > 0)
5043 *pbDst++ = *pbSrc++;
5044 return dst;
5045}
5046
5047#endif
5048
/**
 * SMM state-change callback required by the recompiler core (intentional no-op here).
 *
 * @param   env     The recompiler CPU state.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette