VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 19032

Last change on this file since 19032 was 19032, checked in by vboxsync, 16 years ago

Split TM for SMP guests.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 157.2 KB
Line 
1/* $Id: VBoxRecompiler.c 19032 2009-04-20 15:03:08Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 *
 * Implemented as a struct assignment of X86FPUMMX, so the compiler emits an
 * inline fixed-size copy; both arguments are evaluated exactly once.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145#endif
146/* in exec.c */
147extern uint32_t tlb_flush_count;
148extern uint32_t tb_flush_count;
149extern uint32_t tb_phys_invalidate_count;
150
151/*
152 * Global stuff.
153 */
154
155/** MMIO read callbacks. */
156CPUReadMemoryFunc *g_apfnMMIORead[3] =
157{
158 remR3MMIOReadU8,
159 remR3MMIOReadU16,
160 remR3MMIOReadU32
161};
162
163/** MMIO write callbacks. */
164CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
165{
166 remR3MMIOWriteU8,
167 remR3MMIOWriteU16,
168 remR3MMIOWriteU32
169};
170
171/** Handler read callbacks. */
172CPUReadMemoryFunc *g_apfnHandlerRead[3] =
173{
174 remR3HandlerReadU8,
175 remR3HandlerReadU16,
176 remR3HandlerReadU32
177};
178
179/** Handler write callbacks. */
180CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
181{
182 remR3HandlerWriteU8,
183 remR3HandlerWriteU16,
184 remR3HandlerWriteU32
185};
186
187
188#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
189/*
190 * Debugger commands.
191 */
192static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
193
194/** '.remstep' arguments. */
195static const DBGCVARDESC g_aArgRemStep[] =
196{
197 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
198 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
199};
200
201/** Command descriptors. */
202static const DBGCCMD g_aCmds[] =
203{
204 {
205 .pszCmd ="remstep",
206 .cArgsMin = 0,
207 .cArgsMax = 1,
208 .paArgDescs = &g_aArgRemStep[0],
209 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
210 .pResultDesc = NULL,
211 .fFlags = 0,
212 .pfnHandler = remR3CmdDisasEnableStepping,
213 .pszSyntax = "[on/off]",
214 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
215 "If no arguments show the current state."
216 }
217};
218#endif
219
220/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
221uint8_t *code_gen_prologue;
222
223
224/*******************************************************************************
225* Internal Functions *
226*******************************************************************************/
227void remAbort(int rc, const char *pszTip);
228extern int testmath(void);
229
230/* Put them here to avoid unused variable warning. */
231AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
232#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
233//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
234/* Why did this have to be identical?? */
235AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
236#else
237AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
238#endif
239
240
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment (QEMU CPUX86State embedded in
 * pVM->rem.s.Env), allocates the code generation prologue and the single
 * instruction code buffer, registers the MMIO/handler memory types, the
 * saved state unit, debugger commands and statistics.
 *
 * Must run before any physical memory is registered with MM (asserted
 * below) because REM relies on seeing every registration notification.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all notifications while we bring up the recompiler; the init
       calls below would otherwise trigger callbacks into half-set-up state. */
    pVM->rem.s.fIgnoreAll = true;

    /* Prologue code must be in executable memory (and low memory, see the
       declaration of code_gen_prologue above). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature bits into the recompiler state (std
       leaf 1 and ext leaf 0x80000001). */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    /* Static flag so the commands are registered only once even if several
       VMs are initialized in the same process. */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",     STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",     STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",     STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",      STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    /* NOTE(review): the three descriptions below look copy/pasted from the
       Timers entry; they probably should describe TB lookup, IRQ delivery
       and raw-mode checking respectively. */
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",    STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",         STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",    STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",     STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    /* NOTE(review): "convertion" is a typo in the two strings below; left
       untouched here as fixing it would change registered stat metadata. */
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",        STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",  STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",      STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",    STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",       STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",  STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",       STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",    STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",       STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",  STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",          STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",   STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",      STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",   STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",        STAMUNIT_OCCURENCES, "TR selector changes");

    /* Indices follow the QEMU segment register order: ES, CS, SS, DS, FS, GS. */
    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* These release-build stats wrap counters living in exec.c (see the
       extern declarations near the top of the file). */
    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /* rc holds the SSMR3RegisterInternal status (success if we got here). */
    return rc;
}
410
411
412/**
413 * Finalizes the REM initialization.
414 *
415 * This is called after all components, devices and drivers has
416 * been initialized. Its main purpose it to finish the RAM related
417 * initialization.
418 *
419 * @returns VBox status code.
420 *
421 * @param pVM The VM handle.
422 */
423REMR3DECL(int) REMR3InitFinalize(PVM pVM)
424{
425 int rc;
426
427 /*
428 * Ram size & dirty bit map.
429 */
430 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
431 pVM->rem.s.fGCPhysLastRamFixed = true;
432#ifdef RT_STRICT
433 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
434#else
435 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
436#endif
437 return rc;
438}
439
440
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map holds one byte per guest page. When fGuarded is set the
 * map is page-allocated with an inaccessible tail region so that reads or
 * writes past the end of the map fault immediately (strict builds only,
 * see REMR3InitFinalize).
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* GCPhysLastRam is the last byte; +1 gives the size.  Overflow check:
       if GCPhysLastRam is the maximum RTGCPHYS value, cb wraps to 0. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty byte per page; assert cb was page aligned (no remainder lost). */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything past the aligned bitmap inaccessible (the guard). */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map pointer so its END abuts the guard region; accesses
           one byte past phys_ram_dirty_size then hit the protected pages. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages initially dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
495
496
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently only deregisters statistics; NOTE(review): the exec memory
 * allocated in REMR3Init (code_gen_prologue, Env.pvCodeBuffer) is not
 * freed here — presumably released with the process; confirm whether a
 * leak matters for repeated VM creation in one process.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    /* Deregister the release-build counters living in exec.c. */
    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
567
568
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.  Notifications are suppressed around cpu_reset()
     * (fIgnoreAll) so the callbacks it triggers don't feed back into us.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;   /* drop queued page invalidations. */
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
593
594
/**
 * Execute state save operation.
 *
 * Stream layout (must stay in sync with remR3Load):
 *   u32 hflags, u32 ~0 separator, u32 raw-ring-0 flag,
 *   uint pending interrupt, u32 ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Intermediate Put errors are not checked; the terminator's status
       reflects the stream state, which SSM tracks cumulatively. */
    return SSMR3PutU32(pSSM, ~0);   /* terminator */
}
621
622
/**
 * Execute state load operation.
 *
 * Accepts both the current stream layout (see remR3Save) and the 1.6
 * layout, which additionally carried a redundant CPU state dump and the
 * invalidated page list.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    unsigned i;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);    /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Sanity check the count before reading into the fixed-size array. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     * NOTE(review): unlike REMR3Init, ext3_features is not refreshed here
     * (third argument is u32Dummy) — presumably fine because Init already
     * set it and it doesn't change; confirm.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignornable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        /* Force a full state resync into REM on the next execution. */
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
748
749
750
751#undef LOG_GROUP
752#define LOG_GROUP LOG_GROUP_REM_RUN
753
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    /* Save the interrupt request flags so they can be restored below. */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     */
    /* Flat PC = EIP + CS base; fBp records whether a breakpoint was here
       so it can be reinserted after the step. */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Resume/pause immediately: nudge the TSC and virtual clock so
           time appears to advance for the single stepped instruction. */
        TMCpuTickResume(pVCpu);
        TMCpuTickPause(pVCpu);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the QEMU exit code to a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was parked in rem.s.rc; fetch and clear it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
840
841
842/**
843 * Set a breakpoint using the REM facilities.
844 *
845 * @returns VBox status code.
846 * @param pVM The VM handle.
847 * @param Address The breakpoint address.
848 * @thread The emulation thread.
849 */
850REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
851{
852 VM_ASSERT_EMT(pVM);
853 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
854 {
855 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
856 return VINF_SUCCESS;
857 }
858 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
859 return VERR_REM_NO_MORE_BP_SLOTS;
860}
861
862
863/**
864 * Clears a breakpoint set by REMR3BreakpointSet().
865 *
866 * @returns VBox status code.
867 * @param pVM The VM handle.
868 * @param Address The breakpoint address.
869 * @thread The emulation thread.
870 */
871REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
872{
873 VM_ASSERT_EMT(pVM);
874 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
875 {
876 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
877 return VINF_SUCCESS;
878 }
879 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
880 return VERR_REM_BP_NOT_FOUND;
881}
882
883
884/**
885 * Emulate an instruction.
886 *
887 * This function executes one instruction without letting anyone
888 * interrupt it. This is intended for being called while being in
889 * raw mode and thus will take care of all the state syncing between
890 * REM and the rest.
891 *
892 * @returns VBox status code.
893 * @param pVM VM handle.
894 * @param pVCpu VMCPU Handle.
895 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs; /* restore the flush request we suppressed above */
    if (RT_SUCCESS(rc))
    {
        /* Remember the interrupt request state so it can be restored after the
           single-instruction run (see the bottom of this scope). */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        /* Map the recompiler exit code onto a VBox status code. */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * A breakpoint at this address would produce the same exit code,
             * so we scan the breakpoint table to tell the two apart.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* consume the stashed status */
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1032
1033
1034/**
1035 * Runs code in recompiled mode.
1036 *
1037 * Before calling this function the REM state needs to be in sync with
1038 * the VM. Call REMR3State() to perform the sync. It's only necessary
1039 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1040 * and after calling REMR3StateBack().
1041 *
1042 * @returns VBox status code.
1043 *
1044 * @param pVM VM Handle.
1045 * @param pVCpu VMCPU Handle.
1046 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Translate the recompiler exit code into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the breakpoint table to decide. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* consume the stashed status */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1162
1163
1164/**
1165 * Check if the cpu state is suitable for Raw execution.
1166 *
1167 * @returns boolean
1168 * @param env The CPU env struct.
1169 * @param eip The EIP to check this for (might differ from env->eip).
1170 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1171 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1172 *
1173 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1174 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         * (only the registers that routine inspects are filled in).
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Both paging and protected mode must be enabled for raw execution. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 code path (rings 1 and 2 are rejected below). */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always allowed to run raw, regardless of the checks below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1398
1399
1400/**
1401 * Fetches a code byte.
1402 *
1403 * @returns Success indicator (bool) for ease of use.
1404 * @param env The CPU environment structure.
1405 * @param GCPtrInstr Where to fetch code.
1406 * @param pu8Byte Where to store the byte on success
1407 */
1408bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1409{
1410 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1411 if (RT_SUCCESS(rc))
1412 return true;
1413 return false;
1414}
1415
1416
1417/**
1418 * Flush (or invalidate if you like) page table/dir entry.
1419 *
1420 * (invlpg instruction; tlb_flush_page)
1421 *
1422 * @param env Pointer to cpu environment.
1423 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1424 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle forces a TSS resync. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        /* Fall back to a full CR3 sync on failure. */
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1465
1466
1467#ifndef REM_PHYS_ADDR_IN_TLB
1468/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
/* Translates a guest-physical address to an R3 pointer, encoding access
   status in the two low bits of the result (hence the alignment assert):
   return value 1 = unassigned/catch-all, low bit 2 set = write-monitored. */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored and the lookup always
       requests a writable mapping — confirm this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
         || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
         || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
         || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1488#endif /* REM_PHYS_ADDR_IN_TLB */
1489
1490
1491/**
1492 * Called from tlb_protect_code in order to write monitor a code page.
1493 *
1494 * @param env Pointer to the CPU environment.
1495 * @param GCPtr Code page to monitor
1496 */
1497void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1498{
1499#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1500 Assert(env->pVM->rem.s.fInREM);
1501 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1502 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1503 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1504 && !(env->eflags & VM_MASK) /* no V86 mode */
1505 && !HWACCMIsEnabled(env->pVM))
1506 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1507#endif
1508}
1509
1510
1511/**
1512 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1513 *
1514 * @param env Pointer to the CPU environment.
1515 * @param GCPtr Code page to monitor
1516 */
1517void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1518{
1519 Assert(env->pVM->rem.s.fInREM);
1520#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1521 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1522 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1523 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1524 && !(env->eflags & VM_MASK) /* no V86 mode */
1525 && !HWACCMIsEnabled(env->pVM))
1526 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1527#endif
1528}
1529
1530
1531/**
1532 * Called when the CPU is initialized, any of the CRx registers are changed or
1533 * when the A20 line is modified.
1534 *
1535 * @param env Pointer to the CPU environment.
1536 * @param fGlobal Set if the flush is global.
1537 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle forces a TSS resync. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1575
1576
1577/**
1578 * Called when any of the cr0, cr4 or efer registers is updated.
1579 *
1580 * @param env Pointer to the CPU environment.
1581 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME toggle forces a TSS resync. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    /* 32-bit targets have no EFER field in the CPU state. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        /* EM informational statuses are forwarded via remR3RaiseRC; anything
           else is fatal and aborts the recompiler. */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1627
1628
1629/**
1630 * Called from compiled code to run dma.
1631 *
1632 * @param env Pointer to the CPU environment.
1633 */
void remR3DmaRun(CPUState *env)
{
    /* Suspend the emulated-code profiling bracket while PDM runs DMA,
       then resume it before returning to the translated code. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1640
1641
1642/**
1643 * Called from compiled code to schedule pending timers in VMM
1644 *
1645 * @param env Pointer to the CPU environment.
1646 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    /* Swap the profiling bracket from emulated-code to timer work while the
       TM timer queues are processed, then switch back. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1656
1657
1658/**
 * Record trap occurrence
1660 *
1661 * @returns VBox status code
1662 * @param env Pointer to the CPU environment.
1663 * @param uTrap Trap nr
1664 * @param uErrorCode Error code
1665 * @param pvNextEIP Next EIP
1666 */
1667int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1668{
1669 PVM pVM = env->pVM;
1670#ifdef VBOX_WITH_STATISTICS
1671 static STAMCOUNTER s_aStatTrap[255];
1672 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1673#endif
1674
1675#ifdef VBOX_WITH_STATISTICS
1676 if (uTrap < 255)
1677 {
1678 if (!s_aRegisters[uTrap])
1679 {
1680 char szStatName[64];
1681 s_aRegisters[uTrap] = true;
1682 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1683 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1684 }
1685 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1686 }
1687#endif
1688 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1689 if( uTrap < 0x20
1690 && (env->cr[0] & X86_CR0_PE)
1691 && !(env->eflags & X86_EFL_VM))
1692 {
1693#ifdef DEBUG
1694 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1695#endif
1696 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1697 {
1698 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1699 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1700 return VERR_REM_TOO_MANY_TRAPS;
1701 }
1702 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1703 pVM->rem.s.cPendingExceptions = 1;
1704 pVM->rem.s.uPendingException = uTrap;
1705 pVM->rem.s.uPendingExcptEIP = env->eip;
1706 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1707 }
1708 else
1709 {
1710 pVM->rem.s.cPendingExceptions = 0;
1711 pVM->rem.s.uPendingException = uTrap;
1712 pVM->rem.s.uPendingExcptEIP = env->eip;
1713 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1714 }
1715 return VINF_SUCCESS;
1716}
1717
1718
1719/*
1720 * Clear current active trap
1721 *
1722 * @param pVM VM Handle.
1723 */
1724void remR3TrapClear(PVM pVM)
1725{
1726 pVM->rem.s.cPendingExceptions = 0;
1727 pVM->rem.s.uPendingException = 0;
1728 pVM->rem.s.uPendingExcptEIP = 0;
1729 pVM->rem.s.uPendingExcptCR2 = 0;
1730}
1731
1732
1733/*
1734 * Record previous call instruction addresses
1735 *
1736 * @param env Pointer to the CPU environment.
1737 */
void remR3RecordCall(CPUState *env)
{
    /* Forward the current eip to CSAM's call-address recording. */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1742
1743
1744/**
1745 * Syncs the internal REM state with the VM.
1746 *
1747 * This must be called before REMR3Run() is invoked whenever when the REM
1748 * state is not up to date. Calling it several times in a row is not
1749 * permitted.
1750 *
1751 * @returns VBox status code.
1752 *
1753 * @param pVM VM Handle.
1754 * @param pVCpu VMCPU Handle.
1755 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
1759 */
1760REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1761{
1762 register const CPUMCTX *pCtx;
1763 register unsigned fFlags;
1764 bool fHiddenSelRegsValid;
1765 unsigned i;
1766 TRPMEVENT enmType;
1767 uint8_t u8TrapNo;
1768 int rc;
1769
1770 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1771 Log2(("REMR3State:\n"));
1772
1773 pVM->rem.s.Env.pVCpu = pVCpu;
1774 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1775 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1776
1777 Assert(!pVM->rem.s.fInREM);
1778 pVM->rem.s.fInStateSync = true;
1779
1780 /*
1781 * If we have to flush TBs, do that immediately.
1782 */
1783 if (pVM->rem.s.fFlushTBs)
1784 {
1785 STAM_COUNTER_INC(&gStatFlushTBs);
1786 tb_flush(&pVM->rem.s.Env);
1787 pVM->rem.s.fFlushTBs = false;
1788 }
1789
1790 /*
1791 * Copy the registers which require no special handling.
1792 */
1793#ifdef TARGET_X86_64
1794 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1795 Assert(R_EAX == 0);
1796 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1797 Assert(R_ECX == 1);
1798 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1799 Assert(R_EDX == 2);
1800 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1801 Assert(R_EBX == 3);
1802 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1803 Assert(R_ESP == 4);
1804 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1805 Assert(R_EBP == 5);
1806 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1807 Assert(R_ESI == 6);
1808 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1809 Assert(R_EDI == 7);
1810 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1811 pVM->rem.s.Env.regs[8] = pCtx->r8;
1812 pVM->rem.s.Env.regs[9] = pCtx->r9;
1813 pVM->rem.s.Env.regs[10] = pCtx->r10;
1814 pVM->rem.s.Env.regs[11] = pCtx->r11;
1815 pVM->rem.s.Env.regs[12] = pCtx->r12;
1816 pVM->rem.s.Env.regs[13] = pCtx->r13;
1817 pVM->rem.s.Env.regs[14] = pCtx->r14;
1818 pVM->rem.s.Env.regs[15] = pCtx->r15;
1819
1820 pVM->rem.s.Env.eip = pCtx->rip;
1821
1822 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1823#else
1824 Assert(R_EAX == 0);
1825 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1826 Assert(R_ECX == 1);
1827 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1828 Assert(R_EDX == 2);
1829 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1830 Assert(R_EBX == 3);
1831 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1832 Assert(R_ESP == 4);
1833 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1834 Assert(R_EBP == 5);
1835 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1836 Assert(R_ESI == 6);
1837 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1838 Assert(R_EDI == 7);
1839 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1840 pVM->rem.s.Env.eip = pCtx->eip;
1841
1842 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1843#endif
1844
1845 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1846
1847 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1848 for (i=0;i<8;i++)
1849 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1850
1851 /*
1852 * Clear the halted hidden flag (the interrupt waking up the CPU can
1853 * have been dispatched in raw mode).
1854 */
1855 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1856
1857 /*
1858 * Replay invlpg?
1859 */
1860 if (pVM->rem.s.cInvalidatedPages)
1861 {
1862 RTUINT i;
1863
1864 pVM->rem.s.fIgnoreInvlPg = true;
1865 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1866 {
1867 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1868 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1869 }
1870 pVM->rem.s.fIgnoreInvlPg = false;
1871 pVM->rem.s.cInvalidatedPages = 0;
1872 }
1873
1874 /* Replay notification changes? */
1875 if (pVM->rem.s.cHandlerNotifications)
1876 REMR3ReplayHandlerNotifications(pVM);
1877
1878 /* Update MSRs; before CRx registers! */
1879 pVM->rem.s.Env.efer = pCtx->msrEFER;
1880 pVM->rem.s.Env.star = pCtx->msrSTAR;
1881 pVM->rem.s.Env.pat = pCtx->msrPAT;
1882#ifdef TARGET_X86_64
1883 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1884 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1885 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1886 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1887
1888 /* Update the internal long mode activate flag according to the new EFER value. */
1889 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1890 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1891 else
1892 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1893#endif
1894
1895 /*
1896 * Registers which are rarely changed and require special handling / order when changed.
1897 */
1898 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1899 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1900 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1901 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1902 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1903 {
1904 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1905 {
1906 pVM->rem.s.fIgnoreCR3Load = true;
1907 tlb_flush(&pVM->rem.s.Env, true);
1908 pVM->rem.s.fIgnoreCR3Load = false;
1909 }
1910
1911 /* CR4 before CR0! */
1912 if (fFlags & CPUM_CHANGED_CR4)
1913 {
1914 pVM->rem.s.fIgnoreCR3Load = true;
1915 pVM->rem.s.fIgnoreCpuMode = true;
1916 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1917 pVM->rem.s.fIgnoreCpuMode = false;
1918 pVM->rem.s.fIgnoreCR3Load = false;
1919 }
1920
1921 if (fFlags & CPUM_CHANGED_CR0)
1922 {
1923 pVM->rem.s.fIgnoreCR3Load = true;
1924 pVM->rem.s.fIgnoreCpuMode = true;
1925 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1926 pVM->rem.s.fIgnoreCpuMode = false;
1927 pVM->rem.s.fIgnoreCR3Load = false;
1928 }
1929
1930 if (fFlags & CPUM_CHANGED_CR3)
1931 {
1932 pVM->rem.s.fIgnoreCR3Load = true;
1933 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1934 pVM->rem.s.fIgnoreCR3Load = false;
1935 }
1936
1937 if (fFlags & CPUM_CHANGED_GDTR)
1938 {
1939 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1940 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1941 }
1942
1943 if (fFlags & CPUM_CHANGED_IDTR)
1944 {
1945 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1946 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1947 }
1948
1949 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1950 {
1951 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1952 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1953 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1954 }
1955
1956 if (fFlags & CPUM_CHANGED_LDTR)
1957 {
1958 if (fHiddenSelRegsValid)
1959 {
1960 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1961 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1962 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1963 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1964 }
1965 else
1966 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1967 }
1968
1969 if (fFlags & CPUM_CHANGED_CPUID)
1970 {
1971 uint32_t u32Dummy;
1972
1973 /*
1974 * Get the CPUID features.
1975 */
1976 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1977 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1978 }
1979
1980 /* Sync FPU state after CR4, CPUID and EFER (!). */
1981 if (fFlags & CPUM_CHANGED_FPU_REM)
1982 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1983 }
1984
1985 /*
1986 * Sync TR unconditionally to make life simpler.
1987 */
1988 pVM->rem.s.Env.tr.selector = pCtx->tr;
1989 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1990 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1991 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1992 /* Note! do_interrupt will fault if the busy flag is still set... */
1993 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1994
1995 /*
1996 * Update selector registers.
1997 * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
1999 * saved state which takes a quick dip into rawmode for instance.
2000 */
2001 /*
2002 * Stack; Note first check this one as the CPL might have changed. The
2003 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2004 */
2005
2006 if (fHiddenSelRegsValid)
2007 {
2008 /* The hidden selector registers are valid in the CPU context. */
2009 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2010
2011 /* Set current CPL */
2012 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2013
2014 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2015 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2016 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2017 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2018 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2019 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2020 }
2021 else
2022 {
2023 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2024 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2025 {
2026 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2027
2028 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2029 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2030#ifdef VBOX_WITH_STATISTICS
2031 if (pVM->rem.s.Env.segs[R_SS].newselector)
2032 {
2033 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2034 }
2035#endif
2036 }
2037 else
2038 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2039
2040 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2041 {
2042 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2043 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2044#ifdef VBOX_WITH_STATISTICS
2045 if (pVM->rem.s.Env.segs[R_ES].newselector)
2046 {
2047 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2048 }
2049#endif
2050 }
2051 else
2052 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2053
2054 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2055 {
2056 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2057 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2058#ifdef VBOX_WITH_STATISTICS
2059 if (pVM->rem.s.Env.segs[R_CS].newselector)
2060 {
2061 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2062 }
2063#endif
2064 }
2065 else
2066 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2067
2068 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2069 {
2070 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2071 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2072#ifdef VBOX_WITH_STATISTICS
2073 if (pVM->rem.s.Env.segs[R_DS].newselector)
2074 {
2075 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2076 }
2077#endif
2078 }
2079 else
2080 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2081
2082 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2083 * be the same but not the base/limit. */
2084 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2085 {
2086 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2087 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2088#ifdef VBOX_WITH_STATISTICS
2089 if (pVM->rem.s.Env.segs[R_FS].newselector)
2090 {
2091 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2092 }
2093#endif
2094 }
2095 else
2096 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2097
2098 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2099 {
2100 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2101 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2102#ifdef VBOX_WITH_STATISTICS
2103 if (pVM->rem.s.Env.segs[R_GS].newselector)
2104 {
2105 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2106 }
2107#endif
2108 }
2109 else
2110 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2111 }
2112
2113 /*
2114 * Check for traps.
2115 */
2116 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2117 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2118 if (RT_SUCCESS(rc))
2119 {
2120#ifdef DEBUG
2121 if (u8TrapNo == 0x80)
2122 {
2123 remR3DumpLnxSyscall(pVCpu);
2124 remR3DumpOBsdSyscall(pVCpu);
2125 }
2126#endif
2127
2128 pVM->rem.s.Env.exception_index = u8TrapNo;
2129 if (enmType != TRPM_SOFTWARE_INT)
2130 {
2131 pVM->rem.s.Env.exception_is_int = 0;
2132 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2133 }
2134 else
2135 {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
             * for int03 and into.
             */
2141 pVM->rem.s.Env.exception_is_int = 1;
2142 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2143 /* int 3 may be generated by one-byte 0xcc */
2144 if (u8TrapNo == 3)
2145 {
2146 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2147 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2148 }
2149 /* int 4 may be generated by one-byte 0xce */
2150 else if (u8TrapNo == 4)
2151 {
2152 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2153 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2154 }
2155 }
2156
2157 /* get error code and cr2 if needed. */
2158 switch (u8TrapNo)
2159 {
2160 case 0x0e:
2161 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2162 /* fallthru */
2163 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2164 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2165 break;
2166
2167 case 0x11: case 0x08:
2168 default:
2169 pVM->rem.s.Env.error_code = 0;
2170 break;
2171 }
2172
2173 /*
2174 * We can now reset the active trap since the recompiler is gonna have a go at it.
2175 */
2176 rc = TRPMResetTrap(pVCpu);
2177 AssertRC(rc);
2178 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2179 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2180 }
2181
2182 /*
2183 * Clear old interrupt request flags; Check for pending hardware interrupts.
2184 * (See @remark for why we don't check for other FFs.)
2185 */
2186 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2187 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2188 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2189 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2190
2191 /*
2192 * We're now in REM mode.
2193 */
2194 pVM->rem.s.fInREM = true;
2195 pVM->rem.s.fInStateSync = false;
2196 pVM->rem.s.cCanExecuteRaw = 0;
2197 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2198 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2199 return VINF_SUCCESS;
2200}
2201
2202
2203/**
 * Syncs back changes in the REM state to the VM state.
2205 *
2206 * This must be called after invoking REMR3Run().
2207 * Calling it several times in a row is not permitted.
2208 *
2209 * @returns VBox status code.
2210 *
2211 * @param pVM VM Handle.
2212 * @param pVCpu VMCPU Handle.
2213 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM); /* REMR3State must have been called first. */

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors the recompiler marked as lazily-resolved (couldn't load them eagerly). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle changes how the TSS interrupt redirection bitmap is used -> resync the TSS. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: a base change means SELM/TRPM shadow tables must be resynced. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    /* LDTR: 0xF0FF keeps only the attribute word sans the limit 19:16 bits QEmu stores in flags. */
    if (    pCtx->ldtr != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                   ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                   : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-apply the busy bit for any non-null TR (REMR3State stripped it for do_interrupt). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * Hand any exception still pending in the recompiler back to TRPM.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Forward error code / fault address for the traps that carry them. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2452
2453
2454/**
2455 * This is called by the disassembler when it wants to update the cpu state
2456 * before for instance doing a register dump.
2457 */
2458static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2459{
2460 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2461 unsigned i;
2462
2463 Assert(pVM->rem.s.fInREM);
2464
2465 /*
2466 * Copy back the registers.
2467 * This is done in the order they are declared in the CPUMCTX structure.
2468 */
2469
2470 /** @todo FOP */
2471 /** @todo FPUIP */
2472 /** @todo CS */
2473 /** @todo FPUDP */
2474 /** @todo DS */
2475 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2476 pCtx->fpu.MXCSR = 0;
2477 pCtx->fpu.MXCSR_MASK = 0;
2478
2479 /** @todo check if FPU/XMM was actually used in the recompiler */
2480 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2481//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2482
2483#ifdef TARGET_X86_64
2484 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2485 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2486 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2487 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2488 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2489 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2490 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2491 pCtx->r8 = pVM->rem.s.Env.regs[8];
2492 pCtx->r9 = pVM->rem.s.Env.regs[9];
2493 pCtx->r10 = pVM->rem.s.Env.regs[10];
2494 pCtx->r11 = pVM->rem.s.Env.regs[11];
2495 pCtx->r12 = pVM->rem.s.Env.regs[12];
2496 pCtx->r13 = pVM->rem.s.Env.regs[13];
2497 pCtx->r14 = pVM->rem.s.Env.regs[14];
2498 pCtx->r15 = pVM->rem.s.Env.regs[15];
2499
2500 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2501#else
2502 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2503 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2504 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2505 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2506 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2507 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2508 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2509
2510 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2511#endif
2512
2513 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2514
2515 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2516 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2517 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2518 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2519 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2520
2521#ifdef TARGET_X86_64
2522 pCtx->rip = pVM->rem.s.Env.eip;
2523 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2524#else
2525 pCtx->eip = pVM->rem.s.Env.eip;
2526 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2527#endif
2528
2529 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2530 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2531 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2532 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2533 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2534 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2535
2536 for (i = 0; i < 8; i++)
2537 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2538
2539 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2540 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2541 {
2542 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2543 STAM_COUNTER_INC(&gStatREMGDTChange);
2544 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2545 }
2546
2547 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2548 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2549 {
2550 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2551 STAM_COUNTER_INC(&gStatREMIDTChange);
2552 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2553 }
2554
2555 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2556 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2557 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2558 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2559 {
2560 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2561 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2562 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2563 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2564 STAM_COUNTER_INC(&gStatREMLDTRChange);
2565 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2566 }
2567
2568 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2569 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2570 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2571 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2572 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2573 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2574 : 0) )
2575 {
2576 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2577 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2578 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2579 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2580 pCtx->tr = pVM->rem.s.Env.tr.selector;
2581 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2582 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2583 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2584 if (pCtx->trHid.Attr.u)
2585 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2586 STAM_COUNTER_INC(&gStatREMTRChange);
2587 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2588 }
2589
2590 /** @todo These values could still be out of sync! */
2591 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2592 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2593 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2594 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2595
2596 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2597 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2598 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2599
2600 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2601 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2602 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2603
2604 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2605 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2606 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2607
2608 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2609 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2610 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2611
2612 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2613 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2614 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2615
2616 /* Sysenter MSR */
2617 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2618 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2619 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2620
2621 /* System MSRs. */
2622 pCtx->msrEFER = pVM->rem.s.Env.efer;
2623 pCtx->msrSTAR = pVM->rem.s.Env.star;
2624 pCtx->msrPAT = pVM->rem.s.Env.pat;
2625#ifdef TARGET_X86_64
2626 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2627 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2628 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2629 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2630#endif
2631
2632}
2633
2634
2635/**
2636 * Update the VMM state information if we're currently in REM.
2637 *
2638 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2639 * we're currently executing in REM and the VMM state is invalid. This method will of
2640 * course check that we're executing in REM before syncing any data over to the VMM.
2641 *
2642 * @param pVM The VM handle.
2643 * @param pVCpu The VMCPU handle.
2644 */
2645REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2646{
2647 if (pVM->rem.s.fInREM)
2648 remR3StateUpdate(pVM, pVCpu);
2649}
2650
2651
2652#undef LOG_GROUP
2653#define LOG_GROUP LOG_GROUP_REM
2654
2655
2656/**
2657 * Notify the recompiler about Address Gate 20 state change.
2658 *
2659 * This notification is required since A20 gate changes are
2660 * initialized from a device driver and the VM might just as
2661 * well be in REM mode as in RAW mode.
2662 *
2663 * @param pVM VM handle.
2664 * @param pVCpu VMCPU handle.
2665 * @param fEnable True if the gate should be enabled.
2666 * False if the gate should be disabled.
2667 */
2668REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2669{
2670 bool fSaved;
2671
2672 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2673 VM_ASSERT_EMT(pVM);
2674
2675 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2676 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2677
2678 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2679
2680 pVM->rem.s.fIgnoreAll = fSaved;
2681}
2682
2683
2684/**
2685 * Replays the invalidated recorded pages.
2686 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2687 *
2688 * @param pVM VM handle.
2689 * @param pVCpu VMCPU handle.
2690 */
2691REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM, PVMCPU pVCpu)
2692{
2693 RTUINT i;
2694
2695 VM_ASSERT_EMT(pVM);
2696
2697 /*
2698 * Sync the required registers.
2699 */
2700 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2701 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2702 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2703 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2704
2705 /*
2706 * Replay the flushes.
2707 */
2708 pVM->rem.s.fIgnoreInvlPg = true;
2709 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2710 {
2711 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2712 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2713 }
2714 pVM->rem.s.fIgnoreInvlPg = false;
2715 pVM->rem.s.cInvalidatedPages = 0;
2716}
2717
2718
2719/**
2720 * Replays the handler notification changes
2721 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2722 *
2723 * @param pVM VM handle.
2724 */
2725REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2726{
2727 /*
2728 * Replay the flushes.
2729 */
2730 RTUINT i;
2731 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2732
2733 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2734 VM_ASSERT_EMT(pVM);
2735
2736 pVM->rem.s.cHandlerNotifications = 0;
2737 for (i = 0; i < c; i++)
2738 {
2739 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2740 switch (pRec->enmKind)
2741 {
2742 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2743 REMR3NotifyHandlerPhysicalRegister(pVM,
2744 pRec->u.PhysicalRegister.enmType,
2745 pRec->u.PhysicalRegister.GCPhys,
2746 pRec->u.PhysicalRegister.cb,
2747 pRec->u.PhysicalRegister.fHasHCHandler);
2748 break;
2749
2750 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2751 REMR3NotifyHandlerPhysicalDeregister(pVM,
2752 pRec->u.PhysicalDeregister.enmType,
2753 pRec->u.PhysicalDeregister.GCPhys,
2754 pRec->u.PhysicalDeregister.cb,
2755 pRec->u.PhysicalDeregister.fHasHCHandler,
2756 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2757 break;
2758
2759 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2760 REMR3NotifyHandlerPhysicalModify(pVM,
2761 pRec->u.PhysicalModify.enmType,
2762 pRec->u.PhysicalModify.GCPhysOld,
2763 pRec->u.PhysicalModify.GCPhysNew,
2764 pRec->u.PhysicalModify.cb,
2765 pRec->u.PhysicalModify.fHasHCHandler,
2766 pRec->u.PhysicalModify.fRestoreAsRAM);
2767 break;
2768
2769 default:
2770 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2771 break;
2772 }
2773 }
2774 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2775}
2776
2777
/**
 * Notify REM about changed code page.
 *
 * The recompiled blocks translated from the page are invalidated so they get
 * regenerated from the new code (only when VBOX_REM_PROTECT_PAGES_FROM_SMC is
 * defined; otherwise this is a no-op).
 *
 * @returns VBox status code. Always VINF_SUCCESS, even when the guest page
 *          lookup fails (best effort).
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pvCodePage  Code page address
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2816
2817
2818/**
2819 * Notification about a successful MMR3PhysRegister() call.
2820 *
2821 * @param pVM VM handle.
2822 * @param GCPhys The physical address the RAM.
2823 * @param cb Size of the memory.
2824 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2825 */
2826REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2827{
2828 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2829 VM_ASSERT_EMT(pVM);
2830
2831 /*
2832 * Validate input - we trust the caller.
2833 */
2834 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2835 Assert(cb);
2836 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2837 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2838
2839 /*
2840 * Base ram? Update GCPhysLastRam.
2841 */
2842 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2843 {
2844 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2845 {
2846 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2847 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2848 }
2849 }
2850
2851 /*
2852 * Register the ram.
2853 */
2854 Assert(!pVM->rem.s.fIgnoreAll);
2855 pVM->rem.s.fIgnoreAll = true;
2856
2857 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2858 Assert(pVM->rem.s.fIgnoreAll);
2859 pVM->rem.s.fIgnoreAll = false;
2860}
2861
2862
2863/**
2864 * Notification about a successful MMR3PhysRomRegister() call.
2865 *
2866 * @param pVM VM handle.
2867 * @param GCPhys The physical address of the ROM.
2868 * @param cb The size of the ROM.
2869 * @param pvCopy Pointer to the ROM copy.
2870 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2871 * This function will be called when ever the protection of the
2872 * shadow ROM changes (at reset and end of POST).
2873 */
2874REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2875{
2876 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2877 VM_ASSERT_EMT(pVM);
2878
2879 /*
2880 * Validate input - we trust the caller.
2881 */
2882 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2883 Assert(cb);
2884 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2885
2886 /*
2887 * Register the rom.
2888 */
2889 Assert(!pVM->rem.s.fIgnoreAll);
2890 pVM->rem.s.fIgnoreAll = true;
2891
2892 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2893
2894 Assert(pVM->rem.s.fIgnoreAll);
2895 pVM->rem.s.fIgnoreAll = false;
2896}
2897
2898
2899/**
2900 * Notification about a successful memory deregistration or reservation.
2901 *
2902 * @param pVM VM Handle.
2903 * @param GCPhys Start physical address.
2904 * @param cb The size of the range.
2905 */
2906REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2907{
2908 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2909 VM_ASSERT_EMT(pVM);
2910
2911 /*
2912 * Validate input - we trust the caller.
2913 */
2914 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2915 Assert(cb);
2916 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2917
2918 /*
2919 * Unassigning the memory.
2920 */
2921 Assert(!pVM->rem.s.fIgnoreAll);
2922 pVM->rem.s.fIgnoreAll = true;
2923
2924 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2925
2926 Assert(pVM->rem.s.fIgnoreAll);
2927 pVM->rem.s.fIgnoreAll = false;
2928}
2929
2930
2931/**
2932 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2933 *
2934 * @param pVM VM Handle.
2935 * @param enmType Handler type.
2936 * @param GCPhys Handler range address.
2937 * @param cb Size of the handler range.
2938 * @param fHasHCHandler Set if the handler has a HC callback function.
2939 *
2940 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2941 * Handler memory type to memory which has no HC handler.
2942 */
2943REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2944{
2945 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2946 enmType, GCPhys, cb, fHasHCHandler));
2947 VM_ASSERT_EMT(pVM);
2948 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2949 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2950
2951 if (pVM->rem.s.cHandlerNotifications)
2952 REMR3ReplayHandlerNotifications(pVM);
2953
2954 Assert(!pVM->rem.s.fIgnoreAll);
2955 pVM->rem.s.fIgnoreAll = true;
2956
2957 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2958 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2959 else if (fHasHCHandler)
2960 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2961
2962 Assert(pVM->rem.s.fIgnoreAll);
2963 pVM->rem.s.fIgnoreAll = false;
2964}
2965
2966
2967/**
2968 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2969 *
2970 * @param pVM VM Handle.
2971 * @param enmType Handler type.
2972 * @param GCPhys Handler range address.
2973 * @param cb Size of the handler range.
2974 * @param fHasHCHandler Set if the handler has a HC callback function.
2975 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2976 */
2977REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2978{
2979 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2980 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2981 VM_ASSERT_EMT(pVM);
2982
2983 if (pVM->rem.s.cHandlerNotifications)
2984 REMR3ReplayHandlerNotifications(pVM);
2985
2986 Assert(!pVM->rem.s.fIgnoreAll);
2987 pVM->rem.s.fIgnoreAll = true;
2988
2989/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2990 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2991 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2992 else if (fHasHCHandler)
2993 {
2994 if (!fRestoreAsRAM)
2995 {
2996 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2997 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2998 }
2999 else
3000 {
3001 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3002 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3003 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3004 }
3005 }
3006
3007 Assert(pVM->rem.s.fIgnoreAll);
3008 pVM->rem.s.fIgnoreAll = false;
3009}
3010
3011
3012/**
3013 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3014 *
3015 * @param pVM VM Handle.
3016 * @param enmType Handler type.
3017 * @param GCPhysOld Old handler range address.
3018 * @param GCPhysNew New handler range address.
3019 * @param cb Size of the handler range.
3020 * @param fHasHCHandler Set if the handler has a HC callback function.
3021 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3022 */
3023REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3024{
3025 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3026 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3027 VM_ASSERT_EMT(pVM);
3028 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3029
3030 if (pVM->rem.s.cHandlerNotifications)
3031 REMR3ReplayHandlerNotifications(pVM);
3032
3033 if (fHasHCHandler)
3034 {
3035 Assert(!pVM->rem.s.fIgnoreAll);
3036 pVM->rem.s.fIgnoreAll = true;
3037
3038 /*
3039 * Reset the old page.
3040 */
3041 if (!fRestoreAsRAM)
3042 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3043 else
3044 {
3045 /* This is not perfect, but it'll do for PD monitoring... */
3046 Assert(cb == PAGE_SIZE);
3047 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3048 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3049 }
3050
3051 /*
3052 * Update the new page.
3053 */
3054 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3055 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3056 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3057
3058 Assert(pVM->rem.s.fIgnoreAll);
3059 pVM->rem.s.fIgnoreAll = false;
3060 }
3061}
3062
3063
3064/**
3065 * Checks if we're handling access to this page or not.
3066 *
3067 * @returns true if we're trapping access.
3068 * @returns false if we aren't.
3069 * @param pVM The VM handle.
3070 * @param GCPhys The physical address.
3071 *
3072 * @remark This function will only work correctly in VBOX_STRICT builds!
3073 */
3074REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3075{
3076#ifdef VBOX_STRICT
3077 unsigned long off;
3078 if (pVM->rem.s.cHandlerNotifications)
3079 REMR3ReplayHandlerNotifications(pVM);
3080
3081 off = get_phys_page_offset(GCPhys);
3082 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3083 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3084 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3085#else
3086 return false;
3087#endif
3088}
3089
3090
3091/**
3092 * Deals with a rare case in get_phys_addr_code where the code
3093 * is being monitored.
3094 *
3095 * It could also be an MMIO page, in which case we will raise a fatal error.
3096 *
3097 * @returns The physical address corresponding to addr.
3098 * @param env The cpu environment.
3099 * @param addr The virtual address.
3100 * @param pTLBEntry The TLB entry.
3101 */
3102target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3103 target_ulong addr,
3104 CPUTLBEntry* pTLBEntry,
3105 target_phys_addr_t ioTLBEntry)
3106{
3107 PVM pVM = env->pVM;
3108
3109 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3110 {
3111 /* If code memory is being monitored, appropriate IOTLB entry will have
3112 handler IO type, and addend will provide real physical address, no
3113 matter if we store VA in TLB or not, as handlers are always passed PA */
3114 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3115 return ret;
3116 }
3117 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3118 "*** handlers\n",
3119 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3120 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3121 LogRel(("*** mmio\n"));
3122 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3123 LogRel(("*** phys\n"));
3124 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3125 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3126 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3127 AssertFatalFailed();
3128}
3129
3130/**
3131 * Read guest RAM and ROM.
3132 *
3133 * @param SrcGCPhys The source address (guest physical).
3134 * @param pvDst The destination address.
3135 * @param cb Number of bytes
3136 */
3137void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3138{
3139 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3140 VBOX_CHECK_ADDR(SrcGCPhys);
3141 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3142#ifdef VBOX_DEBUG_PHYS
3143 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3144#endif
3145 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3146}
3147
3148
3149/**
3150 * Read guest RAM and ROM, unsigned 8-bit.
3151 *
3152 * @param SrcGCPhys The source address (guest physical).
3153 */
3154RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3155{
3156 uint8_t val;
3157 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3158 VBOX_CHECK_ADDR(SrcGCPhys);
3159 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3160 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3161#ifdef VBOX_DEBUG_PHYS
3162 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3163#endif
3164 return val;
3165}
3166
3167
3168/**
3169 * Read guest RAM and ROM, signed 8-bit.
3170 *
3171 * @param SrcGCPhys The source address (guest physical).
3172 */
3173RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3174{
3175 int8_t val;
3176 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3177 VBOX_CHECK_ADDR(SrcGCPhys);
3178 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3179 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3180#ifdef VBOX_DEBUG_PHYS
3181 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3182#endif
3183 return val;
3184}
3185
3186
3187/**
3188 * Read guest RAM and ROM, unsigned 16-bit.
3189 *
3190 * @param SrcGCPhys The source address (guest physical).
3191 */
3192RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3193{
3194 uint16_t val;
3195 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3196 VBOX_CHECK_ADDR(SrcGCPhys);
3197 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3198 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3199#ifdef VBOX_DEBUG_PHYS
3200 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3201#endif
3202 return val;
3203}
3204
3205
3206/**
3207 * Read guest RAM and ROM, signed 16-bit.
3208 *
3209 * @param SrcGCPhys The source address (guest physical).
3210 */
3211RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3212{
3213 int16_t val;
3214 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3215 VBOX_CHECK_ADDR(SrcGCPhys);
3216 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3217 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3218#ifdef VBOX_DEBUG_PHYS
3219 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3220#endif
3221 return val;
3222}
3223
3224
3225/**
3226 * Read guest RAM and ROM, unsigned 32-bit.
3227 *
3228 * @param SrcGCPhys The source address (guest physical).
3229 */
3230RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3231{
3232 uint32_t val;
3233 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3234 VBOX_CHECK_ADDR(SrcGCPhys);
3235 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3236 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3237#ifdef VBOX_DEBUG_PHYS
3238 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3239#endif
3240 return val;
3241}
3242
3243
3244/**
3245 * Read guest RAM and ROM, signed 32-bit.
3246 *
3247 * @param SrcGCPhys The source address (guest physical).
3248 */
3249RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3250{
3251 int32_t val;
3252 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3253 VBOX_CHECK_ADDR(SrcGCPhys);
3254 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3255 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3256#ifdef VBOX_DEBUG_PHYS
3257 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3258#endif
3259 return val;
3260}
3261
3262
3263/**
3264 * Read guest RAM and ROM, unsigned 64-bit.
3265 *
3266 * @param SrcGCPhys The source address (guest physical).
3267 */
3268uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3269{
3270 uint64_t val;
3271 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3272 VBOX_CHECK_ADDR(SrcGCPhys);
3273 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3274 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3275#ifdef VBOX_DEBUG_PHYS
3276 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3277#endif
3278 return val;
3279}
3280
3281
3282/**
3283 * Read guest RAM and ROM, signed 64-bit.
3284 *
3285 * @param SrcGCPhys The source address (guest physical).
3286 */
3287int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3288{
3289 int64_t val;
3290 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3291 VBOX_CHECK_ADDR(SrcGCPhys);
3292 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3293 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3294#ifdef VBOX_DEBUG_PHYS
3295 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3296#endif
3297 return val;
3298}
3299
3300
3301/**
3302 * Write guest RAM.
3303 *
3304 * @param DstGCPhys The destination address (guest physical).
3305 * @param pvSrc The source address.
3306 * @param cb Number of bytes to write
3307 */
3308void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3309{
3310 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3311 VBOX_CHECK_ADDR(DstGCPhys);
3312 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3313 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3314#ifdef VBOX_DEBUG_PHYS
3315 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3316#endif
3317}
3318
3319
3320/**
3321 * Write guest RAM, unsigned 8-bit.
3322 *
3323 * @param DstGCPhys The destination address (guest physical).
3324 * @param val Value
3325 */
3326void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3327{
3328 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3329 VBOX_CHECK_ADDR(DstGCPhys);
3330 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3331 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3332#ifdef VBOX_DEBUG_PHYS
3333 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3334#endif
3335}
3336
3337
3338/**
3339 * Write guest RAM, unsigned 8-bit.
3340 *
3341 * @param DstGCPhys The destination address (guest physical).
3342 * @param val Value
3343 */
3344void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3345{
3346 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3347 VBOX_CHECK_ADDR(DstGCPhys);
3348 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3349 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3350#ifdef VBOX_DEBUG_PHYS
3351 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3352#endif
3353}
3354
3355
3356/**
3357 * Write guest RAM, unsigned 32-bit.
3358 *
3359 * @param DstGCPhys The destination address (guest physical).
3360 * @param val Value
3361 */
3362void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3363{
3364 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3365 VBOX_CHECK_ADDR(DstGCPhys);
3366 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3367 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3368#ifdef VBOX_DEBUG_PHYS
3369 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3370#endif
3371}
3372
3373
3374/**
3375 * Write guest RAM, unsigned 64-bit.
3376 *
3377 * @param DstGCPhys The destination address (guest physical).
3378 * @param val Value
3379 */
3380void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3381{
3382 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3383 VBOX_CHECK_ADDR(DstGCPhys);
3384 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3385 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3386#ifdef VBOX_DEBUG_PHYS
3387 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3388#endif
3389}
3390
3391#undef LOG_GROUP
3392#define LOG_GROUP LOG_GROUP_REM_MMIO
3393
3394/** Read MMIO memory. */
3395static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3396{
3397 uint32_t u32 = 0;
3398 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3399 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3400 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3401 return u32;
3402}
3403
3404/** Read MMIO memory. */
3405static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3406{
3407 uint32_t u32 = 0;
3408 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3409 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3410 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3411 return u32;
3412}
3413
3414/** Read MMIO memory. */
3415static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3416{
3417 uint32_t u32 = 0;
3418 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3419 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3420 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3421 return u32;
3422}
3423
3424/** Write to MMIO memory. */
3425static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3426{
3427 int rc;
3428 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3429 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3430 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3431}
3432
3433/** Write to MMIO memory. */
3434static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3435{
3436 int rc;
3437 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3438 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3439 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3440}
3441
3442/** Write to MMIO memory. */
3443static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3444{
3445 int rc;
3446 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3447 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3448 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3449}
3450
3451
3452#undef LOG_GROUP
3453#define LOG_GROUP LOG_GROUP_REM_HANDLER
3454
3455/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3456
3457static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3458{
3459 uint8_t u8;
3460 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3461 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3462 return u8;
3463}
3464
3465static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3466{
3467 uint16_t u16;
3468 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3469 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3470 return u16;
3471}
3472
3473static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3474{
3475 uint32_t u32;
3476 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3477 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3478 return u32;
3479}
3480
/** Handler-memory byte write via PGM.  Note: passes the 32-bit parameter's
 *  address with sizeof(uint8_t) - relies on little-endian host layout. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3486
/** Handler-memory word write via PGM.  Note: passes the 32-bit parameter's
 *  address with sizeof(uint16_t) - relies on little-endian host layout. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3492
/** Handler-memory dword write via PGM. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3498
3499/* -+- disassembly -+- */
3500
3501#undef LOG_GROUP
3502#define LOG_GROUP LOG_GROUP_REM_DISAS
3503
3504
3505/**
3506 * Enables or disables singled stepped disassembly.
3507 *
3508 * @returns VBox status code.
3509 * @param pVM VM handle.
3510 * @param fEnable To enable set this flag, to disable clear it.
3511 */
3512static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3513{
3514 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3515 VM_ASSERT_EMT(pVM);
3516
3517 if (fEnable)
3518 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3519 else
3520 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3521 return VINF_SUCCESS;
3522}
3523
3524
3525/**
3526 * Enables or disables singled stepped disassembly.
3527 *
3528 * @returns VBox status code.
3529 * @param pVM VM handle.
3530 * @param fEnable To enable set this flag, to disable clear it.
3531 */
3532REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3533{
3534 PVMREQ pReq;
3535 int rc;
3536
3537 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3538 if (VM_IS_EMT(pVM))
3539 return remR3DisasEnableStepping(pVM, fEnable);
3540
3541 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3542 AssertRC(rc);
3543 if (RT_SUCCESS(rc))
3544 rc = pReq->iStatus;
3545 VMR3ReqFree(pReq);
3546 return rc;
3547}
3548
3549
3550#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3551/**
3552 * External Debugger Command: .remstep [on|off|1|0]
3553 */
3554static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3555{
3556 bool fEnable;
3557 int rc;
3558
3559 /* print status */
3560 if (cArgs == 0)
3561 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3562 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3563
3564 /* convert the argument and change the mode. */
3565 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3566 if (RT_FAILURE(rc))
3567 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3568 rc = REMR3DisasEnableStepping(pVM, fEnable);
3569 if (RT_FAILURE(rc))
3570 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3571 return rc;
3572}
3573#endif
3574
3575
3576/**
3577 * Disassembles one instruction and prints it to the log.
3578 *
3579 * @returns Success indicator.
3580 * @param env Pointer to the recompiler CPU structure.
3581 * @param f32BitCode Indicates that whether or not the code should
3582 * be disassembled as 16 or 32 bit. If -1 the CS
3583 * selector will be inspected.
3584 * @param pszPrefix
3585 */
3586bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3587{
3588 PVM pVM = env->pVM;
3589 const bool fLog = LogIsEnabled();
3590 const bool fLog2 = LogIs2Enabled();
3591 int rc = VINF_SUCCESS;
3592
3593 /*
3594 * Don't bother if there ain't any log output to do.
3595 */
3596 if (!fLog && !fLog2)
3597 return true;
3598
3599 /*
3600 * Update the state so DBGF reads the correct register values.
3601 */
3602 remR3StateUpdate(pVM, env->pVCpu);
3603
3604 /*
3605 * Log registers if requested.
3606 */
3607 if (!fLog2)
3608 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3609
3610 /*
3611 * Disassemble to log.
3612 */
3613 if (fLog)
3614 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3615
3616 return RT_SUCCESS(rc);
3617}
3618
3619
3620/**
3621 * Disassemble recompiled code.
3622 *
3623 * @param phFileIgnored Ignored, logfile usually.
3624 * @param pvCode Pointer to the code block.
3625 * @param cb Size of the code block.
3626 */
3627void disas(FILE *phFile, void *pvCode, unsigned long cb)
3628{
3629#ifdef DEBUG_TMP_LOGGING
3630# define DISAS_PRINTF(x...) fprintf(phFile, x)
3631#else
3632# define DISAS_PRINTF(x...) RTLogPrintf(x)
3633 if (LogIs2Enabled())
3634#endif
3635 {
3636 unsigned off = 0;
3637 char szOutput[256];
3638 DISCPUSTATE Cpu;
3639
3640 memset(&Cpu, 0, sizeof(Cpu));
3641#ifdef RT_ARCH_X86
3642 Cpu.mode = CPUMODE_32BIT;
3643#else
3644 Cpu.mode = CPUMODE_64BIT;
3645#endif
3646
3647 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3648 while (off < cb)
3649 {
3650 uint32_t cbInstr;
3651 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3652 DISAS_PRINTF("%s", szOutput);
3653 else
3654 {
3655 DISAS_PRINTF("disas error\n");
3656 cbInstr = 1;
3657#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3658 break;
3659#endif
3660 }
3661 off += cbInstr;
3662 }
3663 }
3664
3665#undef DISAS_PRINTF
3666}
3667
3668
3669/**
3670 * Disassemble guest code.
3671 *
3672 * @param phFileIgnored Ignored, logfile usually.
3673 * @param uCode The guest address of the code to disassemble. (flat?)
3674 * @param cb Number of bytes to disassemble.
3675 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3676 */
3677void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3678{
3679#ifdef DEBUG_TMP_LOGGING
3680# define DISAS_PRINTF(x...) fprintf(phFile, x)
3681#else
3682# define DISAS_PRINTF(x...) RTLogPrintf(x)
3683 if (LogIs2Enabled())
3684#endif
3685 {
3686 PVM pVM = cpu_single_env->pVM;
3687 PVMCPU pVCpu = cpu_single_env->pVCpu;
3688 RTSEL cs;
3689 RTGCUINTPTR eip;
3690
3691 Assert(pVCpu);
3692
3693 /*
3694 * Update the state so DBGF reads the correct register values (flags).
3695 */
3696 remR3StateUpdate(pVM, pVCpu);
3697
3698 /*
3699 * Do the disassembling.
3700 */
3701 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3702 cs = cpu_single_env->segs[R_CS].selector;
3703 eip = uCode - cpu_single_env->segs[R_CS].base;
3704 for (;;)
3705 {
3706 char szBuf[256];
3707 uint32_t cbInstr;
3708 int rc = DBGFR3DisasInstrEx(pVM,
3709 pVCpu,
3710 cs,
3711 eip,
3712 0,
3713 szBuf, sizeof(szBuf),
3714 &cbInstr);
3715 if (RT_SUCCESS(rc))
3716 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3717 else
3718 {
3719 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3720 cbInstr = 1;
3721 }
3722
3723 /* next */
3724 if (cb <= cbInstr)
3725 break;
3726 cb -= cbInstr;
3727 uCode += cbInstr;
3728 eip += cbInstr;
3729 }
3730 }
3731#undef DISAS_PRINTF
3732}
3733
3734
3735/**
3736 * Looks up a guest symbol.
3737 *
3738 * @returns Pointer to symbol name. This is a static buffer.
3739 * @param orig_addr The address in question.
3740 */
3741const char *lookup_symbol(target_ulong orig_addr)
3742{
3743 RTGCINTPTR off = 0;
3744 DBGFSYMBOL Sym;
3745 PVM pVM = cpu_single_env->pVM;
3746 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3747 if (RT_SUCCESS(rc))
3748 {
3749 static char szSym[sizeof(Sym.szName) + 48];
3750 if (!off)
3751 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3752 else if (off > 0)
3753 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3754 else
3755 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3756 return szSym;
3757 }
3758 return "<N/A>";
3759}
3760
3761
3762#undef LOG_GROUP
3763#define LOG_GROUP LOG_GROUP_REM
3764
3765
3766/* -+- FF notifications -+- */
3767
3768
3769/**
3770 * Notification about a pending interrupt.
3771 *
3772 * @param pVM VM Handle.
3773 * @param pVCpu VMCPU Handle.
3774 * @param u8Interrupt Interrupt
3775 * @thread The emulation thread.
3776 */
3777REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3778{
3779 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3780 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3781}
3782
3783/**
3784 * Notification about a pending interrupt.
3785 *
3786 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3787 * @param pVM VM Handle.
3788 * @param pVCpu VMCPU Handle.
3789 * @thread The emulation thread.
3790 */
3791REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3792{
3793 return pVM->rem.s.u32PendingInterrupt;
3794}
3795
3796/**
3797 * Notification about the interrupt FF being set.
3798 *
3799 * @param pVM VM Handle.
3800 * @param pVCpu VMCPU Handle.
3801 * @thread The emulation thread.
3802 */
3803REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3804{
3805 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3806 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3807 if (pVM->rem.s.fInREM)
3808 {
3809 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3810 CPU_INTERRUPT_EXTERNAL_HARD);
3811 }
3812}
3813
3814
3815/**
3816 * Notification about the interrupt FF being set.
3817 *
3818 * @param pVM VM Handle.
3819 * @param pVCpu VMCPU Handle.
3820 * @thread Any.
3821 */
3822REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3823{
3824 LogFlow(("REMR3NotifyInterruptClear:\n"));
3825 if (pVM->rem.s.fInREM)
3826 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3827}
3828
3829
3830/**
3831 * Notification about pending timer(s).
3832 *
3833 * @param pVM VM Handle.
3834 * @thread Any.
3835 */
3836REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3837{
3838#ifndef DEBUG_bird
3839 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3840#endif
3841 if (pVM->rem.s.fInREM)
3842 {
3843 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3844 CPU_INTERRUPT_EXTERNAL_TIMER);
3845 }
3846}
3847
3848
3849/**
3850 * Notification about pending DMA transfers.
3851 *
3852 * @param pVM VM Handle.
3853 * @thread Any.
3854 */
3855REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3856{
3857 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3858 if (pVM->rem.s.fInREM)
3859 {
3860 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3861 CPU_INTERRUPT_EXTERNAL_DMA);
3862 }
3863}
3864
3865
3866/**
3867 * Notification about pending timer(s).
3868 *
3869 * @param pVM VM Handle.
3870 * @thread Any.
3871 */
3872REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3873{
3874 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3875 if (pVM->rem.s.fInREM)
3876 {
3877 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3878 CPU_INTERRUPT_EXTERNAL_EXIT);
3879 }
3880}
3881
3882
3883/**
3884 * Notification about pending FF set by an external thread.
3885 *
3886 * @param pVM VM handle.
3887 * @thread Any.
3888 */
3889REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3890{
3891 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3892 if (pVM->rem.s.fInREM)
3893 {
3894 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3895 CPU_INTERRUPT_EXTERNAL_EXIT);
3896 }
3897}
3898
3899
3900#ifdef VBOX_WITH_STATISTICS
3901void remR3ProfileStart(int statcode)
3902{
3903 STAMPROFILEADV *pStat;
3904 switch(statcode)
3905 {
3906 case STATS_EMULATE_SINGLE_INSTR:
3907 pStat = &gStatExecuteSingleInstr;
3908 break;
3909 case STATS_QEMU_COMPILATION:
3910 pStat = &gStatCompilationQEmu;
3911 break;
3912 case STATS_QEMU_RUN_EMULATED_CODE:
3913 pStat = &gStatRunCodeQEmu;
3914 break;
3915 case STATS_QEMU_TOTAL:
3916 pStat = &gStatTotalTimeQEmu;
3917 break;
3918 case STATS_QEMU_RUN_TIMERS:
3919 pStat = &gStatTimers;
3920 break;
3921 case STATS_TLB_LOOKUP:
3922 pStat= &gStatTBLookup;
3923 break;
3924 case STATS_IRQ_HANDLING:
3925 pStat= &gStatIRQ;
3926 break;
3927 case STATS_RAW_CHECK:
3928 pStat = &gStatRawCheck;
3929 break;
3930
3931 default:
3932 AssertMsgFailed(("unknown stat %d\n", statcode));
3933 return;
3934 }
3935 STAM_PROFILE_ADV_START(pStat, a);
3936}
3937
3938
3939void remR3ProfileStop(int statcode)
3940{
3941 STAMPROFILEADV *pStat;
3942 switch(statcode)
3943 {
3944 case STATS_EMULATE_SINGLE_INSTR:
3945 pStat = &gStatExecuteSingleInstr;
3946 break;
3947 case STATS_QEMU_COMPILATION:
3948 pStat = &gStatCompilationQEmu;
3949 break;
3950 case STATS_QEMU_RUN_EMULATED_CODE:
3951 pStat = &gStatRunCodeQEmu;
3952 break;
3953 case STATS_QEMU_TOTAL:
3954 pStat = &gStatTotalTimeQEmu;
3955 break;
3956 case STATS_QEMU_RUN_TIMERS:
3957 pStat = &gStatTimers;
3958 break;
3959 case STATS_TLB_LOOKUP:
3960 pStat= &gStatTBLookup;
3961 break;
3962 case STATS_IRQ_HANDLING:
3963 pStat= &gStatIRQ;
3964 break;
3965 case STATS_RAW_CHECK:
3966 pStat = &gStatRawCheck;
3967 break;
3968 default:
3969 AssertMsgFailed(("unknown stat %d\n", statcode));
3970 return;
3971 }
3972 STAM_PROFILE_ADV_STOP(pStat, a);
3973}
3974#endif
3975
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code in the REM state and interrupts the recompiler
 * so it stops executing translated code.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Park the status code where the REM glue will find it ... */
    pVM->rem.s.rc = rc;
    /* ... and ask the recompiler CPU to exit its inner loop. */
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
3990
3991
3992/* -+- timers -+- */
3993
3994uint64_t cpu_get_tsc(CPUX86State *env)
3995{
3996 STAM_COUNTER_INC(&gStatCpuGetTSC);
3997 return TMCpuTickGet(env->pVCpu);
3998}
3999
4000
4001/* -+- interrupts -+- */
4002
/**
 * Asserts the FPU error interrupt by raising ISA IRQ 13 in the guest.
 *
 * @param   env     The recompiler CPU state (supplies the VM handle).
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4008
/**
 * Gets the next pending hardware interrupt vector for the recompiler CPU.
 *
 * Prefers an interrupt already fetched from the (A)PIC and parked in
 * u32PendingInterrupt over querying PDM again (see the kludge note below).
 *
 * @returns The interrupt vector number, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state (supplies the VM handle).
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        /* Consume the parked interrupt so it is delivered only once. */
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    /* NOTE(review): u8Interrupt may be logged uninitialized when PDMGetInterrupt fails. */
    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard interrupt request up while more interrupts are pending. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4041
4042
4043/* -+- local apic -+- */
4044
4045void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4046{
4047 int rc = PDMApicSetBase(env->pVM, val);
4048 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4049}
4050
4051uint64_t cpu_get_apic_base(CPUX86State *env)
4052{
4053 uint64_t u64;
4054 int rc = PDMApicGetBase(env->pVM, &u64);
4055 if (RT_SUCCESS(rc))
4056 {
4057 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4058 return u64;
4059 }
4060 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4061 return 0;
4062}
4063
4064void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4065{
4066 int rc = PDMApicSetTPR(env->pVM, val);
4067 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4068}
4069
4070uint8_t cpu_get_apic_tpr(CPUX86State *env)
4071{
4072 uint8_t u8;
4073 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4074 if (RT_SUCCESS(rc))
4075 {
4076 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4077 return u8;
4078 }
4079 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4080 return 0;
4081}
4082
4083
4084uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4085{
4086 uint64_t value;
4087 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4088 if (RT_SUCCESS(rc))
4089 {
4090 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4091 return value;
4092 }
4093 /** @todo: exception ? */
4094 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4095 return value;
4096}
4097
/**
 * Writes an APIC MSR (forwarded to the PDM APIC device).
 *
 * @param   env     The recompiler CPU state (supplies the VM handle).
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4104
/**
 * Reads a guest MSR via CPUM.
 *
 * @returns The MSR value.
 * @param   env     The recompiler CPU state (supplies the VCPU handle).
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    Assert(env->pVCpu);
    return CPUMGetGuestMsr(env->pVCpu, msr);
}
4110
/**
 * Writes a guest MSR via CPUM.
 *
 * @param   env     The recompiler CPU state (supplies the VCPU handle).
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    Assert(env->pVCpu);
    CPUMSetGuestMsr(env->pVCpu, msr, val);
}
4116
4117/* -+- I/O Ports -+- */
4118
4119#undef LOG_GROUP
4120#define LOG_GROUP LOG_GROUP_REM_IOPORT
4121
4122void cpu_outb(CPUState *env, int addr, int val)
4123{
4124 int rc;
4125
4126 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4127 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4128
4129 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4130 if (RT_LIKELY(rc == VINF_SUCCESS))
4131 return;
4132 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4133 {
4134 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4135 remR3RaiseRC(env->pVM, rc);
4136 return;
4137 }
4138 remAbort(rc, __FUNCTION__);
4139}
4140
4141void cpu_outw(CPUState *env, int addr, int val)
4142{
4143 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4144 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4145 if (RT_LIKELY(rc == VINF_SUCCESS))
4146 return;
4147 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4148 {
4149 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4150 remR3RaiseRC(env->pVM, rc);
4151 return;
4152 }
4153 remAbort(rc, __FUNCTION__);
4154}
4155
4156void cpu_outl(CPUState *env, int addr, int val)
4157{
4158 int rc;
4159 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4160 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4161 if (RT_LIKELY(rc == VINF_SUCCESS))
4162 return;
4163 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4164 {
4165 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4166 remR3RaiseRC(env->pVM, rc);
4167 return;
4168 }
4169 remAbort(rc, __FUNCTION__);
4170}
4171
4172int cpu_inb(CPUState *env, int addr)
4173{
4174 uint32_t u32 = 0;
4175 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4176 if (RT_LIKELY(rc == VINF_SUCCESS))
4177 {
4178 if (/*addr != 0x61 && */addr != 0x71)
4179 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4180 return (int)u32;
4181 }
4182 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4183 {
4184 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4185 remR3RaiseRC(env->pVM, rc);
4186 return (int)u32;
4187 }
4188 remAbort(rc, __FUNCTION__);
4189 return 0xff;
4190}
4191
4192int cpu_inw(CPUState *env, int addr)
4193{
4194 uint32_t u32 = 0;
4195 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4196 if (RT_LIKELY(rc == VINF_SUCCESS))
4197 {
4198 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4199 return (int)u32;
4200 }
4201 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4202 {
4203 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4204 remR3RaiseRC(env->pVM, rc);
4205 return (int)u32;
4206 }
4207 remAbort(rc, __FUNCTION__);
4208 return 0xffff;
4209}
4210
4211int cpu_inl(CPUState *env, int addr)
4212{
4213 uint32_t u32 = 0;
4214 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4215 if (RT_LIKELY(rc == VINF_SUCCESS))
4216 {
4217//if (addr==0x01f0 && u32 == 0x6b6d)
4218// loglevel = ~0;
4219 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4220 return (int)u32;
4221 }
4222 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4223 {
4224 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4225 remR3RaiseRC(env->pVM, rc);
4226 return (int)u32;
4227 }
4228 remAbort(rc, __FUNCTION__);
4229 return 0xffffffff;
4230}
4231
4232#undef LOG_GROUP
4233#define LOG_GROUP LOG_GROUP_REM
4234
4235
4236/* -+- helpers and misc other interfaces -+- */
4237
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Forward to CPUM so the guest-visible CPUID configuration applies. */
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4255
4256
4257#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): this function sits inside an '#if 0' block and is not
 * compiled; it also still calls the old single-argument REMR3StateBack().
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4285#endif
4286
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Does not return: ends up in EMR3FatalError().
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    /* Restart the list: %N consumed it and a va_list must not be reused. */
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4324
4325
/**
 * Aborts the VM.
 *
 * Does not return: ends up in EMR3FatalError().
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4356
4357
4358/**
4359 * Dumps a linux system call.
4360 * @param pVCpu VMCPU handle.
4361 */
4362void remR3DumpLnxSyscall(PVMCPU pVCpu)
4363{
4364 static const char *apsz[] =
4365 {
4366 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4367 "sys_exit",
4368 "sys_fork",
4369 "sys_read",
4370 "sys_write",
4371 "sys_open", /* 5 */
4372 "sys_close",
4373 "sys_waitpid",
4374 "sys_creat",
4375 "sys_link",
4376 "sys_unlink", /* 10 */
4377 "sys_execve",
4378 "sys_chdir",
4379 "sys_time",
4380 "sys_mknod",
4381 "sys_chmod", /* 15 */
4382 "sys_lchown16",
4383 "sys_ni_syscall", /* old break syscall holder */
4384 "sys_stat",
4385 "sys_lseek",
4386 "sys_getpid", /* 20 */
4387 "sys_mount",
4388 "sys_oldumount",
4389 "sys_setuid16",
4390 "sys_getuid16",
4391 "sys_stime", /* 25 */
4392 "sys_ptrace",
4393 "sys_alarm",
4394 "sys_fstat",
4395 "sys_pause",
4396 "sys_utime", /* 30 */
4397 "sys_ni_syscall", /* old stty syscall holder */
4398 "sys_ni_syscall", /* old gtty syscall holder */
4399 "sys_access",
4400 "sys_nice",
4401 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4402 "sys_sync",
4403 "sys_kill",
4404 "sys_rename",
4405 "sys_mkdir",
4406 "sys_rmdir", /* 40 */
4407 "sys_dup",
4408 "sys_pipe",
4409 "sys_times",
4410 "sys_ni_syscall", /* old prof syscall holder */
4411 "sys_brk", /* 45 */
4412 "sys_setgid16",
4413 "sys_getgid16",
4414 "sys_signal",
4415 "sys_geteuid16",
4416 "sys_getegid16", /* 50 */
4417 "sys_acct",
4418 "sys_umount", /* recycled never used phys() */
4419 "sys_ni_syscall", /* old lock syscall holder */
4420 "sys_ioctl",
4421 "sys_fcntl", /* 55 */
4422 "sys_ni_syscall", /* old mpx syscall holder */
4423 "sys_setpgid",
4424 "sys_ni_syscall", /* old ulimit syscall holder */
4425 "sys_olduname",
4426 "sys_umask", /* 60 */
4427 "sys_chroot",
4428 "sys_ustat",
4429 "sys_dup2",
4430 "sys_getppid",
4431 "sys_getpgrp", /* 65 */
4432 "sys_setsid",
4433 "sys_sigaction",
4434 "sys_sgetmask",
4435 "sys_ssetmask",
4436 "sys_setreuid16", /* 70 */
4437 "sys_setregid16",
4438 "sys_sigsuspend",
4439 "sys_sigpending",
4440 "sys_sethostname",
4441 "sys_setrlimit", /* 75 */
4442 "sys_old_getrlimit",
4443 "sys_getrusage",
4444 "sys_gettimeofday",
4445 "sys_settimeofday",
4446 "sys_getgroups16", /* 80 */
4447 "sys_setgroups16",
4448 "old_select",
4449 "sys_symlink",
4450 "sys_lstat",
4451 "sys_readlink", /* 85 */
4452 "sys_uselib",
4453 "sys_swapon",
4454 "sys_reboot",
4455 "old_readdir",
4456 "old_mmap", /* 90 */
4457 "sys_munmap",
4458 "sys_truncate",
4459 "sys_ftruncate",
4460 "sys_fchmod",
4461 "sys_fchown16", /* 95 */
4462 "sys_getpriority",
4463 "sys_setpriority",
4464 "sys_ni_syscall", /* old profil syscall holder */
4465 "sys_statfs",
4466 "sys_fstatfs", /* 100 */
4467 "sys_ioperm",
4468 "sys_socketcall",
4469 "sys_syslog",
4470 "sys_setitimer",
4471 "sys_getitimer", /* 105 */
4472 "sys_newstat",
4473 "sys_newlstat",
4474 "sys_newfstat",
4475 "sys_uname",
4476 "sys_iopl", /* 110 */
4477 "sys_vhangup",
4478 "sys_ni_syscall", /* old "idle" system call */
4479 "sys_vm86old",
4480 "sys_wait4",
4481 "sys_swapoff", /* 115 */
4482 "sys_sysinfo",
4483 "sys_ipc",
4484 "sys_fsync",
4485 "sys_sigreturn",
4486 "sys_clone", /* 120 */
4487 "sys_setdomainname",
4488 "sys_newuname",
4489 "sys_modify_ldt",
4490 "sys_adjtimex",
4491 "sys_mprotect", /* 125 */
4492 "sys_sigprocmask",
4493 "sys_ni_syscall", /* old "create_module" */
4494 "sys_init_module",
4495 "sys_delete_module",
4496 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4497 "sys_quotactl",
4498 "sys_getpgid",
4499 "sys_fchdir",
4500 "sys_bdflush",
4501 "sys_sysfs", /* 135 */
4502 "sys_personality",
4503 "sys_ni_syscall", /* reserved for afs_syscall */
4504 "sys_setfsuid16",
4505 "sys_setfsgid16",
4506 "sys_llseek", /* 140 */
4507 "sys_getdents",
4508 "sys_select",
4509 "sys_flock",
4510 "sys_msync",
4511 "sys_readv", /* 145 */
4512 "sys_writev",
4513 "sys_getsid",
4514 "sys_fdatasync",
4515 "sys_sysctl",
4516 "sys_mlock", /* 150 */
4517 "sys_munlock",
4518 "sys_mlockall",
4519 "sys_munlockall",
4520 "sys_sched_setparam",
4521 "sys_sched_getparam", /* 155 */
4522 "sys_sched_setscheduler",
4523 "sys_sched_getscheduler",
4524 "sys_sched_yield",
4525 "sys_sched_get_priority_max",
4526 "sys_sched_get_priority_min", /* 160 */
4527 "sys_sched_rr_get_interval",
4528 "sys_nanosleep",
4529 "sys_mremap",
4530 "sys_setresuid16",
4531 "sys_getresuid16", /* 165 */
4532 "sys_vm86",
4533 "sys_ni_syscall", /* Old sys_query_module */
4534 "sys_poll",
4535 "sys_nfsservctl",
4536 "sys_setresgid16", /* 170 */
4537 "sys_getresgid16",
4538 "sys_prctl",
4539 "sys_rt_sigreturn",
4540 "sys_rt_sigaction",
4541 "sys_rt_sigprocmask", /* 175 */
4542 "sys_rt_sigpending",
4543 "sys_rt_sigtimedwait",
4544 "sys_rt_sigqueueinfo",
4545 "sys_rt_sigsuspend",
4546 "sys_pread64", /* 180 */
4547 "sys_pwrite64",
4548 "sys_chown16",
4549 "sys_getcwd",
4550 "sys_capget",
4551 "sys_capset", /* 185 */
4552 "sys_sigaltstack",
4553 "sys_sendfile",
4554 "sys_ni_syscall", /* reserved for streams1 */
4555 "sys_ni_syscall", /* reserved for streams2 */
4556 "sys_vfork", /* 190 */
4557 "sys_getrlimit",
4558 "sys_mmap2",
4559 "sys_truncate64",
4560 "sys_ftruncate64",
4561 "sys_stat64", /* 195 */
4562 "sys_lstat64",
4563 "sys_fstat64",
4564 "sys_lchown",
4565 "sys_getuid",
4566 "sys_getgid", /* 200 */
4567 "sys_geteuid",
4568 "sys_getegid",
4569 "sys_setreuid",
4570 "sys_setregid",
4571 "sys_getgroups", /* 205 */
4572 "sys_setgroups",
4573 "sys_fchown",
4574 "sys_setresuid",
4575 "sys_getresuid",
4576 "sys_setresgid", /* 210 */
4577 "sys_getresgid",
4578 "sys_chown",
4579 "sys_setuid",
4580 "sys_setgid",
4581 "sys_setfsuid", /* 215 */
4582 "sys_setfsgid",
4583 "sys_pivot_root",
4584 "sys_mincore",
4585 "sys_madvise",
4586 "sys_getdents64", /* 220 */
4587 "sys_fcntl64",
4588 "sys_ni_syscall", /* reserved for TUX */
4589 "sys_ni_syscall",
4590 "sys_gettid",
4591 "sys_readahead", /* 225 */
4592 "sys_setxattr",
4593 "sys_lsetxattr",
4594 "sys_fsetxattr",
4595 "sys_getxattr",
4596 "sys_lgetxattr", /* 230 */
4597 "sys_fgetxattr",
4598 "sys_listxattr",
4599 "sys_llistxattr",
4600 "sys_flistxattr",
4601 "sys_removexattr", /* 235 */
4602 "sys_lremovexattr",
4603 "sys_fremovexattr",
4604 "sys_tkill",
4605 "sys_sendfile64",
4606 "sys_futex", /* 240 */
4607 "sys_sched_setaffinity",
4608 "sys_sched_getaffinity",
4609 "sys_set_thread_area",
4610 "sys_get_thread_area",
4611 "sys_io_setup", /* 245 */
4612 "sys_io_destroy",
4613 "sys_io_getevents",
4614 "sys_io_submit",
4615 "sys_io_cancel",
4616 "sys_fadvise64", /* 250 */
4617 "sys_ni_syscall",
4618 "sys_exit_group",
4619 "sys_lookup_dcookie",
4620 "sys_epoll_create",
4621 "sys_epoll_ctl", /* 255 */
4622 "sys_epoll_wait",
4623 "sys_remap_file_pages",
4624 "sys_set_tid_address",
4625 "sys_timer_create",
4626 "sys_timer_settime", /* 260 */
4627 "sys_timer_gettime",
4628 "sys_timer_getoverrun",
4629 "sys_timer_delete",
4630 "sys_clock_settime",
4631 "sys_clock_gettime", /* 265 */
4632 "sys_clock_getres",
4633 "sys_clock_nanosleep",
4634 "sys_statfs64",
4635 "sys_fstatfs64",
4636 "sys_tgkill", /* 270 */
4637 "sys_utimes",
4638 "sys_fadvise64_64",
4639 "sys_ni_syscall" /* sys_vserver */
4640 };
4641
4642 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
4643 switch (uEAX)
4644 {
4645 default:
4646 if (uEAX < RT_ELEMENTS(apsz))
4647 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4648 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
4649 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
4650 else
4651 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
4652 break;
4653
4654 }
4655}
4656
4657
/**
 * Dumps an OpenBSD system call.
 *
 * Looks up EAX in the i386 syscall table and logs it together with up to
 * eight stack arguments. Does nothing when logging is disabled.
 *
 * @param   pVCpu       VMCPU handle.
 */
void remR3DumpOBsdSyscall(PVMCPU pVCpu)
{
    static const char *apsz[] =
    {
        "SYS_syscall",          //0
        "SYS_exit",             //1
        "SYS_fork",             //2
        "SYS_read",             //3
        "SYS_write",            //4
        "SYS_open",             //5
        "SYS_close",            //6
        "SYS_wait4",            //7
        "SYS_8",
        "SYS_link",             //9
        "SYS_unlink",           //10
        "SYS_11",
        "SYS_chdir",            //12
        "SYS_fchdir",           //13
        "SYS_mknod",            //14
        "SYS_chmod",            //15
        "SYS_chown",            //16
        "SYS_break",            //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid",           //20
        "SYS_mount",            //21
        "SYS_unmount",          //22
        "SYS_setuid",           //23
        "SYS_getuid",           //24
        "SYS_geteuid",          //25
        "SYS_ptrace",           //26
        "SYS_recvmsg",          //27
        "SYS_sendmsg",          //28
        "SYS_recvfrom",         //29
        "SYS_accept",           //30
        "SYS_getpeername",      //31
        "SYS_getsockname",      //32
        "SYS_access",           //33
        "SYS_chflags",          //34
        "SYS_fchflags",         //35
        "SYS_sync",             //36
        "SYS_kill",             //37
        "SYS_38",
        "SYS_getppid",          //39
        "SYS_40",
        "SYS_dup",              //41
        "SYS_opipe",            //42
        "SYS_getegid",          //43
        "SYS_profil",           //44
        "SYS_ktrace",           //45
        "SYS_sigaction",        //46
        "SYS_getgid",           //47
        "SYS_sigprocmask",      //48
        "SYS_getlogin",         //49
        "SYS_setlogin",         //50
        "SYS_acct",             //51
        "SYS_sigpending",       //52
        "SYS_osigaltstack",     //53
        "SYS_ioctl",            //54
        "SYS_reboot",           //55
        "SYS_revoke",           //56
        "SYS_symlink",          //57
        "SYS_readlink",         //58
        "SYS_execve",           //59
        "SYS_umask",            //60
        "SYS_chroot",           //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork",            //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk",             //69
        "SYS_sstk",             //70
        "SYS_61",
        "SYS_vadvise",          //72
        "SYS_munmap",           //73
        "SYS_mprotect",         //74
        "SYS_madvise",          //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore",          //78
        "SYS_getgroups",        //79
        "SYS_setgroups",        //80
        "SYS_getpgrp",          //81
        "SYS_setpgid",          //82
        "SYS_setitimer",        //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer",        //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2",             //90
        "SYS_91",
        "SYS_fcntl",            //92
        "SYS_select",           //93
        "SYS_94",
        "SYS_fsync",            //95
        "SYS_setpriority",      //96
        "SYS_socket",           //97
        "SYS_connect",          //98
        "SYS_99",
        "SYS_getpriority",      //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn",        //103
        "SYS_bind",             //104
        "SYS_setsockopt",       //105
        "SYS_listen",           //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend",       //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday",     //116
        "SYS_getrusage",        //117
        "SYS_getsockopt",       //118
        "SYS_119",
        "SYS_readv",            //120
        "SYS_writev",           //121
        "SYS_settimeofday",     //122
        "SYS_fchown",           //123
        "SYS_fchmod",           //124
        "SYS_125",
        "SYS_setreuid",         //126
        "SYS_setregid",         //127
        "SYS_rename",           //128
        "SYS_129",
        "SYS_130",
        "SYS_flock",            //131
        "SYS_mkfifo",           //132
        "SYS_sendto",           //133
        "SYS_shutdown",         //134
        "SYS_socketpair",       //135
        "SYS_mkdir",            //136
        "SYS_rmdir",            //137
        "SYS_utimes",           //138
        "SYS_139",
        "SYS_adjtime",          //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid",           //147
        "SYS_quotactl",         //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc",           //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh",            //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch",          //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread",            //173
        "SYS_pwrite",           //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid",           //181
        "SYS_setegid",          //182
        "SYS_seteuid",          //183
        "SYS_lfs_bmapv",        //184
        "SYS_lfs_markv",        //185
        "SYS_lfs_segclean",     //186
        "SYS_lfs_segwait",      //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf",         //191
        "SYS_fpathconf",        //192
        "SYS_swapctl",          //193
        "SYS_getrlimit",        //194
        "SYS_setrlimit",        //195
        "SYS_getdirentries",    //196
        "SYS_mmap",             //197
        "SYS___syscall",        //198
        "SYS_lseek",            //199
        "SYS_truncate",         //200
        "SYS_ftruncate",        //201
        "SYS___sysctl",         //202
        "SYS_mlock",            //203
        "SYS_munlock",          //204
        "SYS_205",
        "SYS_futimes",          //206
        "SYS_getpgid",          //207
        "SYS_xfspioctl",        //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget",           //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget",           //225
        "SYS_msgsnd",           //226
        "SYS_msgrcv",           //227
        "SYS_shmat",            //228
        "SYS_229",
        "SYS_shmdt",            //230
        "SYS_231",
        "SYS_clock_gettime",    //232
        "SYS_clock_settime",    //233
        "SYS_clock_getres",     //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep",        //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit",         //250
        "SYS_rfork",            //251
        "SYS_poll",             //252
        "SYS_issetugid",        //253
        "SYS_lchown",           //254
        "SYS_getsid",           //255
        "SYS_msync",            //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat",        //260
        "SYS_statfs",           //261
        "SYS_fstatfs",          //262
        "SYS_pipe",             //263
        "SYS_fhopen",           //264
        "SYS_265",
        "SYS_fhstatfs",         //266
        "SYS_preadv",           //267
        "SYS_pwritev",          //268
        "SYS_kqueue",           //269
        "SYS_kevent",           //270
        "SYS_mlockall",         //271
        "SYS_munlockall",       //272
        "SYS_getpeereid",       //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid",        //281
        "SYS_setresuid",        //282
        "SYS_getresgid",        //283
        "SYS_setresgid",        //284
        "SYS_285",
        "SYS_mquery",           //286
        "SYS_closefrom",        //287
        "SYS_sigaltstack",      //288
        "SYS_shmget",           //289
        "SYS_semop",            //290
        "SYS_stat",             //291
        "SYS_fstat",            //292
        "SYS_lstat",            //293
        "SYS_fhstat",           //294
        "SYS___semctl",         //295
        "SYS_shmctl",           //296
        "SYS_msgctl",           //297
        "SYS_MAXSYSCALL",       //298
        //299
        //300
    };
    uint32_t uEAX;
    /* Skip the register/memory reads when nobody is listening. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                /* Fetch up to eight 32-bit arguments from the guest stack. */
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
            break;
    }
}
4988
4989
4990#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Only present in the no-CRT x86 Windows build; simply reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
4998
/**
 * Minimal memcpy replacement for the no-CRT build.
 *
 * Byte-wise forward copy; the regions must not overlap (standard memcpy
 * contract).
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     Where to copy from.
 * @param   size    The number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t        *pbDst = (uint8_t *)dst;
    const uint8_t  *pbSrc = (const uint8_t *)src;   /* keep const - don't discard the qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5006
5007#endif
5008
/**
 * SMM state-change callback from the recompiler core - intentionally a no-op here.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette