VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 18879

Last change on this file since 18879 was 18753, checked in by vboxsync, 16 years ago

REMR3Term: Deregister the statistics since we're usually unloaded.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 155.6 KB
Line 
1/* $Id: VBoxRecompiler.c 18753 2009-04-06 12:52:38Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
/* Don't wanna include everything. */
/* Prototypes for the few QEMU internals called directly from this file;
   declared here to avoid dragging in the full recompiler headers. */
extern void cpu_exec_init_all(unsigned long tb_size);
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUState *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
/* Strict-build helper for physical page lookups (defined in the recompiler). */
unsigned long get_phys_page_offset(target_ulong addr);
#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Implemented as a single X86FPUMMX struct assignment wrapped in the usual
 * do/while(0) so the macro behaves as one statement.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
/* Saved state callbacks (registered with SSM in REMR3Init). */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
static void remR3StateUpdate(PVM pVM);
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

/* MMIO access callbacks, indexed by access size (1/2/4 bytes). */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

/* Access-handler callbacks, indexed by access size (1/2/4 bytes). */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling samples (registered/deregistered in REMR3Init/REMR3Term). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE gStatGCPhys2HCVirt;
static STAMPROFILE gStatHCVirt2GCPhys;
static STAMCOUNTER gStatCpuGetTSC;
/* Counters for the individual reasons raw-mode execution was refused. */
static STAMCOUNTER gStatRefuseTFInhibit;
static STAMCOUNTER gStatRefuseVM86;
static STAMCOUNTER gStatRefusePaging;
static STAMCOUNTER gStatRefusePAE;
static STAMCOUNTER gStatRefuseIOPLNot0;
static STAMCOUNTER gStatRefuseIF0;
static STAMCOUNTER gStatRefuseCode16;
static STAMCOUNTER gStatRefuseWP0;
static STAMCOUNTER gStatRefuseRing1or2;
static STAMCOUNTER gStatRefuseCanExecute;
/* Descriptor table / task register change counters. */
static STAMCOUNTER gStatREMGDTChange;
static STAMCOUNTER gStatREMIDTChange;
static STAMCOUNTER gStatREMLDTRChange;
static STAMCOUNTER gStatREMTRChange;
/* Indexed by segment register: ES, CS, SS, DS, FS, GS (QEMU R_* order). */
static STAMCOUNTER gStatSelOutOfSync[6];
static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
150
/*
 * Global stuff.
 */

/** MMIO read callbacks, indexed by access size (byte, word, dword). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access size (byte, word, dword). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks, indexed by access size (byte, word, dword). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks, indexed by access size (byte, word, dword). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
186

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once (first REMR3Init) via DBGCRegisterCommands. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
219
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
/* Fatal-error helper defined later in this file. */
void remAbort(int rc, const char *pszTip);
/* Math sanity self-test (see REMR3Init); defined externally. */
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif
239
240
241/**
242 * Initializes the REM.
243 *
244 * @returns VBox status code.
245 * @param pVM The VM to operate on.
246 */
247REMR3DECL(int) REMR3Init(PVM pVM)
248{
249 uint32_t u32Dummy;
250 int rc;
251
252#ifdef VBOX_ENABLE_VBOXREM64
253 LogRel(("Using 64-bit aware REM\n"));
254#endif
255
256 /*
257 * Assert sanity.
258 */
259 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
260 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
261 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
262#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
263 Assert(!testmath());
264#endif
265
266 /*
267 * Init some internal data members.
268 */
269 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
270 pVM->rem.s.Env.pVM = pVM;
271#ifdef CPU_RAW_MODE_INIT
272 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
273#endif
274
275 /* ctx. */
276 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
277 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
278
279 /* ignore all notifications */
280 pVM->rem.s.fIgnoreAll = true;
281
282 code_gen_prologue = RTMemExecAlloc(_1K);
283 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
284
285 cpu_exec_init_all(0);
286
287 /*
288 * Init the recompiler.
289 */
290 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
291 {
292 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
293 return VERR_GENERAL_FAILURE;
294 }
295 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
296 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
297
298 /* allocate code buffer for single instruction emulation. */
299 pVM->rem.s.Env.cbCodeBuffer = 4096;
300 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
301 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
302
303 /* finally, set the cpu_single_env global. */
304 cpu_single_env = &pVM->rem.s.Env;
305
306 /* Nothing is pending by default */
307 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
308
309 /*
310 * Register ram types.
311 */
312 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
313 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
314 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
315 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
316 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
317
318 /* stop ignoring. */
319 pVM->rem.s.fIgnoreAll = false;
320
321 /*
322 * Register the saved state data unit.
323 */
324 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
325 NULL, remR3Save, NULL,
326 NULL, remR3Load, NULL);
327 if (RT_FAILURE(rc))
328 return rc;
329
330#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
331 /*
332 * Debugger commands.
333 */
334 static bool fRegisteredCmds = false;
335 if (!fRegisteredCmds)
336 {
337 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
338 if (RT_SUCCESS(rc))
339 fRegisteredCmds = true;
340 }
341#endif
342
343#ifdef VBOX_WITH_STATISTICS
344 /*
345 * Statistics.
346 */
347 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
348 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
349 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
350 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
351 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
352 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
353 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
354 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
355 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
356 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
357 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
358 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
359
360 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
361
362 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
363 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
364 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
365 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
366 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
367 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
368 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
369 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
370 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
371 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
372 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
373
374 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
375 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
376 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
377 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
378
379 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
385
386 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
387 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
388 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
389 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
390 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
391 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
392
393 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
394#endif /* VBOX_WITH_STATISTICS */
395
396 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
397 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
398 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
399
400
401#ifdef DEBUG_ALL_LOGGING
402 loglevel = ~0;
403# ifdef DEBUG_TMP_LOGGING
404 logfile = fopen("/tmp/vbox-qemu.log", "w");
405# endif
406#endif
407
408 return rc;
409}
410
411
412/**
413 * Finalizes the REM initialization.
414 *
415 * This is called after all components, devices and drivers has
416 * been initialized. Its main purpose it to finish the RAM related
417 * initialization.
418 *
419 * @returns VBox status code.
420 *
421 * @param pVM The VM handle.
422 */
423REMR3DECL(int) REMR3InitFinalize(PVM pVM)
424{
425 int rc;
426
427 /*
428 * Ram size & dirty bit map.
429 */
430 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
431 pVM->rem.s.fGCPhysLastRamFixed = true;
432#ifdef RT_STRICT
433 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
434#else
435 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
436#endif
437 return rc;
438}
439
440
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map holds one byte per guest RAM page.  When fGuarded is set the
 * map is allocated with an inaccessible tail region so that overruns fault
 * immediately instead of corrupting memory.
 *
 * @returns VBox status code.
 * @param   pVM      The VM handle.
 * @param   fGuarded Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* GCPhysLastRam is an inclusive address, hence the +1; the assertion
       catches wrap-around when it sits at the top of the address space. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    /* The shift must not drop bits, i.e. cb is expected to be page aligned. */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Simple case: plain hyper-heap allocation, freed with the VM. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail of the allocation inaccessible so overruns fault. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map pointer so its END abuts the guard region; note that
           underruns therefore do NOT fault, only overruns do. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
495
496
497/**
498 * Terminates the REM.
499 *
500 * Termination means cleaning up and freeing all resources,
501 * the VM it self is at this point powered off or suspended.
502 *
503 * @returns VBox status code.
504 * @param pVM The VM to operate on.
505 */
506REMR3DECL(int) REMR3Term(PVM pVM)
507{
508#ifdef VBOX_WITH_STATISTICS
509 /*
510 * Statistics.
511 */
512 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
513 STAM_DEREG(pVM, &gStatCompilationQEmu);
514 STAM_DEREG(pVM, &gStatRunCodeQEmu);
515 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
516 STAM_DEREG(pVM, &gStatTimers);
517 STAM_DEREG(pVM, &gStatTBLookup);
518 STAM_DEREG(pVM, &gStatIRQ);
519 STAM_DEREG(pVM, &gStatRawCheck);
520 STAM_DEREG(pVM, &gStatMemRead);
521 STAM_DEREG(pVM, &gStatMemWrite);
522 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
523 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
524
525 STAM_DEREG(pVM, &gStatCpuGetTSC);
526
527 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
528 STAM_DEREG(pVM, &gStatRefuseVM86);
529 STAM_DEREG(pVM, &gStatRefusePaging);
530 STAM_DEREG(pVM, &gStatRefusePAE);
531 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
532 STAM_DEREG(pVM, &gStatRefuseIF0);
533 STAM_DEREG(pVM, &gStatRefuseCode16);
534 STAM_DEREG(pVM, &gStatRefuseWP0);
535 STAM_DEREG(pVM, &gStatRefuseRing1or2);
536 STAM_DEREG(pVM, &gStatRefuseCanExecute);
537 STAM_DEREG(pVM, &gStatFlushTBs);
538
539 STAM_DEREG(pVM, &gStatREMGDTChange);
540 STAM_DEREG(pVM, &gStatREMLDTRChange);
541 STAM_DEREG(pVM, &gStatREMIDTChange);
542 STAM_DEREG(pVM, &gStatREMTRChange);
543
544 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
545 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
546 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
547 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
548 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
549 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
550
551 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
552 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
553 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
554 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
555 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
556 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
557
558 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
559#endif /* VBOX_WITH_STATISTICS */
560
561 STAM_REL_DEREG(pVM, &tb_flush_count);
562 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
563 STAM_REL_DEREG(pVM, &tlb_flush_count);
564
565 return VINF_SUCCESS;
566}
567
568
569/**
570 * The VM is being reset.
571 *
572 * For the REM component this means to call the cpu_reset() and
573 * reinitialize some state variables.
574 *
575 * @param pVM VM handle.
576 */
577REMR3DECL(void) REMR3Reset(PVM pVM)
578{
579 /*
580 * Reset the REM cpu.
581 */
582 pVM->rem.s.fIgnoreAll = true;
583 cpu_reset(&pVM->rem.s.Env);
584 pVM->rem.s.cInvalidatedPages = 0;
585 pVM->rem.s.fIgnoreAll = false;
586
587 /* Clear raw ring 0 init state */
588 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
589
590 /* Flush the TBs the next time we execute code here. */
591 pVM->rem.s.fFlushTBs = true;
592}
593
594
595/**
596 * Execute state save operation.
597 *
598 * @returns VBox status code.
599 * @param pVM VM Handle.
600 * @param pSSM SSM operation handle.
601 */
602static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
603{
604 PREM pRem = &pVM->rem.s;
605
606 /*
607 * Save the required CPU Env bits.
608 * (Not much because we're never in REM when doing the save.)
609 */
610 LogFlow(("remR3Save:\n"));
611 Assert(!pRem->fInREM);
612 SSMR3PutU32(pSSM, pRem->Env.hflags);
613 SSMR3PutU32(pSSM, ~0); /* separator */
614
615 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
616 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
617 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
618
619 return SSMR3PutU32(pSSM, ~0); /* terminator */
620}
621
622
/**
 * Execute state load operation.
 *
 * Reads back exactly what remR3Save() wrote (plus extra fields for the
 * legacy 1.6 format).  The read order must match the save order precisely.
 *
 * @returns VBox status code.
 * @param   pVM        VM Handle.
 * @param   pSSM       SSM operation handle.
 * @param   u32Version Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if ( u32Version != REM_SAVED_STATE_VERSION
        && u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    /* The separator written after hflags; any other value means the stream
       layout has changed underneath us. */
    rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Bound-check before filling the fixed-size array below. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignornable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
744
745
746
747#undef LOG_GROUP
748#define LOG_GROUP LOG_GROUP_REM_RUN
749
750/**
751 * Single steps an instruction in recompiled mode.
752 *
753 * Before calling this function the REM state needs to be in sync with
754 * the VM. Call REMR3State() to perform the sync. It's only necessary
755 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
756 * and after calling REMR3StateBack().
757 *
758 * @returns VBox status code.
759 *
760 * @param pVM VM Handle.
761 */
762REMR3DECL(int) REMR3Step(PVM pVM)
763{
764 int rc, interrupt_request;
765 RTGCPTR GCPtrPC;
766 bool fBp;
767
768 /*
769 * Lock the REM - we don't wanna have anyone interrupting us
770 * while stepping - and enabled single stepping. We also ignore
771 * pending interrupts and suchlike.
772 */
773 interrupt_request = pVM->rem.s.Env.interrupt_request;
774 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
775 pVM->rem.s.Env.interrupt_request = 0;
776 cpu_single_step(&pVM->rem.s.Env, 1);
777
778 /*
779 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
780 */
781 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
782 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
783
784 /*
785 * Execute and handle the return code.
786 * We execute without enabling the cpu tick, so on success we'll
787 * just flip it on and off to make sure it moves
788 */
789 rc = cpu_exec(&pVM->rem.s.Env);
790 if (rc == EXCP_DEBUG)
791 {
792 TMCpuTickResume(pVM);
793 TMCpuTickPause(pVM);
794 TMVirtualResume(pVM);
795 TMVirtualPause(pVM);
796 rc = VINF_EM_DBG_STEPPED;
797 }
798 else
799 {
800 switch (rc)
801 {
802 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
803 case EXCP_HLT:
804 case EXCP_HALTED: rc = VINF_EM_HALT; break;
805 case EXCP_RC:
806 rc = pVM->rem.s.rc;
807 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
808 break;
809 case EXCP_EXECUTE_RAW:
810 case EXCP_EXECUTE_HWACC:
811 /** @todo: is it correct? No! */
812 rc = VINF_SUCCESS;
813 break;
814 default:
815 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
816 rc = VERR_INTERNAL_ERROR;
817 break;
818 }
819 }
820
821 /*
822 * Restore the stuff we changed to prevent interruption.
823 * Unlock the REM.
824 */
825 if (fBp)
826 {
827 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
828 Assert(rc2 == 0); NOREF(rc2);
829 }
830 cpu_single_step(&pVM->rem.s.Env, 0);
831 pVM->rem.s.Env.interrupt_request = interrupt_request;
832
833 return rc;
834}
835
836
837/**
838 * Set a breakpoint using the REM facilities.
839 *
840 * @returns VBox status code.
841 * @param pVM The VM handle.
842 * @param Address The breakpoint address.
843 * @thread The emulation thread.
844 */
845REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
846{
847 VM_ASSERT_EMT(pVM);
848 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
849 {
850 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
851 return VINF_SUCCESS;
852 }
853 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
854 return VERR_REM_NO_MORE_BP_SLOTS;
855}
856
857
858/**
859 * Clears a breakpoint set by REMR3BreakpointSet().
860 *
861 * @returns VBox status code.
862 * @param pVM The VM handle.
863 * @param Address The breakpoint address.
864 * @thread The emulation thread.
865 */
866REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
867{
868 VM_ASSERT_EMT(pVM);
869 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
870 {
871 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
872 return VINF_SUCCESS;
873 }
874 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
875 return VERR_REM_BP_NOT_FOUND;
876}
877
878
879/**
880 * Emulate an instruction.
881 *
882 * This function executes one instruction without letting anyone
883 * interrupt it. This is intended for being called while being in
884 * raw mode and thus will take care of all the state syncing between
885 * REM and the rest.
886 *
887 * @returns VBox status code.
888 * @param pVM VM handle.
889 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * Note: the saved fFlushTBs value is restored regardless of whether
     * REMR3State succeeded.
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt request mask so it can be restored after the run. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        /* Translate the QEMU exit status into a VBox status code. */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If a breakpoint was sitting at the same address we cannot tell
             * the difference here and the scan below decides which it was.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             * The stashed status is consumed and the slot reset to a poison value.
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1026
1027
1028/**
1029 * Runs code in recompiled mode.
1030 *
1031 * Before calling this function the REM state needs to be in sync with
1032 * the VM. Call REMR3State() to perform the sync. It's only necessary
1033 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1034 * and after calling REMR3StateBack().
1035 *
1036 * @returns VBox status code.
1037 *
1038 * @param pVM VM Handle.
1039 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);
    /* Translate the QEMU exit status into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the registered breakpoints for the current PC. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         * The stashed status is consumed and the slot reset to a poison value.
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1155
1156
1157/**
1158 * Check if the cpu state is suitable for Raw execution.
1159 *
1160 * @returns boolean
1161 * @param env The CPU env struct.
1162 * @param eip The EIP to check this for (might differ from env->eip).
1163 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1164 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1165 *
1166 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1167 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Note: the Attr conversions below translate QEMU's segment flag
         * layout (attributes live in bits 8 and up) to the VBox format.
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires both paging and protected mode to be on. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE paging is only acceptable if the guest CPUID actually advertises PAE. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 path (rings 1/2 are rejected below). */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always run in raw mode, bypassing the remaining checks. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1391
1392
1393/**
1394 * Fetches a code byte.
1395 *
1396 * @returns Success indicator (bool) for ease of use.
1397 * @param env The CPU environment structure.
1398 * @param GCPtrInstr Where to fetch code.
1399 * @param pu8Byte Where to store the byte on success
1400 */
1401bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1402{
1403 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1404 if (RT_SUCCESS(rc))
1405 return true;
1406 return false;
1407}
1408
1409
1410/**
1411 * Flush (or invalidate if you like) page table/dir entry.
1412 *
1413 * (invlpg instruction; tlb_flush_page)
1414 *
1415 * @param env Pointer to cpu environment.
1416 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1417 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    rc = PGMInvalidatePage(pVM, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Fall back to a full CR3 sync on the next occasion. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1456
1457
#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * @returns Pointer to the page on success. On failure the low bits of the
 *          returned value are used as tags: (void *)1 means the lookup
 *          failed entirely, and bit 1 set (pv | 2) means writes must be
 *          caught and routed through PGM.
 * @param   env1        The CPU environment.
 * @param   physAddr    The guest physical address (must be dword aligned
 *                      so the low bits are free for tagging).
 * @param   fWritable   Whether write access is requested.
 *                      NOTE(review): currently ignored - the call below
 *                      always passes true; confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(    rc == VINF_SUCCESS
            || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
            || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
            || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;                   /* tag: lookup failed */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2); /* tag: writes must be caught */
    return pv;
}
#endif /* !REM_PHYS_ADDR_IN_TLB */
1480
1481
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * Only active when VBOX_REM_PROTECT_PAGES_FROM_SMC is defined; otherwise
 * this is a no-op.
 *
 * @param   env             Pointer to the CPU environment.
 * @param   GCPtr           Code page to monitor
 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                        /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1500
1501
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * Mirrors remR3ProtectCode; the CSAM un-monitoring is only compiled in when
 * VBOX_REM_PROTECT_PAGES_FROM_SMC is defined.
 *
 * @param   env             Pointer to the CPU environment.
 * @param   GCPtr           Code page to monitor
 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                        /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1520
1521
1522/**
1523 * Called when the CPU is initialized, any of the CRx registers are changed or
1524 * when the A20 line is modified.
1525 *
1526 * @param env Pointer to the CPU environment.
1527 * @param fGlobal Set if the flush is global.
1528 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    PGMFlushTLB(pVM, env->cr[3], fGlobal);
}
1564
1565
1566/**
1567 * Called when any of the cr0, cr4 or efer registers is updated.
1568 *
1569 * @param env Pointer to the CPU environment.
1570 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires the TSS to be resynced. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    /* No EFER on 32-bit only targets. */
    efer = 0;
#endif
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM status codes are forwarded to EM via the RC mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            /* Anything else is fatal. */
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1614
1615
/**
 * Called from compiled code to run dma.
 *
 * Suspends the emulated-code profiling interval for the duration of
 * the PDM DMA run.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1627
1628
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Accounts the timer work in the STATS_QEMU_RUN_TIMERS profile bucket
 * instead of the emulated-code bucket.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1643
1644
1645/**
 * Record trap occurrence
1647 *
1648 * @returns VBox status code
1649 * @param env Pointer to the CPU environment.
1650 * @param uTrap Trap nr
1651 * @param uErrorCode Error code
1652 * @param pvNextEIP Next EIP
1653 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    /* Lazily register one statistics counter per trap number on first occurrence. */
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (vectors < 0x20) in protected, non-V86 mode take part
       in the trap-loop detection below. */
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* More than 512 repetitions of the same exception is treated as a trap loop. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap/EIP/CR2 combination restarts the repetition count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    else
    {
        /* Not subject to loop detection; just record it. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    return VINF_SUCCESS;
}
1704
1705
/**
 * Clear current active trap
 *
 * Resets the pending-exception tracking state used by remR3NotifyTrap
 * for trap-loop detection.
 *
 * @param pVM VM Handle.
 */
void remR3TrapClear(PVM pVM)
{
    pVM->rem.s.cPendingExceptions = 0;
    pVM->rem.s.uPendingException = 0;
    pVM->rem.s.uPendingExcptEIP = 0;
    pVM->rem.s.uPendingExcptCR2 = 0;
}
1718
1719
/**
 * Record previous call instruction addresses
 *
 * Forwards the current EIP to CSAM for call-site tracking.
 *
 * @param env Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1729
1730
1731/**
1732 * Syncs the internal REM state with the VM.
1733 *
1734 * This must be called before REMR3Run() is invoked whenever when the REM
1735 * state is not up to date. Calling it several times in a row is not
1736 * permitted.
1737 *
1738 * @returns VBox status code.
1739 *
1740 * @param pVM VM Handle.
1741 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
1745 */
1746REMR3DECL(int) REMR3State(PVM pVM)
1747{
1748 register const CPUMCTX *pCtx;
1749 register unsigned fFlags;
1750 bool fHiddenSelRegsValid;
1751 unsigned i;
1752 TRPMEVENT enmType;
1753 uint8_t u8TrapNo;
1754 int rc;
1755
1756 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1757 Log2(("REMR3State:\n"));
1758
1759 pCtx = pVM->rem.s.pCtx;
1760 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1761
1762 Assert(!pVM->rem.s.fInREM);
1763 pVM->rem.s.fInStateSync = true;
1764
1765 /*
1766 * If we have to flush TBs, do that immediately.
1767 */
1768 if (pVM->rem.s.fFlushTBs)
1769 {
1770 STAM_COUNTER_INC(&gStatFlushTBs);
1771 tb_flush(&pVM->rem.s.Env);
1772 pVM->rem.s.fFlushTBs = false;
1773 }
1774
1775 /*
1776 * Copy the registers which require no special handling.
1777 */
1778#ifdef TARGET_X86_64
1779 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1780 Assert(R_EAX == 0);
1781 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1782 Assert(R_ECX == 1);
1783 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1784 Assert(R_EDX == 2);
1785 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1786 Assert(R_EBX == 3);
1787 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1788 Assert(R_ESP == 4);
1789 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1790 Assert(R_EBP == 5);
1791 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1792 Assert(R_ESI == 6);
1793 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1794 Assert(R_EDI == 7);
1795 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1796 pVM->rem.s.Env.regs[8] = pCtx->r8;
1797 pVM->rem.s.Env.regs[9] = pCtx->r9;
1798 pVM->rem.s.Env.regs[10] = pCtx->r10;
1799 pVM->rem.s.Env.regs[11] = pCtx->r11;
1800 pVM->rem.s.Env.regs[12] = pCtx->r12;
1801 pVM->rem.s.Env.regs[13] = pCtx->r13;
1802 pVM->rem.s.Env.regs[14] = pCtx->r14;
1803 pVM->rem.s.Env.regs[15] = pCtx->r15;
1804
1805 pVM->rem.s.Env.eip = pCtx->rip;
1806
1807 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1808#else
1809 Assert(R_EAX == 0);
1810 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1811 Assert(R_ECX == 1);
1812 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1813 Assert(R_EDX == 2);
1814 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1815 Assert(R_EBX == 3);
1816 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1817 Assert(R_ESP == 4);
1818 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1819 Assert(R_EBP == 5);
1820 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1821 Assert(R_ESI == 6);
1822 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1823 Assert(R_EDI == 7);
1824 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1825 pVM->rem.s.Env.eip = pCtx->eip;
1826
1827 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1828#endif
1829
1830 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1831
1832 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1833 for (i=0;i<8;i++)
1834 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1835
1836 /*
1837 * Clear the halted hidden flag (the interrupt waking up the CPU can
1838 * have been dispatched in raw mode).
1839 */
1840 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1841
1842 /*
1843 * Replay invlpg?
1844 */
1845 if (pVM->rem.s.cInvalidatedPages)
1846 {
1847 RTUINT i;
1848
1849 pVM->rem.s.fIgnoreInvlPg = true;
1850 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1851 {
1852 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1853 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1854 }
1855 pVM->rem.s.fIgnoreInvlPg = false;
1856 pVM->rem.s.cInvalidatedPages = 0;
1857 }
1858
1859 /* Replay notification changes? */
1860 if (pVM->rem.s.cHandlerNotifications)
1861 REMR3ReplayHandlerNotifications(pVM);
1862
1863 /* Update MSRs; before CRx registers! */
1864 pVM->rem.s.Env.efer = pCtx->msrEFER;
1865 pVM->rem.s.Env.star = pCtx->msrSTAR;
1866 pVM->rem.s.Env.pat = pCtx->msrPAT;
1867#ifdef TARGET_X86_64
1868 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1869 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1870 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1871 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1872
1873 /* Update the internal long mode activate flag according to the new EFER value. */
1874 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1875 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1876 else
1877 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1878#endif
1879
1880 /*
1881 * Registers which are rarely changed and require special handling / order when changed.
1882 */
1883 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1884 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1885 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1886 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1887 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1888 {
1889 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1890 {
1891 pVM->rem.s.fIgnoreCR3Load = true;
1892 tlb_flush(&pVM->rem.s.Env, true);
1893 pVM->rem.s.fIgnoreCR3Load = false;
1894 }
1895
1896 /* CR4 before CR0! */
1897 if (fFlags & CPUM_CHANGED_CR4)
1898 {
1899 pVM->rem.s.fIgnoreCR3Load = true;
1900 pVM->rem.s.fIgnoreCpuMode = true;
1901 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1902 pVM->rem.s.fIgnoreCpuMode = false;
1903 pVM->rem.s.fIgnoreCR3Load = false;
1904 }
1905
1906 if (fFlags & CPUM_CHANGED_CR0)
1907 {
1908 pVM->rem.s.fIgnoreCR3Load = true;
1909 pVM->rem.s.fIgnoreCpuMode = true;
1910 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1911 pVM->rem.s.fIgnoreCpuMode = false;
1912 pVM->rem.s.fIgnoreCR3Load = false;
1913 }
1914
1915 if (fFlags & CPUM_CHANGED_CR3)
1916 {
1917 pVM->rem.s.fIgnoreCR3Load = true;
1918 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1919 pVM->rem.s.fIgnoreCR3Load = false;
1920 }
1921
1922 if (fFlags & CPUM_CHANGED_GDTR)
1923 {
1924 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1925 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1926 }
1927
1928 if (fFlags & CPUM_CHANGED_IDTR)
1929 {
1930 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1931 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1932 }
1933
1934 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1935 {
1936 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1937 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1938 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1939 }
1940
1941 if (fFlags & CPUM_CHANGED_LDTR)
1942 {
1943 if (fHiddenSelRegsValid)
1944 {
1945 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1946 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1947 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1948 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1949 }
1950 else
1951 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1952 }
1953
1954 if (fFlags & CPUM_CHANGED_CPUID)
1955 {
1956 uint32_t u32Dummy;
1957
1958 /*
1959 * Get the CPUID features.
1960 */
1961 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1962 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1963 }
1964
1965 /* Sync FPU state after CR4, CPUID and EFER (!). */
1966 if (fFlags & CPUM_CHANGED_FPU_REM)
1967 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1968 }
1969
1970 /*
1971 * Sync TR unconditionally to make life simpler.
1972 */
1973 pVM->rem.s.Env.tr.selector = pCtx->tr;
1974 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1975 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1976 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1977 /* Note! do_interrupt will fault if the busy flag is still set... */
1978 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1979
1980 /*
1981 * Update selector registers.
1982 * This must be done *after* we've synced gdt, ldt and crX registers
1983 * since we're reading the GDT/LDT om sync_seg. This will happen with
1984 * saved state which takes a quick dip into rawmode for instance.
1985 */
1986 /*
1987 * Stack; Note first check this one as the CPL might have changed. The
1988 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1989 */
1990
1991 if (fHiddenSelRegsValid)
1992 {
1993 /* The hidden selector registers are valid in the CPU context. */
1994 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1995
1996 /* Set current CPL */
1997 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1998
1999 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2000 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2001 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2002 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2003 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2004 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2005 }
2006 else
2007 {
2008 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2009 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2010 {
2011 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2012
2013 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
2014 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2015#ifdef VBOX_WITH_STATISTICS
2016 if (pVM->rem.s.Env.segs[R_SS].newselector)
2017 {
2018 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2019 }
2020#endif
2021 }
2022 else
2023 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2024
2025 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2026 {
2027 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2028 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2029#ifdef VBOX_WITH_STATISTICS
2030 if (pVM->rem.s.Env.segs[R_ES].newselector)
2031 {
2032 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2033 }
2034#endif
2035 }
2036 else
2037 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2038
2039 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2040 {
2041 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2042 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2043#ifdef VBOX_WITH_STATISTICS
2044 if (pVM->rem.s.Env.segs[R_CS].newselector)
2045 {
2046 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2047 }
2048#endif
2049 }
2050 else
2051 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2052
2053 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2054 {
2055 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2056 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2057#ifdef VBOX_WITH_STATISTICS
2058 if (pVM->rem.s.Env.segs[R_DS].newselector)
2059 {
2060 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2061 }
2062#endif
2063 }
2064 else
2065 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2066
2067 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2068 * be the same but not the base/limit. */
2069 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2070 {
2071 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2072 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2073#ifdef VBOX_WITH_STATISTICS
2074 if (pVM->rem.s.Env.segs[R_FS].newselector)
2075 {
2076 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2077 }
2078#endif
2079 }
2080 else
2081 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2082
2083 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2084 {
2085 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2086 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2087#ifdef VBOX_WITH_STATISTICS
2088 if (pVM->rem.s.Env.segs[R_GS].newselector)
2089 {
2090 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2091 }
2092#endif
2093 }
2094 else
2095 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2096 }
2097
2098 /*
2099 * Check for traps.
2100 */
2101 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2102 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2103 if (RT_SUCCESS(rc))
2104 {
2105#ifdef DEBUG
2106 if (u8TrapNo == 0x80)
2107 {
2108 remR3DumpLnxSyscall(pVM);
2109 remR3DumpOBsdSyscall(pVM);
2110 }
2111#endif
2112
2113 pVM->rem.s.Env.exception_index = u8TrapNo;
2114 if (enmType != TRPM_SOFTWARE_INT)
2115 {
2116 pVM->rem.s.Env.exception_is_int = 0;
2117 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2118 }
2119 else
2120 {
2121 /*
2122 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2123 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2124 * for int03 and into.
2125 */
2126 pVM->rem.s.Env.exception_is_int = 1;
2127 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2128 /* int 3 may be generated by one-byte 0xcc */
2129 if (u8TrapNo == 3)
2130 {
2131 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2132 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2133 }
2134 /* int 4 may be generated by one-byte 0xce */
2135 else if (u8TrapNo == 4)
2136 {
2137 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2138 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2139 }
2140 }
2141
2142 /* get error code and cr2 if needed. */
2143 switch (u8TrapNo)
2144 {
2145 case 0x0e:
2146 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2147 /* fallthru */
2148 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2149 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2150 break;
2151
2152 case 0x11: case 0x08:
2153 default:
2154 pVM->rem.s.Env.error_code = 0;
2155 break;
2156 }
2157
2158 /*
2159 * We can now reset the active trap since the recompiler is gonna have a go at it.
2160 */
2161 rc = TRPMResetTrap(pVM);
2162 AssertRC(rc);
2163 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2164 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2165 }
2166
2167 /*
2168 * Clear old interrupt request flags; Check for pending hardware interrupts.
2169 * (See @remark for why we don't check for other FFs.)
2170 */
2171 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2172 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2173 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2174 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2175
2176 /*
2177 * We're now in REM mode.
2178 */
2179 pVM->rem.s.fInREM = true;
2180 pVM->rem.s.fInStateSync = false;
2181 pVM->rem.s.cCanExecuteRaw = 0;
2182 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2183 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2184 return VINF_SUCCESS;
2185}
2186
2187
2188/**
 * Syncs back changes in the REM state to the VM state.
2190 *
2191 * This must be called after invoking REMR3Run().
2192 * Calling it several times in a row is not permitted.
2193 *
2194 * @returns VBox status code.
2195 *
2196 * @param pVM VM Handle.
2197 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /* Copy the FPU/XMM state from the recompiler environment back into the CPUM context. */
    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8  = pVM->rem.s.Env.regs[8];
    pCtx->r9  = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors sync_seg left half-loaded (newselector != 0 means the
       real load was deferred because it couldn't be done at sync time). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle changes how the TSS is interpreted (presumably due to
       the VME interrupt redirection bitmap), so ask SELM to resync the TSS. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDT/IDT base changes must be reported so the VMM can resync its shadow copies. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    /* 0xF0FF: QEmu keeps the whole 2nd descriptor dword in 'flags'; >> 8 and
       masking out the limit 19:16 nibble yields the attribute word only. */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr             = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base  = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u   = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr             = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base  = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u   = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-set the busy bit that REMR3State stripped (see its TR sync). */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * A pending recompiler exception is handed back to TRPM so the VMM
     * dispatches it; exception_index == -1 means none is pending.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2433
2434
2435/**
2436 * This is called by the disassembler when it wants to update the cpu state
2437 * before for instance doing a register dump.
2438 */
2439static void remR3StateUpdate(PVM pVM)
2440{
2441 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2442 unsigned i;
2443
2444 Assert(pVM->rem.s.fInREM);
2445
2446 /*
2447 * Copy back the registers.
2448 * This is done in the order they are declared in the CPUMCTX structure.
2449 */
2450
2451 /** @todo FOP */
2452 /** @todo FPUIP */
2453 /** @todo CS */
2454 /** @todo FPUDP */
2455 /** @todo DS */
2456 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2457 pCtx->fpu.MXCSR = 0;
2458 pCtx->fpu.MXCSR_MASK = 0;
2459
2460 /** @todo check if FPU/XMM was actually used in the recompiler */
2461 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2462//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2463
2464#ifdef TARGET_X86_64
2465 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2466 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2467 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2468 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2469 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2470 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2471 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2472 pCtx->r8 = pVM->rem.s.Env.regs[8];
2473 pCtx->r9 = pVM->rem.s.Env.regs[9];
2474 pCtx->r10 = pVM->rem.s.Env.regs[10];
2475 pCtx->r11 = pVM->rem.s.Env.regs[11];
2476 pCtx->r12 = pVM->rem.s.Env.regs[12];
2477 pCtx->r13 = pVM->rem.s.Env.regs[13];
2478 pCtx->r14 = pVM->rem.s.Env.regs[14];
2479 pCtx->r15 = pVM->rem.s.Env.regs[15];
2480
2481 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2482#else
2483 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2484 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2485 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2486 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2487 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2488 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2489 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2490
2491 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2492#endif
2493
2494 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2495
2496 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2497 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2498 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2499 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2500 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2501
2502#ifdef TARGET_X86_64
2503 pCtx->rip = pVM->rem.s.Env.eip;
2504 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2505#else
2506 pCtx->eip = pVM->rem.s.Env.eip;
2507 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2508#endif
2509
2510 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2511 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2512 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2513 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2514 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2515 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2516
2517 for (i = 0; i < 8; i++)
2518 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2519
2520 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2521 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2522 {
2523 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2524 STAM_COUNTER_INC(&gStatREMGDTChange);
2525 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2526 }
2527
2528 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2529 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2530 {
2531 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2532 STAM_COUNTER_INC(&gStatREMIDTChange);
2533 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2534 }
2535
2536 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2537 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2538 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2539 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2540 {
2541 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2542 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2543 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2544 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2545 STAM_COUNTER_INC(&gStatREMLDTRChange);
2546 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2547 }
2548
2549 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2550 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2551 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2552 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2553 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2554 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2555 : 0) )
2556 {
2557 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2558 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2559 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2560 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2561 pCtx->tr = pVM->rem.s.Env.tr.selector;
2562 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2563 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2564 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2565 if (pCtx->trHid.Attr.u)
2566 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2567 STAM_COUNTER_INC(&gStatREMTRChange);
2568 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2569 }
2570
2571 /** @todo These values could still be out of sync! */
2572 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2573 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2574 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2575 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2576
2577 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2578 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2579 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2580
2581 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2582 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2583 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2584
2585 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2586 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2587 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2588
2589 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2590 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2591 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2592
2593 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2594 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2595 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2596
2597 /* Sysenter MSR */
2598 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2599 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2600 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2601
2602 /* System MSRs. */
2603 pCtx->msrEFER = pVM->rem.s.Env.efer;
2604 pCtx->msrSTAR = pVM->rem.s.Env.star;
2605 pCtx->msrPAT = pVM->rem.s.Env.pat;
2606#ifdef TARGET_X86_64
2607 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2608 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2609 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2610 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2611#endif
2612
2613}
2614
2615
2616/**
2617 * Update the VMM state information if we're currently in REM.
2618 *
2619 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2620 * we're currently executing in REM and the VMM state is invalid. This method will of
2621 * course check that we're executing in REM before syncing any data over to the VMM.
2622 *
2623 * @param pVM The VM handle.
2624 */
2625REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2626{
2627 if (pVM->rem.s.fInREM)
2628 remR3StateUpdate(pVM);
2629}
2630
2631
2632#undef LOG_GROUP
2633#define LOG_GROUP LOG_GROUP_REM
2634
2635
2636/**
2637 * Notify the recompiler about Address Gate 20 state change.
2638 *
2639 * This notification is required since A20 gate changes are
2640 * initialized from a device driver and the VM might just as
2641 * well be in REM mode as in RAW mode.
2642 *
2643 * @param pVM VM handle.
2644 * @param fEnable True if the gate should be enabled.
2645 * False if the gate should be disabled.
2646 */
2647REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2648{
2649 bool fSaved;
2650
2651 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2652 VM_ASSERT_EMT(pVM);
2653
2654 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2655 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2656
2657 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2658
2659 pVM->rem.s.fIgnoreAll = fSaved;
2660}
2661
2662
2663/**
2664 * Replays the invalidated recorded pages.
2665 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2666 *
2667 * @param pVM VM handle.
2668 */
2669REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2670{
2671 RTUINT i;
2672
2673 VM_ASSERT_EMT(pVM);
2674
2675 /*
2676 * Sync the required registers.
2677 */
2678 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2679 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2680 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2681 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2682
2683 /*
2684 * Replay the flushes.
2685 */
2686 pVM->rem.s.fIgnoreInvlPg = true;
2687 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2688 {
2689 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2690 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2691 }
2692 pVM->rem.s.fIgnoreInvlPg = false;
2693 pVM->rem.s.cInvalidatedPages = 0;
2694}
2695
2696
2697/**
2698 * Replays the handler notification changes
2699 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2700 *
2701 * @param pVM VM handle.
2702 */
2703REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2704{
2705 /*
2706 * Replay the flushes.
2707 */
2708 RTUINT i;
2709 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2710
2711 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2712 VM_ASSERT_EMT(pVM);
2713
2714 pVM->rem.s.cHandlerNotifications = 0;
2715 for (i = 0; i < c; i++)
2716 {
2717 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2718 switch (pRec->enmKind)
2719 {
2720 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2721 REMR3NotifyHandlerPhysicalRegister(pVM,
2722 pRec->u.PhysicalRegister.enmType,
2723 pRec->u.PhysicalRegister.GCPhys,
2724 pRec->u.PhysicalRegister.cb,
2725 pRec->u.PhysicalRegister.fHasHCHandler);
2726 break;
2727
2728 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2729 REMR3NotifyHandlerPhysicalDeregister(pVM,
2730 pRec->u.PhysicalDeregister.enmType,
2731 pRec->u.PhysicalDeregister.GCPhys,
2732 pRec->u.PhysicalDeregister.cb,
2733 pRec->u.PhysicalDeregister.fHasHCHandler,
2734 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2735 break;
2736
2737 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2738 REMR3NotifyHandlerPhysicalModify(pVM,
2739 pRec->u.PhysicalModify.enmType,
2740 pRec->u.PhysicalModify.GCPhysOld,
2741 pRec->u.PhysicalModify.GCPhysNew,
2742 pRec->u.PhysicalModify.cb,
2743 pRec->u.PhysicalModify.fHasHCHandler,
2744 pRec->u.PhysicalModify.fRestoreAsRAM);
2745 break;
2746
2747 default:
2748 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2749 break;
2750 }
2751 }
2752 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2753}
2754
2755
2756/**
2757 * Notify REM about changed code page.
2758 *
2759 * @returns VBox status code.
2760 * @param pVM VM handle.
2761 * @param pvCodePage Code page address
2762 */
2763REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2764{
2765#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2766 int rc;
2767 RTGCPHYS PhysGC;
2768 uint64_t flags;
2769
2770 VM_ASSERT_EMT(pVM);
2771
2772 /*
2773 * Get the physical page address.
2774 */
2775 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2776 if (rc == VINF_SUCCESS)
2777 {
2778 /*
2779 * Sync the required registers and flush the whole page.
2780 * (Easier to do the whole page than notifying it about each physical
2781 * byte that was changed.
2782 */
2783 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2784 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2785 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2786 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2787
2788 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2789 }
2790#endif
2791 return VINF_SUCCESS;
2792}
2793
2794
2795/**
2796 * Notification about a successful MMR3PhysRegister() call.
2797 *
2798 * @param pVM VM handle.
2799 * @param GCPhys The physical address the RAM.
2800 * @param cb Size of the memory.
2801 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2802 */
2803REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2804{
2805 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2806 VM_ASSERT_EMT(pVM);
2807
2808 /*
2809 * Validate input - we trust the caller.
2810 */
2811 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2812 Assert(cb);
2813 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2814 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2815
2816 /*
2817 * Base ram? Update GCPhysLastRam.
2818 */
2819 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2820 {
2821 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2822 {
2823 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2824 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2825 }
2826 }
2827
2828 /*
2829 * Register the ram.
2830 */
2831 Assert(!pVM->rem.s.fIgnoreAll);
2832 pVM->rem.s.fIgnoreAll = true;
2833
2834 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2835 Assert(pVM->rem.s.fIgnoreAll);
2836 pVM->rem.s.fIgnoreAll = false;
2837}
2838
2839
2840/**
2841 * Notification about a successful MMR3PhysRomRegister() call.
2842 *
2843 * @param pVM VM handle.
2844 * @param GCPhys The physical address of the ROM.
2845 * @param cb The size of the ROM.
2846 * @param pvCopy Pointer to the ROM copy.
2847 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2848 * This function will be called when ever the protection of the
2849 * shadow ROM changes (at reset and end of POST).
2850 */
2851REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2852{
2853 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2854 VM_ASSERT_EMT(pVM);
2855
2856 /*
2857 * Validate input - we trust the caller.
2858 */
2859 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2860 Assert(cb);
2861 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2862
2863 /*
2864 * Register the rom.
2865 */
2866 Assert(!pVM->rem.s.fIgnoreAll);
2867 pVM->rem.s.fIgnoreAll = true;
2868
2869 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2870
2871 Assert(pVM->rem.s.fIgnoreAll);
2872 pVM->rem.s.fIgnoreAll = false;
2873}
2874
2875
2876/**
2877 * Notification about a successful memory deregistration or reservation.
2878 *
2879 * @param pVM VM Handle.
2880 * @param GCPhys Start physical address.
2881 * @param cb The size of the range.
2882 */
2883REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2884{
2885 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2886 VM_ASSERT_EMT(pVM);
2887
2888 /*
2889 * Validate input - we trust the caller.
2890 */
2891 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2892 Assert(cb);
2893 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2894
2895 /*
2896 * Unassigning the memory.
2897 */
2898 Assert(!pVM->rem.s.fIgnoreAll);
2899 pVM->rem.s.fIgnoreAll = true;
2900
2901 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2902
2903 Assert(pVM->rem.s.fIgnoreAll);
2904 pVM->rem.s.fIgnoreAll = false;
2905}
2906
2907
2908/**
2909 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2910 *
2911 * @param pVM VM Handle.
2912 * @param enmType Handler type.
2913 * @param GCPhys Handler range address.
2914 * @param cb Size of the handler range.
2915 * @param fHasHCHandler Set if the handler has a HC callback function.
2916 *
2917 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2918 * Handler memory type to memory which has no HC handler.
2919 */
2920REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2921{
2922 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2923 enmType, GCPhys, cb, fHasHCHandler));
2924 VM_ASSERT_EMT(pVM);
2925 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2926 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2927
2928 if (pVM->rem.s.cHandlerNotifications)
2929 REMR3ReplayHandlerNotifications(pVM);
2930
2931 Assert(!pVM->rem.s.fIgnoreAll);
2932 pVM->rem.s.fIgnoreAll = true;
2933
2934 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2935 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2936 else if (fHasHCHandler)
2937 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2938
2939 Assert(pVM->rem.s.fIgnoreAll);
2940 pVM->rem.s.fIgnoreAll = false;
2941}
2942
2943
2944/**
2945 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2946 *
2947 * @param pVM VM Handle.
2948 * @param enmType Handler type.
2949 * @param GCPhys Handler range address.
2950 * @param cb Size of the handler range.
2951 * @param fHasHCHandler Set if the handler has a HC callback function.
2952 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2953 */
2954REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2955{
2956 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2957 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2958 VM_ASSERT_EMT(pVM);
2959
2960 if (pVM->rem.s.cHandlerNotifications)
2961 REMR3ReplayHandlerNotifications(pVM);
2962
2963 Assert(!pVM->rem.s.fIgnoreAll);
2964 pVM->rem.s.fIgnoreAll = true;
2965
2966/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2967 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2968 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2969 else if (fHasHCHandler)
2970 {
2971 if (!fRestoreAsRAM)
2972 {
2973 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2974 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2975 }
2976 else
2977 {
2978 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2979 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2980 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2981 }
2982 }
2983
2984 Assert(pVM->rem.s.fIgnoreAll);
2985 pVM->rem.s.fIgnoreAll = false;
2986}
2987
2988
2989/**
2990 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2991 *
2992 * @param pVM VM Handle.
2993 * @param enmType Handler type.
2994 * @param GCPhysOld Old handler range address.
2995 * @param GCPhysNew New handler range address.
2996 * @param cb Size of the handler range.
2997 * @param fHasHCHandler Set if the handler has a HC callback function.
2998 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2999 */
3000REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3001{
3002 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3003 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3004 VM_ASSERT_EMT(pVM);
3005 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3006
3007 if (pVM->rem.s.cHandlerNotifications)
3008 REMR3ReplayHandlerNotifications(pVM);
3009
3010 if (fHasHCHandler)
3011 {
3012 Assert(!pVM->rem.s.fIgnoreAll);
3013 pVM->rem.s.fIgnoreAll = true;
3014
3015 /*
3016 * Reset the old page.
3017 */
3018 if (!fRestoreAsRAM)
3019 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3020 else
3021 {
3022 /* This is not perfect, but it'll do for PD monitoring... */
3023 Assert(cb == PAGE_SIZE);
3024 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3025 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3026 }
3027
3028 /*
3029 * Update the new page.
3030 */
3031 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3032 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3033 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3034
3035 Assert(pVM->rem.s.fIgnoreAll);
3036 pVM->rem.s.fIgnoreAll = false;
3037 }
3038}
3039
3040
3041/**
3042 * Checks if we're handling access to this page or not.
3043 *
3044 * @returns true if we're trapping access.
3045 * @returns false if we aren't.
3046 * @param pVM The VM handle.
3047 * @param GCPhys The physical address.
3048 *
3049 * @remark This function will only work correctly in VBOX_STRICT builds!
3050 */
3051REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3052{
3053#ifdef VBOX_STRICT
3054 unsigned long off;
3055 if (pVM->rem.s.cHandlerNotifications)
3056 REMR3ReplayHandlerNotifications(pVM);
3057
3058 off = get_phys_page_offset(GCPhys);
3059 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3060 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3061 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3062#else
3063 return false;
3064#endif
3065}
3066
3067
3068/**
3069 * Deals with a rare case in get_phys_addr_code where the code
3070 * is being monitored.
3071 *
3072 * It could also be an MMIO page, in which case we will raise a fatal error.
3073 *
3074 * @returns The physical address corresponding to addr.
3075 * @param env The cpu environment.
3076 * @param addr The virtual address.
3077 * @param pTLBEntry The TLB entry.
3078 */
3079target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3080 target_ulong addr,
3081 CPUTLBEntry* pTLBEntry,
3082 target_phys_addr_t ioTLBEntry)
3083{
3084 PVM pVM = env->pVM;
3085
3086 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3087 {
3088 /* If code memory is being monitored, appropriate IOTLB entry will have
3089 handler IO type, and addend will provide real physical address, no
3090 matter if we store VA in TLB or not, as handlers are always passed PA */
3091 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3092 return ret;
3093 }
3094 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3095 "*** handlers\n",
3096 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3097 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3098 LogRel(("*** mmio\n"));
3099 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3100 LogRel(("*** phys\n"));
3101 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3102 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3103 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3104 AssertFatalFailed();
3105}
3106
3107/**
3108 * Read guest RAM and ROM.
3109 *
3110 * @param SrcGCPhys The source address (guest physical).
3111 * @param pvDst The destination address.
3112 * @param cb Number of bytes
3113 */
3114void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3115{
3116 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3117 VBOX_CHECK_ADDR(SrcGCPhys);
3118 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3119#ifdef VBOX_DEBUG_PHYS
3120 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3121#endif
3122 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3123}
3124
3125
3126/**
3127 * Read guest RAM and ROM, unsigned 8-bit.
3128 *
3129 * @param SrcGCPhys The source address (guest physical).
3130 */
3131RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3132{
3133 uint8_t val;
3134 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3135 VBOX_CHECK_ADDR(SrcGCPhys);
3136 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3137 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3138#ifdef VBOX_DEBUG_PHYS
3139 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3140#endif
3141 return val;
3142}
3143
3144
3145/**
3146 * Read guest RAM and ROM, signed 8-bit.
3147 *
3148 * @param SrcGCPhys The source address (guest physical).
3149 */
3150RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3151{
3152 int8_t val;
3153 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3154 VBOX_CHECK_ADDR(SrcGCPhys);
3155 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3156 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3157#ifdef VBOX_DEBUG_PHYS
3158 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3159#endif
3160 return val;
3161}
3162
3163
3164/**
3165 * Read guest RAM and ROM, unsigned 16-bit.
3166 *
3167 * @param SrcGCPhys The source address (guest physical).
3168 */
3169RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3170{
3171 uint16_t val;
3172 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3173 VBOX_CHECK_ADDR(SrcGCPhys);
3174 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3175 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3176#ifdef VBOX_DEBUG_PHYS
3177 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3178#endif
3179 return val;
3180}
3181
3182
3183/**
3184 * Read guest RAM and ROM, signed 16-bit.
3185 *
3186 * @param SrcGCPhys The source address (guest physical).
3187 */
3188RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3189{
3190 int16_t val;
3191 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3192 VBOX_CHECK_ADDR(SrcGCPhys);
3193 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3194 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3195#ifdef VBOX_DEBUG_PHYS
3196 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3197#endif
3198 return val;
3199}
3200
3201
3202/**
3203 * Read guest RAM and ROM, unsigned 32-bit.
3204 *
3205 * @param SrcGCPhys The source address (guest physical).
3206 */
3207RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3208{
3209 uint32_t val;
3210 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3211 VBOX_CHECK_ADDR(SrcGCPhys);
3212 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3213 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3214#ifdef VBOX_DEBUG_PHYS
3215 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3216#endif
3217 return val;
3218}
3219
3220
3221/**
3222 * Read guest RAM and ROM, signed 32-bit.
3223 *
3224 * @param SrcGCPhys The source address (guest physical).
3225 */
3226RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3227{
3228 int32_t val;
3229 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3230 VBOX_CHECK_ADDR(SrcGCPhys);
3231 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3232 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3233#ifdef VBOX_DEBUG_PHYS
3234 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3235#endif
3236 return val;
3237}
3238
3239
3240/**
3241 * Read guest RAM and ROM, unsigned 64-bit.
3242 *
3243 * @param SrcGCPhys The source address (guest physical).
3244 */
3245uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3246{
3247 uint64_t val;
3248 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3249 VBOX_CHECK_ADDR(SrcGCPhys);
3250 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3251 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3252#ifdef VBOX_DEBUG_PHYS
3253 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3254#endif
3255 return val;
3256}
3257
3258
3259/**
3260 * Read guest RAM and ROM, signed 64-bit.
3261 *
3262 * @param SrcGCPhys The source address (guest physical).
3263 */
3264int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3265{
3266 int64_t val;
3267 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3268 VBOX_CHECK_ADDR(SrcGCPhys);
3269 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3270 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3271#ifdef VBOX_DEBUG_PHYS
3272 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3273#endif
3274 return val;
3275}
3276
3277
3278/**
3279 * Write guest RAM.
3280 *
3281 * @param DstGCPhys The destination address (guest physical).
3282 * @param pvSrc The source address.
3283 * @param cb Number of bytes to write
3284 */
3285void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3286{
3287 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3288 VBOX_CHECK_ADDR(DstGCPhys);
3289 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3290 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3291#ifdef VBOX_DEBUG_PHYS
3292 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3293#endif
3294}
3295
3296
3297/**
3298 * Write guest RAM, unsigned 8-bit.
3299 *
3300 * @param DstGCPhys The destination address (guest physical).
3301 * @param val Value
3302 */
3303void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3304{
3305 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3306 VBOX_CHECK_ADDR(DstGCPhys);
3307 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3308 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3309#ifdef VBOX_DEBUG_PHYS
3310 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3311#endif
3312}
3313
3314
3315/**
3316 * Write guest RAM, unsigned 8-bit.
3317 *
3318 * @param DstGCPhys The destination address (guest physical).
3319 * @param val Value
3320 */
3321void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3322{
3323 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3324 VBOX_CHECK_ADDR(DstGCPhys);
3325 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3326 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3327#ifdef VBOX_DEBUG_PHYS
3328 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3329#endif
3330}
3331
3332
3333/**
3334 * Write guest RAM, unsigned 32-bit.
3335 *
3336 * @param DstGCPhys The destination address (guest physical).
3337 * @param val Value
3338 */
3339void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3340{
3341 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3342 VBOX_CHECK_ADDR(DstGCPhys);
3343 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3344 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3345#ifdef VBOX_DEBUG_PHYS
3346 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3347#endif
3348}
3349
3350
3351/**
3352 * Write guest RAM, unsigned 64-bit.
3353 *
3354 * @param DstGCPhys The destination address (guest physical).
3355 * @param val Value
3356 */
3357void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3358{
3359 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3360 VBOX_CHECK_ADDR(DstGCPhys);
3361 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3362 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3363#ifdef VBOX_DEBUG_PHYS
3364 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3365#endif
3366}
3367
3368#undef LOG_GROUP
3369#define LOG_GROUP LOG_GROUP_REM_MMIO
3370
3371/** Read MMIO memory. */
3372static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3373{
3374 uint32_t u32 = 0;
3375 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3376 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3377 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3378 return u32;
3379}
3380
3381/** Read MMIO memory. */
3382static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3383{
3384 uint32_t u32 = 0;
3385 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3386 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3387 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3388 return u32;
3389}
3390
3391/** Read MMIO memory. */
3392static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3393{
3394 uint32_t u32 = 0;
3395 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3396 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3397 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3398 return u32;
3399}
3400
3401/** Write to MMIO memory. */
3402static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3403{
3404 int rc;
3405 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3406 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3407 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3408}
3409
3410/** Write to MMIO memory. */
3411static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3412{
3413 int rc;
3414 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3415 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3416 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3417}
3418
3419/** Write to MMIO memory. */
3420static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3421{
3422 int rc;
3423 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3424 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3425 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3426}
3427
3428
3429#undef LOG_GROUP
3430#define LOG_GROUP LOG_GROUP_REM_HANDLER
3431
3432/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3433
3434static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3435{
3436 uint8_t u8;
3437 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3438 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3439 return u8;
3440}
3441
3442static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3443{
3444 uint16_t u16;
3445 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3446 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3447 return u16;
3448}
3449
3450static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3451{
3452 uint32_t u32;
3453 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3454 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3455 return u32;
3456}
3457
/* Write one byte through the physical access handler path.
   NOTE(review): passes &u32 with sizeof(uint8_t), i.e. it stores the first
   byte of the 32-bit value - this yields the intended low byte only on
   little-endian hosts; confirm that matches the supported host set. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3463
/* Write one word through the physical access handler path.
   NOTE(review): passes &u32 with sizeof(uint16_t) - stores the first two
   bytes of the 32-bit value, which is the low word only on little-endian
   hosts; confirm that matches the supported host set. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3469
/* Write one dword through the physical access handler path. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3475
3476/* -+- disassembly -+- */
3477
3478#undef LOG_GROUP
3479#define LOG_GROUP LOG_GROUP_REM_DISAS
3480
3481
3482/**
3483 * Enables or disables singled stepped disassembly.
3484 *
3485 * @returns VBox status code.
3486 * @param pVM VM handle.
3487 * @param fEnable To enable set this flag, to disable clear it.
3488 */
3489static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3490{
3491 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3492 VM_ASSERT_EMT(pVM);
3493
3494 if (fEnable)
3495 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3496 else
3497 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3498 return VINF_SUCCESS;
3499}
3500
3501
3502/**
3503 * Enables or disables singled stepped disassembly.
3504 *
3505 * @returns VBox status code.
3506 * @param pVM VM handle.
3507 * @param fEnable To enable set this flag, to disable clear it.
3508 */
3509REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3510{
3511 PVMREQ pReq;
3512 int rc;
3513
3514 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3515 if (VM_IS_EMT(pVM))
3516 return remR3DisasEnableStepping(pVM, fEnable);
3517
3518 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3519 AssertRC(rc);
3520 if (RT_SUCCESS(rc))
3521 rc = pReq->iStatus;
3522 VMR3ReqFree(pReq);
3523 return rc;
3524}
3525
3526
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no arguments it reports the current single-step-disassembly state;
 * with one boolean argument it switches the mode accordingly.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3551
3552
3553/**
3554 * Disassembles one instruction and prints it to the log.
3555 *
3556 * @returns Success indicator.
3557 * @param env Pointer to the recompiler CPU structure.
3558 * @param f32BitCode Indicates that whether or not the code should
3559 * be disassembled as 16 or 32 bit. If -1 the CS
3560 * selector will be inspected.
3561 * @param pszPrefix
3562 */
3563bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3564{
3565 PVM pVM = env->pVM;
3566 const bool fLog = LogIsEnabled();
3567 const bool fLog2 = LogIs2Enabled();
3568 int rc = VINF_SUCCESS;
3569
3570 /*
3571 * Don't bother if there ain't any log output to do.
3572 */
3573 if (!fLog && !fLog2)
3574 return true;
3575
3576 /*
3577 * Update the state so DBGF reads the correct register values.
3578 */
3579 remR3StateUpdate(pVM);
3580
3581 /*
3582 * Log registers if requested.
3583 */
3584 if (!fLog2)
3585 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3586
3587 /*
3588 * Disassemble to log.
3589 */
3590 if (fLog)
3591 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3592
3593 return RT_SUCCESS(rc);
3594}
3595
3596
3597/**
3598 * Disassemble recompiled code.
3599 *
3600 * @param phFileIgnored Ignored, logfile usually.
3601 * @param pvCode Pointer to the code block.
3602 * @param cb Size of the code block.
3603 */
3604void disas(FILE *phFile, void *pvCode, unsigned long cb)
3605{
3606#ifdef DEBUG_TMP_LOGGING
3607# define DISAS_PRINTF(x...) fprintf(phFile, x)
3608#else
3609# define DISAS_PRINTF(x...) RTLogPrintf(x)
3610 if (LogIs2Enabled())
3611#endif
3612 {
3613 unsigned off = 0;
3614 char szOutput[256];
3615 DISCPUSTATE Cpu;
3616
3617 memset(&Cpu, 0, sizeof(Cpu));
3618#ifdef RT_ARCH_X86
3619 Cpu.mode = CPUMODE_32BIT;
3620#else
3621 Cpu.mode = CPUMODE_64BIT;
3622#endif
3623
3624 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3625 while (off < cb)
3626 {
3627 uint32_t cbInstr;
3628 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3629 DISAS_PRINTF("%s", szOutput);
3630 else
3631 {
3632 DISAS_PRINTF("disas error\n");
3633 cbInstr = 1;
3634#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3635 break;
3636#endif
3637 }
3638 off += cbInstr;
3639 }
3640 }
3641
3642#undef DISAS_PRINTF
3643}
3644
3645
3646/**
3647 * Disassemble guest code.
3648 *
3649 * @param phFileIgnored Ignored, logfile usually.
3650 * @param uCode The guest address of the code to disassemble. (flat?)
3651 * @param cb Number of bytes to disassemble.
3652 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3653 */
3654void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3655{
3656#ifdef DEBUG_TMP_LOGGING
3657# define DISAS_PRINTF(x...) fprintf(phFile, x)
3658#else
3659# define DISAS_PRINTF(x...) RTLogPrintf(x)
3660 if (LogIs2Enabled())
3661#endif
3662 {
3663 PVM pVM = cpu_single_env->pVM;
3664 RTSEL cs;
3665 RTGCUINTPTR eip;
3666
3667 /*
3668 * Update the state so DBGF reads the correct register values (flags).
3669 */
3670 remR3StateUpdate(pVM);
3671
3672 /*
3673 * Do the disassembling.
3674 */
3675 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3676 cs = cpu_single_env->segs[R_CS].selector;
3677 eip = uCode - cpu_single_env->segs[R_CS].base;
3678 for (;;)
3679 {
3680 char szBuf[256];
3681 uint32_t cbInstr;
3682 int rc = DBGFR3DisasInstrEx(pVM,
3683 cs,
3684 eip,
3685 0,
3686 szBuf, sizeof(szBuf),
3687 &cbInstr);
3688 if (RT_SUCCESS(rc))
3689 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3690 else
3691 {
3692 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3693 cbInstr = 1;
3694 }
3695
3696 /* next */
3697 if (cb <= cbInstr)
3698 break;
3699 cb -= cbInstr;
3700 uCode += cbInstr;
3701 eip += cbInstr;
3702 }
3703 }
3704#undef DISAS_PRINTF
3705}
3706
3707
3708/**
3709 * Looks up a guest symbol.
3710 *
3711 * @returns Pointer to symbol name. This is a static buffer.
3712 * @param orig_addr The address in question.
3713 */
3714const char *lookup_symbol(target_ulong orig_addr)
3715{
3716 RTGCINTPTR off = 0;
3717 DBGFSYMBOL Sym;
3718 PVM pVM = cpu_single_env->pVM;
3719 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3720 if (RT_SUCCESS(rc))
3721 {
3722 static char szSym[sizeof(Sym.szName) + 48];
3723 if (!off)
3724 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3725 else if (off > 0)
3726 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3727 else
3728 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3729 return szSym;
3730 }
3731 return "<N/A>";
3732}
3733
3734
3735#undef LOG_GROUP
3736#define LOG_GROUP LOG_GROUP_REM
3737
3738
3739/* -+- FF notifications -+- */
3740
3741
3742/**
3743 * Notification about a pending interrupt.
3744 *
3745 * @param pVM VM Handle.
3746 * @param u8Interrupt Interrupt
3747 * @thread The emulation thread.
3748 */
3749REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3750{
3751 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3752 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3753}
3754
3755/**
3756 * Notification about a pending interrupt.
3757 *
3758 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3759 * @param pVM VM Handle.
3760 * @thread The emulation thread.
3761 */
3762REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3763{
3764 return pVM->rem.s.u32PendingInterrupt;
3765}
3766
3767/**
3768 * Notification about the interrupt FF being set.
3769 *
3770 * @param pVM VM Handle.
3771 * @thread The emulation thread.
3772 */
3773REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3774{
3775 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3776 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3777 if (pVM->rem.s.fInREM)
3778 {
3779 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3780 CPU_INTERRUPT_EXTERNAL_HARD);
3781 }
3782}
3783
3784
3785/**
3786 * Notification about the interrupt FF being set.
3787 *
3788 * @param pVM VM Handle.
3789 * @thread Any.
3790 */
3791REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3792{
3793 LogFlow(("REMR3NotifyInterruptClear:\n"));
3794 if (pVM->rem.s.fInREM)
3795 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3796}
3797
3798
3799/**
3800 * Notification about pending timer(s).
3801 *
3802 * @param pVM VM Handle.
3803 * @thread Any.
3804 */
3805REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3806{
3807#ifndef DEBUG_bird
3808 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3809#endif
3810 if (pVM->rem.s.fInREM)
3811 {
3812 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3813 CPU_INTERRUPT_EXTERNAL_TIMER);
3814 }
3815}
3816
3817
3818/**
3819 * Notification about pending DMA transfers.
3820 *
3821 * @param pVM VM Handle.
3822 * @thread Any.
3823 */
3824REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3825{
3826 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3827 if (pVM->rem.s.fInREM)
3828 {
3829 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3830 CPU_INTERRUPT_EXTERNAL_DMA);
3831 }
3832}
3833
3834
3835/**
3836 * Notification about pending timer(s).
3837 *
3838 * @param pVM VM Handle.
3839 * @thread Any.
3840 */
3841REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3842{
3843 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3844 if (pVM->rem.s.fInREM)
3845 {
3846 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3847 CPU_INTERRUPT_EXTERNAL_EXIT);
3848 }
3849}
3850
3851
3852/**
3853 * Notification about pending FF set by an external thread.
3854 *
3855 * @param pVM VM handle.
3856 * @thread Any.
3857 */
3858REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3859{
3860 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3861 if (pVM->rem.s.fInREM)
3862 {
3863 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3864 CPU_INTERRUPT_EXTERNAL_EXIT);
3865 }
3866}
3867
3868
3869#ifdef VBOX_WITH_STATISTICS
3870void remR3ProfileStart(int statcode)
3871{
3872 STAMPROFILEADV *pStat;
3873 switch(statcode)
3874 {
3875 case STATS_EMULATE_SINGLE_INSTR:
3876 pStat = &gStatExecuteSingleInstr;
3877 break;
3878 case STATS_QEMU_COMPILATION:
3879 pStat = &gStatCompilationQEmu;
3880 break;
3881 case STATS_QEMU_RUN_EMULATED_CODE:
3882 pStat = &gStatRunCodeQEmu;
3883 break;
3884 case STATS_QEMU_TOTAL:
3885 pStat = &gStatTotalTimeQEmu;
3886 break;
3887 case STATS_QEMU_RUN_TIMERS:
3888 pStat = &gStatTimers;
3889 break;
3890 case STATS_TLB_LOOKUP:
3891 pStat= &gStatTBLookup;
3892 break;
3893 case STATS_IRQ_HANDLING:
3894 pStat= &gStatIRQ;
3895 break;
3896 case STATS_RAW_CHECK:
3897 pStat = &gStatRawCheck;
3898 break;
3899
3900 default:
3901 AssertMsgFailed(("unknown stat %d\n", statcode));
3902 return;
3903 }
3904 STAM_PROFILE_ADV_START(pStat, a);
3905}
3906
3907
3908void remR3ProfileStop(int statcode)
3909{
3910 STAMPROFILEADV *pStat;
3911 switch(statcode)
3912 {
3913 case STATS_EMULATE_SINGLE_INSTR:
3914 pStat = &gStatExecuteSingleInstr;
3915 break;
3916 case STATS_QEMU_COMPILATION:
3917 pStat = &gStatCompilationQEmu;
3918 break;
3919 case STATS_QEMU_RUN_EMULATED_CODE:
3920 pStat = &gStatRunCodeQEmu;
3921 break;
3922 case STATS_QEMU_TOTAL:
3923 pStat = &gStatTotalTimeQEmu;
3924 break;
3925 case STATS_QEMU_RUN_TIMERS:
3926 pStat = &gStatTimers;
3927 break;
3928 case STATS_TLB_LOOKUP:
3929 pStat= &gStatTBLookup;
3930 break;
3931 case STATS_IRQ_HANDLING:
3932 pStat= &gStatIRQ;
3933 break;
3934 case STATS_RAW_CHECK:
3935 pStat = &gStatRawCheck;
3936 break;
3937 default:
3938 AssertMsgFailed(("unknown stat %d\n", statcode));
3939 return;
3940 }
3941 STAM_PROFILE_ADV_STOP(pStat, a);
3942}
3943#endif
3944
/**
 * Raise an RC, force rem exit.
 *
 * Stashes the status code in the REM state and interrupts the recompiler
 * loop so the caller of REMR3Run sees @a rc.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 * @thread  EMT (asserted).
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Record the status code, then ask the recompiler to stop executing. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
3959
3960
3961/* -+- timers -+- */
3962
3963uint64_t cpu_get_tsc(CPUX86State *env)
3964{
3965 STAM_COUNTER_INC(&gStatCpuGetTSC);
3966 return TMCpuTickGet(env->pVM);
3967}
3968
3969
3970/* -+- interrupts -+- */
3971
3972void cpu_set_ferr(CPUX86State *env)
3973{
3974 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
3975 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
3976}
3977
3978int cpu_get_pic_interrupt(CPUState *env)
3979{
3980 uint8_t u8Interrupt;
3981 int rc;
3982
3983 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
3984 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
3985 * with the (a)pic.
3986 */
3987 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
3988 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
3989 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
3990 * remove this kludge. */
3991 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
3992 {
3993 rc = VINF_SUCCESS;
3994 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
3995 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
3996 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
3997 }
3998 else
3999 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
4000
4001 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4002 if (RT_SUCCESS(rc))
4003 {
4004 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
4005 env->interrupt_request |= CPU_INTERRUPT_HARD;
4006 return u8Interrupt;
4007 }
4008 return -1;
4009}
4010
4011
4012/* -+- local apic -+- */
4013
4014void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4015{
4016 int rc = PDMApicSetBase(env->pVM, val);
4017 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4018}
4019
4020uint64_t cpu_get_apic_base(CPUX86State *env)
4021{
4022 uint64_t u64;
4023 int rc = PDMApicGetBase(env->pVM, &u64);
4024 if (RT_SUCCESS(rc))
4025 {
4026 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4027 return u64;
4028 }
4029 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4030 return 0;
4031}
4032
4033void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4034{
4035 int rc = PDMApicSetTPR(env->pVM, val);
4036 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4037}
4038
4039uint8_t cpu_get_apic_tpr(CPUX86State *env)
4040{
4041 uint8_t u8;
4042 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4043 if (RT_SUCCESS(rc))
4044 {
4045 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4046 return u8;
4047 }
4048 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4049 return 0;
4050}
4051
4052
4053uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4054{
4055 uint64_t value;
4056 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4057 if (RT_SUCCESS(rc))
4058 {
4059 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4060 return value;
4061 }
4062 /** @todo: exception ? */
4063 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4064 return value;
4065}
4066
4067void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4068{
4069 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4070 /** @todo: exception if error ? */
4071 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4072}
4073
/**
 * qemu hook: reads a guest MSR.
 *
 * @returns The value reported by CPUMGetGuestMsr.
 * @param   env     Recompiler CPU state (carries the VM handle).
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4078
/**
 * qemu hook: writes a guest MSR.
 *
 * @param   env     Recompiler CPU state (carries the VM handle).
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4083
4084/* -+- I/O Ports -+- */
4085
4086#undef LOG_GROUP
4087#define LOG_GROUP LOG_GROUP_REM_IOPORT
4088
4089void cpu_outb(CPUState *env, int addr, int val)
4090{
4091 int rc;
4092
4093 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4094 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4095
4096 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4097 if (RT_LIKELY(rc == VINF_SUCCESS))
4098 return;
4099 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4100 {
4101 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4102 remR3RaiseRC(env->pVM, rc);
4103 return;
4104 }
4105 remAbort(rc, __FUNCTION__);
4106}
4107
4108void cpu_outw(CPUState *env, int addr, int val)
4109{
4110 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4111 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4112 if (RT_LIKELY(rc == VINF_SUCCESS))
4113 return;
4114 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4115 {
4116 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4117 remR3RaiseRC(env->pVM, rc);
4118 return;
4119 }
4120 remAbort(rc, __FUNCTION__);
4121}
4122
4123void cpu_outl(CPUState *env, int addr, int val)
4124{
4125 int rc;
4126 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4127 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4128 if (RT_LIKELY(rc == VINF_SUCCESS))
4129 return;
4130 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4131 {
4132 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4133 remR3RaiseRC(env->pVM, rc);
4134 return;
4135 }
4136 remAbort(rc, __FUNCTION__);
4137}
4138
4139int cpu_inb(CPUState *env, int addr)
4140{
4141 uint32_t u32 = 0;
4142 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4143 if (RT_LIKELY(rc == VINF_SUCCESS))
4144 {
4145 if (/*addr != 0x61 && */addr != 0x71)
4146 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4147 return (int)u32;
4148 }
4149 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4150 {
4151 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4152 remR3RaiseRC(env->pVM, rc);
4153 return (int)u32;
4154 }
4155 remAbort(rc, __FUNCTION__);
4156 return 0xff;
4157}
4158
4159int cpu_inw(CPUState *env, int addr)
4160{
4161 uint32_t u32 = 0;
4162 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4163 if (RT_LIKELY(rc == VINF_SUCCESS))
4164 {
4165 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4166 return (int)u32;
4167 }
4168 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4169 {
4170 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4171 remR3RaiseRC(env->pVM, rc);
4172 return (int)u32;
4173 }
4174 remAbort(rc, __FUNCTION__);
4175 return 0xffff;
4176}
4177
4178int cpu_inl(CPUState *env, int addr)
4179{
4180 uint32_t u32 = 0;
4181 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4182 if (RT_LIKELY(rc == VINF_SUCCESS))
4183 {
4184//if (addr==0x01f0 && u32 == 0x6b6d)
4185// loglevel = ~0;
4186 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4187 return (int)u32;
4188 }
4189 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4190 {
4191 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4192 remR3RaiseRC(env->pVM, rc);
4193 return (int)u32;
4194 }
4195 remAbort(rc, __FUNCTION__);
4196 return 0xffffffff;
4197}
4198
4199#undef LOG_GROUP
4200#define LOG_GROUP LOG_GROUP_REM
4201
4202
4203/* -+- helpers and misc other interfaces -+- */
4204
4205/**
4206 * Perform the CPUID instruction.
4207 *
4208 * ASMCpuId cannot be invoked from some source files where this is used because of global
4209 * register allocations.
4210 *
4211 * @param env Pointer to the recompiler CPU structure.
4212 * @param uOperator CPUID operation (eax).
4213 * @param pvEAX Where to store eax.
4214 * @param pvEBX Where to store ebx.
4215 * @param pvECX Where to store ecx.
4216 * @param pvEDX Where to store edx.
4217 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Forward to CPUM which answers guest CPUID queries for this VM. */
    CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4222
4223
4224#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Logs and release-asserts, then syncs the REM state back (if inside REM)
 * and hands the failure to EM. Currently compiled out (#if 0 above).
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4252#endif
4253
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Logs and release-asserts, then syncs the REM state back (if inside REM)
 * and hands the failure to EM. Does not return in practice.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    /* %N consumed the va_list above, so it must be restarted for the assert. */
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4287
4288
/**
 * Aborts the VM.
 *
 * Logs and release-asserts, syncs the REM state back to the VMM if we are
 * inside REM, then hands the error to EM.
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, rc);    /* expected not to return (asserted below) */
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4314
4315
4316/**
4317 * Dumps a linux system call.
4318 * @param pVM VM handle.
4319 */
4320void remR3DumpLnxSyscall(PVM pVM)
4321{
4322 static const char *apsz[] =
4323 {
4324 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4325 "sys_exit",
4326 "sys_fork",
4327 "sys_read",
4328 "sys_write",
4329 "sys_open", /* 5 */
4330 "sys_close",
4331 "sys_waitpid",
4332 "sys_creat",
4333 "sys_link",
4334 "sys_unlink", /* 10 */
4335 "sys_execve",
4336 "sys_chdir",
4337 "sys_time",
4338 "sys_mknod",
4339 "sys_chmod", /* 15 */
4340 "sys_lchown16",
4341 "sys_ni_syscall", /* old break syscall holder */
4342 "sys_stat",
4343 "sys_lseek",
4344 "sys_getpid", /* 20 */
4345 "sys_mount",
4346 "sys_oldumount",
4347 "sys_setuid16",
4348 "sys_getuid16",
4349 "sys_stime", /* 25 */
4350 "sys_ptrace",
4351 "sys_alarm",
4352 "sys_fstat",
4353 "sys_pause",
4354 "sys_utime", /* 30 */
4355 "sys_ni_syscall", /* old stty syscall holder */
4356 "sys_ni_syscall", /* old gtty syscall holder */
4357 "sys_access",
4358 "sys_nice",
4359 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4360 "sys_sync",
4361 "sys_kill",
4362 "sys_rename",
4363 "sys_mkdir",
4364 "sys_rmdir", /* 40 */
4365 "sys_dup",
4366 "sys_pipe",
4367 "sys_times",
4368 "sys_ni_syscall", /* old prof syscall holder */
4369 "sys_brk", /* 45 */
4370 "sys_setgid16",
4371 "sys_getgid16",
4372 "sys_signal",
4373 "sys_geteuid16",
4374 "sys_getegid16", /* 50 */
4375 "sys_acct",
4376 "sys_umount", /* recycled never used phys() */
4377 "sys_ni_syscall", /* old lock syscall holder */
4378 "sys_ioctl",
4379 "sys_fcntl", /* 55 */
4380 "sys_ni_syscall", /* old mpx syscall holder */
4381 "sys_setpgid",
4382 "sys_ni_syscall", /* old ulimit syscall holder */
4383 "sys_olduname",
4384 "sys_umask", /* 60 */
4385 "sys_chroot",
4386 "sys_ustat",
4387 "sys_dup2",
4388 "sys_getppid",
4389 "sys_getpgrp", /* 65 */
4390 "sys_setsid",
4391 "sys_sigaction",
4392 "sys_sgetmask",
4393 "sys_ssetmask",
4394 "sys_setreuid16", /* 70 */
4395 "sys_setregid16",
4396 "sys_sigsuspend",
4397 "sys_sigpending",
4398 "sys_sethostname",
4399 "sys_setrlimit", /* 75 */
4400 "sys_old_getrlimit",
4401 "sys_getrusage",
4402 "sys_gettimeofday",
4403 "sys_settimeofday",
4404 "sys_getgroups16", /* 80 */
4405 "sys_setgroups16",
4406 "old_select",
4407 "sys_symlink",
4408 "sys_lstat",
4409 "sys_readlink", /* 85 */
4410 "sys_uselib",
4411 "sys_swapon",
4412 "sys_reboot",
4413 "old_readdir",
4414 "old_mmap", /* 90 */
4415 "sys_munmap",
4416 "sys_truncate",
4417 "sys_ftruncate",
4418 "sys_fchmod",
4419 "sys_fchown16", /* 95 */
4420 "sys_getpriority",
4421 "sys_setpriority",
4422 "sys_ni_syscall", /* old profil syscall holder */
4423 "sys_statfs",
4424 "sys_fstatfs", /* 100 */
4425 "sys_ioperm",
4426 "sys_socketcall",
4427 "sys_syslog",
4428 "sys_setitimer",
4429 "sys_getitimer", /* 105 */
4430 "sys_newstat",
4431 "sys_newlstat",
4432 "sys_newfstat",
4433 "sys_uname",
4434 "sys_iopl", /* 110 */
4435 "sys_vhangup",
4436 "sys_ni_syscall", /* old "idle" system call */
4437 "sys_vm86old",
4438 "sys_wait4",
4439 "sys_swapoff", /* 115 */
4440 "sys_sysinfo",
4441 "sys_ipc",
4442 "sys_fsync",
4443 "sys_sigreturn",
4444 "sys_clone", /* 120 */
4445 "sys_setdomainname",
4446 "sys_newuname",
4447 "sys_modify_ldt",
4448 "sys_adjtimex",
4449 "sys_mprotect", /* 125 */
4450 "sys_sigprocmask",
4451 "sys_ni_syscall", /* old "create_module" */
4452 "sys_init_module",
4453 "sys_delete_module",
4454 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4455 "sys_quotactl",
4456 "sys_getpgid",
4457 "sys_fchdir",
4458 "sys_bdflush",
4459 "sys_sysfs", /* 135 */
4460 "sys_personality",
4461 "sys_ni_syscall", /* reserved for afs_syscall */
4462 "sys_setfsuid16",
4463 "sys_setfsgid16",
4464 "sys_llseek", /* 140 */
4465 "sys_getdents",
4466 "sys_select",
4467 "sys_flock",
4468 "sys_msync",
4469 "sys_readv", /* 145 */
4470 "sys_writev",
4471 "sys_getsid",
4472 "sys_fdatasync",
4473 "sys_sysctl",
4474 "sys_mlock", /* 150 */
4475 "sys_munlock",
4476 "sys_mlockall",
4477 "sys_munlockall",
4478 "sys_sched_setparam",
4479 "sys_sched_getparam", /* 155 */
4480 "sys_sched_setscheduler",
4481 "sys_sched_getscheduler",
4482 "sys_sched_yield",
4483 "sys_sched_get_priority_max",
4484 "sys_sched_get_priority_min", /* 160 */
4485 "sys_sched_rr_get_interval",
4486 "sys_nanosleep",
4487 "sys_mremap",
4488 "sys_setresuid16",
4489 "sys_getresuid16", /* 165 */
4490 "sys_vm86",
4491 "sys_ni_syscall", /* Old sys_query_module */
4492 "sys_poll",
4493 "sys_nfsservctl",
4494 "sys_setresgid16", /* 170 */
4495 "sys_getresgid16",
4496 "sys_prctl",
4497 "sys_rt_sigreturn",
4498 "sys_rt_sigaction",
4499 "sys_rt_sigprocmask", /* 175 */
4500 "sys_rt_sigpending",
4501 "sys_rt_sigtimedwait",
4502 "sys_rt_sigqueueinfo",
4503 "sys_rt_sigsuspend",
4504 "sys_pread64", /* 180 */
4505 "sys_pwrite64",
4506 "sys_chown16",
4507 "sys_getcwd",
4508 "sys_capget",
4509 "sys_capset", /* 185 */
4510 "sys_sigaltstack",
4511 "sys_sendfile",
4512 "sys_ni_syscall", /* reserved for streams1 */
4513 "sys_ni_syscall", /* reserved for streams2 */
4514 "sys_vfork", /* 190 */
4515 "sys_getrlimit",
4516 "sys_mmap2",
4517 "sys_truncate64",
4518 "sys_ftruncate64",
4519 "sys_stat64", /* 195 */
4520 "sys_lstat64",
4521 "sys_fstat64",
4522 "sys_lchown",
4523 "sys_getuid",
4524 "sys_getgid", /* 200 */
4525 "sys_geteuid",
4526 "sys_getegid",
4527 "sys_setreuid",
4528 "sys_setregid",
4529 "sys_getgroups", /* 205 */
4530 "sys_setgroups",
4531 "sys_fchown",
4532 "sys_setresuid",
4533 "sys_getresuid",
4534 "sys_setresgid", /* 210 */
4535 "sys_getresgid",
4536 "sys_chown",
4537 "sys_setuid",
4538 "sys_setgid",
4539 "sys_setfsuid", /* 215 */
4540 "sys_setfsgid",
4541 "sys_pivot_root",
4542 "sys_mincore",
4543 "sys_madvise",
4544 "sys_getdents64", /* 220 */
4545 "sys_fcntl64",
4546 "sys_ni_syscall", /* reserved for TUX */
4547 "sys_ni_syscall",
4548 "sys_gettid",
4549 "sys_readahead", /* 225 */
4550 "sys_setxattr",
4551 "sys_lsetxattr",
4552 "sys_fsetxattr",
4553 "sys_getxattr",
4554 "sys_lgetxattr", /* 230 */
4555 "sys_fgetxattr",
4556 "sys_listxattr",
4557 "sys_llistxattr",
4558 "sys_flistxattr",
4559 "sys_removexattr", /* 235 */
4560 "sys_lremovexattr",
4561 "sys_fremovexattr",
4562 "sys_tkill",
4563 "sys_sendfile64",
4564 "sys_futex", /* 240 */
4565 "sys_sched_setaffinity",
4566 "sys_sched_getaffinity",
4567 "sys_set_thread_area",
4568 "sys_get_thread_area",
4569 "sys_io_setup", /* 245 */
4570 "sys_io_destroy",
4571 "sys_io_getevents",
4572 "sys_io_submit",
4573 "sys_io_cancel",
4574 "sys_fadvise64", /* 250 */
4575 "sys_ni_syscall",
4576 "sys_exit_group",
4577 "sys_lookup_dcookie",
4578 "sys_epoll_create",
4579 "sys_epoll_ctl", /* 255 */
4580 "sys_epoll_wait",
4581 "sys_remap_file_pages",
4582 "sys_set_tid_address",
4583 "sys_timer_create",
4584 "sys_timer_settime", /* 260 */
4585 "sys_timer_gettime",
4586 "sys_timer_getoverrun",
4587 "sys_timer_delete",
4588 "sys_clock_settime",
4589 "sys_clock_gettime", /* 265 */
4590 "sys_clock_getres",
4591 "sys_clock_nanosleep",
4592 "sys_statfs64",
4593 "sys_fstatfs64",
4594 "sys_tgkill", /* 270 */
4595 "sys_utimes",
4596 "sys_fadvise64_64",
4597 "sys_ni_syscall" /* sys_vserver */
4598 };
4599
4600 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4601 switch (uEAX)
4602 {
4603 default:
4604 if (uEAX < RT_ELEMENTS(apsz))
4605 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4606 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4607 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4608 else
4609 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4610 break;
4611
4612 }
4613}
4614
4615
4616/**
4617 * Dumps an OpenBSD system call.
4618 * @param pVM VM handle.
4619 */
4620void remR3DumpOBsdSyscall(PVM pVM)
4621{
4622 static const char *apsz[] =
4623 {
4624 "SYS_syscall", //0
4625 "SYS_exit", //1
4626 "SYS_fork", //2
4627 "SYS_read", //3
4628 "SYS_write", //4
4629 "SYS_open", //5
4630 "SYS_close", //6
4631 "SYS_wait4", //7
4632 "SYS_8",
4633 "SYS_link", //9
4634 "SYS_unlink", //10
4635 "SYS_11",
4636 "SYS_chdir", //12
4637 "SYS_fchdir", //13
4638 "SYS_mknod", //14
4639 "SYS_chmod", //15
4640 "SYS_chown", //16
4641 "SYS_break", //17
4642 "SYS_18",
4643 "SYS_19",
4644 "SYS_getpid", //20
4645 "SYS_mount", //21
4646 "SYS_unmount", //22
4647 "SYS_setuid", //23
4648 "SYS_getuid", //24
4649 "SYS_geteuid", //25
4650 "SYS_ptrace", //26
4651 "SYS_recvmsg", //27
4652 "SYS_sendmsg", //28
4653 "SYS_recvfrom", //29
4654 "SYS_accept", //30
4655 "SYS_getpeername", //31
4656 "SYS_getsockname", //32
4657 "SYS_access", //33
4658 "SYS_chflags", //34
4659 "SYS_fchflags", //35
4660 "SYS_sync", //36
4661 "SYS_kill", //37
4662 "SYS_38",
4663 "SYS_getppid", //39
4664 "SYS_40",
4665 "SYS_dup", //41
4666 "SYS_opipe", //42
4667 "SYS_getegid", //43
4668 "SYS_profil", //44
4669 "SYS_ktrace", //45
4670 "SYS_sigaction", //46
4671 "SYS_getgid", //47
4672 "SYS_sigprocmask", //48
4673 "SYS_getlogin", //49
4674 "SYS_setlogin", //50
4675 "SYS_acct", //51
4676 "SYS_sigpending", //52
4677 "SYS_osigaltstack", //53
4678 "SYS_ioctl", //54
4679 "SYS_reboot", //55
4680 "SYS_revoke", //56
4681 "SYS_symlink", //57
4682 "SYS_readlink", //58
4683 "SYS_execve", //59
4684 "SYS_umask", //60
4685 "SYS_chroot", //61
4686 "SYS_62",
4687 "SYS_63",
4688 "SYS_64",
4689 "SYS_65",
4690 "SYS_vfork", //66
4691 "SYS_67",
4692 "SYS_68",
4693 "SYS_sbrk", //69
4694 "SYS_sstk", //70
4695 "SYS_61",
4696 "SYS_vadvise", //72
4697 "SYS_munmap", //73
4698 "SYS_mprotect", //74
4699 "SYS_madvise", //75
4700 "SYS_76",
4701 "SYS_77",
4702 "SYS_mincore", //78
4703 "SYS_getgroups", //79
4704 "SYS_setgroups", //80
4705 "SYS_getpgrp", //81
4706 "SYS_setpgid", //82
4707 "SYS_setitimer", //83
4708 "SYS_84",
4709 "SYS_85",
4710 "SYS_getitimer", //86
4711 "SYS_87",
4712 "SYS_88",
4713 "SYS_89",
4714 "SYS_dup2", //90
4715 "SYS_91",
4716 "SYS_fcntl", //92
4717 "SYS_select", //93
4718 "SYS_94",
4719 "SYS_fsync", //95
4720 "SYS_setpriority", //96
4721 "SYS_socket", //97
4722 "SYS_connect", //98
4723 "SYS_99",
4724 "SYS_getpriority", //100
4725 "SYS_101",
4726 "SYS_102",
4727 "SYS_sigreturn", //103
4728 "SYS_bind", //104
4729 "SYS_setsockopt", //105
4730 "SYS_listen", //106
4731 "SYS_107",
4732 "SYS_108",
4733 "SYS_109",
4734 "SYS_110",
4735 "SYS_sigsuspend", //111
4736 "SYS_112",
4737 "SYS_113",
4738 "SYS_114",
4739 "SYS_115",
4740 "SYS_gettimeofday", //116
4741 "SYS_getrusage", //117
4742 "SYS_getsockopt", //118
4743 "SYS_119",
4744 "SYS_readv", //120
4745 "SYS_writev", //121
4746 "SYS_settimeofday", //122
4747 "SYS_fchown", //123
4748 "SYS_fchmod", //124
4749 "SYS_125",
4750 "SYS_setreuid", //126
4751 "SYS_setregid", //127
4752 "SYS_rename", //128
4753 "SYS_129",
4754 "SYS_130",
4755 "SYS_flock", //131
4756 "SYS_mkfifo", //132
4757 "SYS_sendto", //133
4758 "SYS_shutdown", //134
4759 "SYS_socketpair", //135
4760 "SYS_mkdir", //136
4761 "SYS_rmdir", //137
4762 "SYS_utimes", //138
4763 "SYS_139",
4764 "SYS_adjtime", //140
4765 "SYS_141",
4766 "SYS_142",
4767 "SYS_143",
4768 "SYS_144",
4769 "SYS_145",
4770 "SYS_146",
4771 "SYS_setsid", //147
4772 "SYS_quotactl", //148
4773 "SYS_149",
4774 "SYS_150",
4775 "SYS_151",
4776 "SYS_152",
4777 "SYS_153",
4778 "SYS_154",
4779 "SYS_nfssvc", //155
4780 "SYS_156",
4781 "SYS_157",
4782 "SYS_158",
4783 "SYS_159",
4784 "SYS_160",
4785 "SYS_getfh", //161
4786 "SYS_162",
4787 "SYS_163",
4788 "SYS_164",
4789 "SYS_sysarch", //165
4790 "SYS_166",
4791 "SYS_167",
4792 "SYS_168",
4793 "SYS_169",
4794 "SYS_170",
4795 "SYS_171",
4796 "SYS_172",
4797 "SYS_pread", //173
4798 "SYS_pwrite", //174
4799 "SYS_175",
4800 "SYS_176",
4801 "SYS_177",
4802 "SYS_178",
4803 "SYS_179",
4804 "SYS_180",
4805 "SYS_setgid", //181
4806 "SYS_setegid", //182
4807 "SYS_seteuid", //183
4808 "SYS_lfs_bmapv", //184
4809 "SYS_lfs_markv", //185
4810 "SYS_lfs_segclean", //186
4811 "SYS_lfs_segwait", //187
4812 "SYS_188",
4813 "SYS_189",
4814 "SYS_190",
4815 "SYS_pathconf", //191
4816 "SYS_fpathconf", //192
4817 "SYS_swapctl", //193
4818 "SYS_getrlimit", //194
4819 "SYS_setrlimit", //195
4820 "SYS_getdirentries", //196
4821 "SYS_mmap", //197
4822 "SYS___syscall", //198
4823 "SYS_lseek", //199
4824 "SYS_truncate", //200
4825 "SYS_ftruncate", //201
4826 "SYS___sysctl", //202
4827 "SYS_mlock", //203
4828 "SYS_munlock", //204
4829 "SYS_205",
4830 "SYS_futimes", //206
4831 "SYS_getpgid", //207
4832 "SYS_xfspioctl", //208
4833 "SYS_209",
4834 "SYS_210",
4835 "SYS_211",
4836 "SYS_212",
4837 "SYS_213",
4838 "SYS_214",
4839 "SYS_215",
4840 "SYS_216",
4841 "SYS_217",
4842 "SYS_218",
4843 "SYS_219",
4844 "SYS_220",
4845 "SYS_semget", //221
4846 "SYS_222",
4847 "SYS_223",
4848 "SYS_224",
4849 "SYS_msgget", //225
4850 "SYS_msgsnd", //226
4851 "SYS_msgrcv", //227
4852 "SYS_shmat", //228
4853 "SYS_229",
4854 "SYS_shmdt", //230
4855 "SYS_231",
4856 "SYS_clock_gettime", //232
4857 "SYS_clock_settime", //233
4858 "SYS_clock_getres", //234
4859 "SYS_235",
4860 "SYS_236",
4861 "SYS_237",
4862 "SYS_238",
4863 "SYS_239",
4864 "SYS_nanosleep", //240
4865 "SYS_241",
4866 "SYS_242",
4867 "SYS_243",
4868 "SYS_244",
4869 "SYS_245",
4870 "SYS_246",
4871 "SYS_247",
4872 "SYS_248",
4873 "SYS_249",
4874 "SYS_minherit", //250
4875 "SYS_rfork", //251
4876 "SYS_poll", //252
4877 "SYS_issetugid", //253
4878 "SYS_lchown", //254
4879 "SYS_getsid", //255
4880 "SYS_msync", //256
4881 "SYS_257",
4882 "SYS_258",
4883 "SYS_259",
4884 "SYS_getfsstat", //260
4885 "SYS_statfs", //261
4886 "SYS_fstatfs", //262
4887 "SYS_pipe", //263
4888 "SYS_fhopen", //264
4889 "SYS_265",
4890 "SYS_fhstatfs", //266
4891 "SYS_preadv", //267
4892 "SYS_pwritev", //268
4893 "SYS_kqueue", //269
4894 "SYS_kevent", //270
4895 "SYS_mlockall", //271
4896 "SYS_munlockall", //272
4897 "SYS_getpeereid", //273
4898 "SYS_274",
4899 "SYS_275",
4900 "SYS_276",
4901 "SYS_277",
4902 "SYS_278",
4903 "SYS_279",
4904 "SYS_280",
4905 "SYS_getresuid", //281
4906 "SYS_setresuid", //282
4907 "SYS_getresgid", //283
4908 "SYS_setresgid", //284
4909 "SYS_285",
4910 "SYS_mquery", //286
4911 "SYS_closefrom", //287
4912 "SYS_sigaltstack", //288
4913 "SYS_shmget", //289
4914 "SYS_semop", //290
4915 "SYS_stat", //291
4916 "SYS_fstat", //292
4917 "SYS_lstat", //293
4918 "SYS_fhstat", //294
4919 "SYS___semctl", //295
4920 "SYS_shmctl", //296
4921 "SYS_msgctl", //297
4922 "SYS_MAXSYSCALL", //298
4923 //299
4924 //300
4925 };
4926 uint32_t uEAX;
4927 if (!LogIsEnabled())
4928 return;
4929 uEAX = CPUMGetGuestEAX(pVM);
4930 switch (uEAX)
4931 {
4932 default:
4933 if (uEAX < RT_ELEMENTS(apsz))
4934 {
4935 uint32_t au32Args[8] = {0};
4936 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
4937 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
4938 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
4939 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
4940 }
4941 else
4942 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
4943 break;
4944 }
4945}
4946
4947
4948#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Used for the IPRT_NO_CRT x86 windows build; there is nothing to
 * initialize, so it reports success for every reason code.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
4956
/**
 * Minimal byte-wise memcpy for the no-CRT build.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer; must not overlap dst.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    /* Keep the source pointer const qualified; the original assigned a
       const void * to a plain uint8_t *, discarding the qualifier. */
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
4964
4965#endif
4966
/** qemu callback for SMM state updates; intentionally left empty here. */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette