VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 18650

Last change on this file since 18650 was 18650, checked in by vboxsync, 16 years ago

remR3changeCpuMode: deal with rescheduling status codes from PGMChangeMode (i.e. handling the suspend status when switching to PAE without having enabled PAE support).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 155.8 KB
Line 
1/* $Id: VBoxRecompiler.c 18650 2009-04-02 16:26:48Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy the 80-bit FPU register at pSrc to pDst.
 * Expands to a single struct assignment of the X86FPUMMX overlay, which is
 * probably faster than *calling* memcpy for such a small fixed-size copy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145#endif
146/* in exec.c */
147extern uint32_t tlb_flush_count;
148extern uint32_t tb_flush_count;
149extern uint32_t tb_phys_invalidate_count;
150
151/*
152 * Global stuff.
153 */
154
/** MMIO read callbacks.
 * Indexed by access size: [0] = byte, [1] = word, [2] = dword
 * (standard QEMU io-memory table layout). Registered with
 * cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Same size-indexed layout as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (for pages covered by PGM access handlers).
 * Same size-indexed layout as the MMIO tables. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks.
 * Same size-indexed layout as the MMIO tables. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
186
187
188#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
189/*
190 * Debugger commands.
191 */
192static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
193
/** '.remstep' arguments.
 * Single optional numeric/boolean argument selecting the new state. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once with DBGCRegisterCommands() from REMR3Init.
 * Handled by remR3CmdDisasEnableStepping. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
218#endif
219
220/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
221uint8_t *code_gen_prologue;
222
223
224/*******************************************************************************
225* Internal Functions *
226*******************************************************************************/
227void remAbort(int rc, const char *pszTip);
228extern int testmath(void);
229
230/* Put them here to avoid unused variable warning. */
231AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
232#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
233//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
234/* Why did this have to be identical?? */
235AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
236#else
237AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
238#endif
239
240
241/**
242 * Initializes the REM.
243 *
244 * @returns VBox status code.
245 * @param pVM The VM to operate on.
246 */
247REMR3DECL(int) REMR3Init(PVM pVM)
248{
249 uint32_t u32Dummy;
250 int rc;
251
252#ifdef VBOX_ENABLE_VBOXREM64
253 LogRel(("Using 64-bit aware REM\n"));
254#endif
255
256 /*
257 * Assert sanity.
258 */
259 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
260 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
261 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
262#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
263 Assert(!testmath());
264#endif
265
266 /*
267 * Init some internal data members.
268 */
269 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
270 pVM->rem.s.Env.pVM = pVM;
271#ifdef CPU_RAW_MODE_INIT
272 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
273#endif
274
275 /* ctx. */
276 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
277 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
278
279 /* ignore all notifications */
280 pVM->rem.s.fIgnoreAll = true;
281
282 code_gen_prologue = RTMemExecAlloc(_1K);
283 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
284
285 cpu_exec_init_all(0);
286
287 /*
288 * Init the recompiler.
289 */
290 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
291 {
292 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
293 return VERR_GENERAL_FAILURE;
294 }
295 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
296 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
297
298 /* allocate code buffer for single instruction emulation. */
299 pVM->rem.s.Env.cbCodeBuffer = 4096;
300 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
301 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
302
303 /* finally, set the cpu_single_env global. */
304 cpu_single_env = &pVM->rem.s.Env;
305
306 /* Nothing is pending by default */
307 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
308
309 /*
310 * Register ram types.
311 */
312 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
313 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
314 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
315 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
316 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
317
318 /* stop ignoring. */
319 pVM->rem.s.fIgnoreAll = false;
320
321 /*
322 * Register the saved state data unit.
323 */
324 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
325 NULL, remR3Save, NULL,
326 NULL, remR3Load, NULL);
327 if (RT_FAILURE(rc))
328 return rc;
329
330#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
331 /*
332 * Debugger commands.
333 */
334 static bool fRegisteredCmds = false;
335 if (!fRegisteredCmds)
336 {
337 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
338 if (RT_SUCCESS(rc))
339 fRegisteredCmds = true;
340 }
341#endif
342
343#ifdef VBOX_WITH_STATISTICS
344 /*
345 * Statistics.
346 */
347 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
348 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
349 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
350 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
351 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
352 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
353 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
354 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
355 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
356 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
357 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
358 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
359
360 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
361
362 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
363 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
364 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
365 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
366 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
367 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
368 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
369 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
370 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
371 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
372 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
373
374 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
375 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
376 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
377 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
378
379 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
385
386 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
387 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
388 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
389 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
390 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
391 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
392
393 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
394#endif /* VBOX_WITH_STATISTICS */
395
396 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
397 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
398 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
399
400
401#ifdef DEBUG_ALL_LOGGING
402 loglevel = ~0;
403# ifdef DEBUG_TMP_LOGGING
404 logfile = fopen("/tmp/vbox-qemu.log", "w");
405# endif
406#endif
407
408 return rc;
409}
410
411
412/**
413 * Finalizes the REM initialization.
414 *
415 * This is called after all components, devices and drivers has
416 * been initialized. Its main purpose it to finish the RAM related
417 * initialization.
418 *
419 * @returns VBox status code.
420 *
421 * @param pVM The VM handle.
422 */
423REMR3DECL(int) REMR3InitFinalize(PVM pVM)
424{
425 int rc;
426
427 /*
428 * Ram size & dirty bit map.
429 */
430 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
431 pVM->rem.s.fGCPhysLastRamFixed = true;
432#ifdef RT_STRICT
433 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
434#else
435 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
436#endif
437 return rc;
438}
439
440
441/**
442 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
443 *
444 * @returns VBox status code.
445 * @param pVM The VM handle.
446 * @param fGuarded Whether to guard the map.
447 */
448static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
449{
450 int rc = VINF_SUCCESS;
451 RTGCPHYS cb;
452
453 cb = pVM->rem.s.GCPhysLastRam + 1;
454 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
455 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
456 VERR_OUT_OF_RANGE);
457 phys_ram_size = cb;
458 phys_ram_dirty_size = cb >> PAGE_SHIFT;
459 AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
460
461 if (!fGuarded)
462 {
463 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
464 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
465 }
466 else
467 {
468 /*
469 * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
470 */
471 uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
472 uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
473 if (cbBitmapFull == cbBitmapAligned)
474 cbBitmapFull += _4G >> PAGE_SHIFT;
475 else if (cbBitmapFull - cbBitmapAligned < _64K)
476 cbBitmapFull += _64K;
477
478 phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
479 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
480
481 rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
482 if (RT_FAILURE(rc))
483 {
484 RTMemPageFree(phys_ram_dirty);
485 AssertLogRelRCReturn(rc, rc);
486 }
487
488 phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
489 }
490
491 /* initialize it. */
492 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
493 return rc;
494}
495
496
497/**
498 * Terminates the REM.
499 *
500 * Termination means cleaning up and freeing all resources,
501 * the VM it self is at this point powered off or suspended.
502 *
503 * @returns VBox status code.
504 * @param pVM The VM to operate on.
505 */
506REMR3DECL(int) REMR3Term(PVM pVM)
507{
508 return VINF_SUCCESS;
509}
510
511
512/**
513 * The VM is being reset.
514 *
515 * For the REM component this means to call the cpu_reset() and
516 * reinitialize some state variables.
517 *
518 * @param pVM VM handle.
519 */
520REMR3DECL(void) REMR3Reset(PVM pVM)
521{
522 /*
523 * Reset the REM cpu.
524 */
525 pVM->rem.s.fIgnoreAll = true;
526 cpu_reset(&pVM->rem.s.Env);
527 pVM->rem.s.cInvalidatedPages = 0;
528 pVM->rem.s.fIgnoreAll = false;
529
530 /* Clear raw ring 0 init state */
531 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
532
533 /* Flush the TBs the next time we execute code here. */
534 pVM->rem.s.fFlushTBs = true;
535}
536
537
538/**
539 * Execute state save operation.
540 *
541 * @returns VBox status code.
542 * @param pVM VM Handle.
543 * @param pSSM SSM operation handle.
544 */
545static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
546{
547 PREM pRem = &pVM->rem.s;
548
549 /*
550 * Save the required CPU Env bits.
551 * (Not much because we're never in REM when doing the save.)
552 */
553 LogFlow(("remR3Save:\n"));
554 Assert(!pRem->fInREM);
555 SSMR3PutU32(pSSM, pRem->Env.hflags);
556 SSMR3PutU32(pSSM, ~0); /* separator */
557
558 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
559 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
560 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
561
562 return SSMR3PutU32(pSSM, ~0); /* terminator */
563}
564
565
566/**
567 * Execute state load operation.
568 *
569 * @returns VBox status code.
570 * @param pVM VM Handle.
571 * @param pSSM SSM operation handle.
572 * @param u32Version Data layout version.
573 */
574static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
575{
576 uint32_t u32Dummy;
577 uint32_t fRawRing0 = false;
578 uint32_t u32Sep;
579 int rc;
580 PREM pRem;
581 LogFlow(("remR3Load:\n"));
582
583 /*
584 * Validate version.
585 */
586 if ( u32Version != REM_SAVED_STATE_VERSION
587 && u32Version != REM_SAVED_STATE_VERSION_VER1_6)
588 {
589 AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
590 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
591 }
592
593 /*
594 * Do a reset to be on the safe side...
595 */
596 REMR3Reset(pVM);
597
598 /*
599 * Ignore all ignorable notifications.
600 * (Not doing this will cause serious trouble.)
601 */
602 pVM->rem.s.fIgnoreAll = true;
603
604 /*
605 * Load the required CPU Env bits.
606 * (Not much because we're never in REM when doing the save.)
607 */
608 pRem = &pVM->rem.s;
609 Assert(!pRem->fInREM);
610 SSMR3GetU32(pSSM, &pRem->Env.hflags);
611 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
612 {
613 /* Redundant REM CPU state has to be loaded, but can be ignored. */
614 CPUX86State_Ver16 temp;
615 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
616 }
617
618 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
619 if (RT_FAILURE(rc))
620 return rc;
621 if (u32Sep != ~0U)
622 {
623 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
624 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
625 }
626
627 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
628 SSMR3GetUInt(pSSM, &fRawRing0);
629 if (fRawRing0)
630 pRem->Env.state |= CPU_RAW_RING0;
631
632 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
633 {
634 unsigned i;
635
636 /*
637 * Load the REM stuff.
638 */
639 rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
640 if (RT_FAILURE(rc))
641 return rc;
642 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
643 {
644 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
645 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
646 }
647 for (i = 0; i < pRem->cInvalidatedPages; i++)
648 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
649 }
650
651 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
652 if (RT_FAILURE(rc))
653 return rc;
654
655 /* check the terminator. */
656 rc = SSMR3GetU32(pSSM, &u32Sep);
657 if (RT_FAILURE(rc))
658 return rc;
659 if (u32Sep != ~0U)
660 {
661 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
662 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
663 }
664
665 /*
666 * Get the CPUID features.
667 */
668 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
669 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
670
671 /*
672 * Sync the Load Flush the TLB
673 */
674 tlb_flush(&pRem->Env, 1);
675
676 /*
677 * Stop ignoring ignornable notifications.
678 */
679 pVM->rem.s.fIgnoreAll = false;
680
681 /*
682 * Sync the whole CPU state when executing code in the recompiler.
683 */
684 CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
685 return VINF_SUCCESS;
686}
687
688
689
690#undef LOG_GROUP
691#define LOG_GROUP LOG_GROUP_REM_RUN
692
693/**
694 * Single steps an instruction in recompiled mode.
695 *
696 * Before calling this function the REM state needs to be in sync with
697 * the VM. Call REMR3State() to perform the sync. It's only necessary
698 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
699 * and after calling REMR3StateBack().
700 *
701 * @returns VBox status code.
702 *
703 * @param pVM VM Handle.
704 */
705REMR3DECL(int) REMR3Step(PVM pVM)
706{
707 int rc, interrupt_request;
708 RTGCPTR GCPtrPC;
709 bool fBp;
710
711 /*
712 * Lock the REM - we don't wanna have anyone interrupting us
713 * while stepping - and enabled single stepping. We also ignore
714 * pending interrupts and suchlike.
715 */
716 interrupt_request = pVM->rem.s.Env.interrupt_request;
717 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
718 pVM->rem.s.Env.interrupt_request = 0;
719 cpu_single_step(&pVM->rem.s.Env, 1);
720
721 /*
722 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
723 */
724 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
725 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
726
727 /*
728 * Execute and handle the return code.
729 * We execute without enabling the cpu tick, so on success we'll
730 * just flip it on and off to make sure it moves
731 */
732 rc = cpu_exec(&pVM->rem.s.Env);
733 if (rc == EXCP_DEBUG)
734 {
735 TMCpuTickResume(pVM);
736 TMCpuTickPause(pVM);
737 TMVirtualResume(pVM);
738 TMVirtualPause(pVM);
739 rc = VINF_EM_DBG_STEPPED;
740 }
741 else
742 {
743 switch (rc)
744 {
745 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
746 case EXCP_HLT:
747 case EXCP_HALTED: rc = VINF_EM_HALT; break;
748 case EXCP_RC:
749 rc = pVM->rem.s.rc;
750 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
751 break;
752 case EXCP_EXECUTE_RAW:
753 case EXCP_EXECUTE_HWACC:
754 /** @todo: is it correct? No! */
755 rc = VINF_SUCCESS;
756 break;
757 default:
758 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
759 rc = VERR_INTERNAL_ERROR;
760 break;
761 }
762 }
763
764 /*
765 * Restore the stuff we changed to prevent interruption.
766 * Unlock the REM.
767 */
768 if (fBp)
769 {
770 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
771 Assert(rc2 == 0); NOREF(rc2);
772 }
773 cpu_single_step(&pVM->rem.s.Env, 0);
774 pVM->rem.s.Env.interrupt_request = interrupt_request;
775
776 return rc;
777}
778
779
780/**
781 * Set a breakpoint using the REM facilities.
782 *
783 * @returns VBox status code.
784 * @param pVM The VM handle.
785 * @param Address The breakpoint address.
786 * @thread The emulation thread.
787 */
788REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
789{
790 VM_ASSERT_EMT(pVM);
791 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
792 {
793 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
794 return VINF_SUCCESS;
795 }
796 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
797 return VERR_REM_NO_MORE_BP_SLOTS;
798}
799
800
801/**
802 * Clears a breakpoint set by REMR3BreakpointSet().
803 *
804 * @returns VBox status code.
805 * @param pVM The VM handle.
806 * @param Address The breakpoint address.
807 * @thread The emulation thread.
808 */
809REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
810{
811 VM_ASSERT_EMT(pVM);
812 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
813 {
814 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
815 return VINF_SUCCESS;
816 }
817 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
818 return VERR_REM_BP_NOT_FOUND;
819}
820
821
822/**
823 * Emulate an instruction.
824 *
825 * This function executes one instruction without letting anyone
826 * interrupt it. This is intended for being called while being in
827 * raw mode and thus will take care of all the state syncing between
828 * REM and the rest.
829 *
830 * @returns VBox status code.
831 * @param pVM VM handle.
832 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs; /* restore the pending TB-flush request for the next full run */
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt request mask; it is replaced by CPU_INTERRUPT_SINGLE_INSTR
         * for the duration of the run and restored afterwards (see bottom of this scope). */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        /* Translate the EXCP_* exit reason from cpu_exec into a VBox status code. */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the REM breakpoint list for the current PC. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* consume the stored status so it isn't returned twice */
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
969
970
971/**
972 * Runs code in recompiled mode.
973 *
974 * Before calling this function the REM state needs to be in sync with
975 * the VM. Call REMR3State() to perform the sync. It's only necessary
976 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
977 * and after calling REMR3StateBack().
978 *
979 * @returns VBox status code.
980 *
981 * @param pVM VM Handle.
982 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);
    /* Translate the EXCP_* exit reason from cpu_exec into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the REM breakpoint list for the current PC. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* consume the stored status so it isn't returned twice */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1098
1099
1100/**
1101 * Check if the cpu state is suitable for Raw execution.
1102 *
1103 * @returns boolean
1104 * @param env The CPU env struct.
1105 * @param eip The EIP to check this for (might differ from env->eip).
1106 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1107 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1108 *
1109 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1110 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. (Also consulted at the end to refuse the very first call.) */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         * (only the fields it inspects: CRx, task/descriptor registers,
         * rip/rsp/eflags, segment registers and EFER).
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging (PG and PE both set). */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE paging is only allowed when the guest CPUID reports PAE support. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code: requires raw ring-3 execution to be enabled, interrupts on,
         * and (when raw ring-0 is also active) CR0.WP set. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring 0-2 code: only pure 32-bit ring-0 with CR0.WP is accepted,
         * except for PATM patch code which is always run raw. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1334
1335
1336/**
1337 * Fetches a code byte.
1338 *
1339 * @returns Success indicator (bool) for ease of use.
1340 * @param env The CPU environment structure.
1341 * @param GCPtrInstr Where to fetch code.
1342 * @param pu8Byte Where to store the byte on success
1343 */
1344bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1345{
1346 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1347 if (RT_SUCCESS(rc))
1348 return true;
1349 return false;
1350}
1351
1352
1353/**
1354 * Flush (or invalidate if you like) page table/dir entry.
1355 *
1356 * (invlpg instruction; tlb_flush_page)
1357 *
1358 * @param env Pointer to cpu environment.
1359 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1360 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change is flagged so SELM resyncs the TSS. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    rc = PGMInvalidatePage(pVM, GCPtr);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        /* Fall back to a full CR3 sync when the single-page invalidation fails. */
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1399
1400
1401#ifndef REM_PHYS_ADDR_IN_TLB
1402/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored here; the query always
     * passes fWritable=true - confirm this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
         || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
         || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
         || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    /* Failure (catch-all/unassigned) is signalled as (void *)1;
     * write-monitored pages get bit 1 set in the returned pointer. */
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1422#endif /* REM_PHYS_ADDR_IN_TLB */
1423
1424
1425/**
1426 * Called from tlb_protect_code in order to write monitor a code page.
1427 *
1428 * @param env Pointer to the CPU environment.
1429 * @param GCPtr Code page to monitor
1430 */
1431void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1432{
1433#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1434 Assert(env->pVM->rem.s.fInREM);
1435 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1436 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1437 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1438 && !(env->eflags & VM_MASK) /* no V86 mode */
1439 && !HWACCMIsEnabled(env->pVM))
1440 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1441#endif
1442}
1443
1444
1445/**
1446 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1447 *
1448 * @param env Pointer to the CPU environment.
1449 * @param GCPtr Code page to monitor
1450 */
1451void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1452{
1453 Assert(env->pVM->rem.s.fInREM);
1454#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1455 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1456 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1457 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1458 && !(env->eflags & VM_MASK) /* no V86 mode */
1459 && !HWACCMIsEnabled(env->pVM))
1460 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1461#endif
1462}
1463
1464
1465/**
1466 * Called when the CPU is initialized, any of the CRx registers are changed or
1467 * when the A20 line is modified.
1468 *
1469 * @param env Pointer to the CPU environment.
1470 * @param fGlobal Set if the flush is global.
1471 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE a non-global flush is effectively global anyway.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change is flagged so SELM resyncs the TSS. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    PGMFlushTLB(pVM, env->cr[3], fGlobal);
}
1507
1508
1509/**
1510 * Called when any of the cr0, cr4 or efer registers is updated.
1511 *
1512 * @param env Pointer to the CPU environment.
1513 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change is flagged so SELM resyncs the TSS. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    efer = 0; /* no EFER on 32-bit-only targets */
#endif
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* EM rescheduling statuses (e.g. suspend when switching to PAE without
             * PAE support enabled) are raised to the caller via remR3RaiseRC. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1557
1558
1559/**
1560 * Called from compiled code to run dma.
1561 *
1562 * @param env Pointer to the CPU environment.
1563 */
1564void remR3DmaRun(CPUState *env)
1565{
1566 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1567 PDMR3DmaRun(env->pVM);
1568 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1569}
1570
1571
1572/**
1573 * Called from compiled code to schedule pending timers in VMM
1574 *
1575 * @param env Pointer to the CPU environment.
1576 */
1577void remR3TimersRun(CPUState *env)
1578{
1579 LogFlow(("remR3TimersRun:\n"));
1580 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1581 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1582 TMR3TimerQueuesDo(env->pVM);
1583 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1584 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1585}
1586
1587
1588/**
1589 * Record trap occurance
1590 *
1591 * @returns VBox status code
1592 * @param env Pointer to the CPU environment.
1593 * @param uTrap Trap nr
1594 * @param uErrorCode Error code
1595 * @param pvNextEIP Next EIP
1596 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-trap counters (vectors 0..254). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (vector < 0x20) taken in protected, non-V86 mode feed
     * the repeated-trap guard below. */
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* More than 512 consecutive occurrences of the same pending exception:
         * give up and raise VERR_REM_TOO_MANY_TRAPS. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* New trap vector or a different EIP/CR2: restart the repeat count at 1. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    else
    {
        /* Not a tracked CPU exception: record it but reset the repeat counter. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    return VINF_SUCCESS;
}
1647
1648
1649/*
1650 * Clear current active trap
1651 *
1652 * @param pVM VM Handle.
1653 */
1654void remR3TrapClear(PVM pVM)
1655{
1656 pVM->rem.s.cPendingExceptions = 0;
1657 pVM->rem.s.uPendingException = 0;
1658 pVM->rem.s.uPendingExcptEIP = 0;
1659 pVM->rem.s.uPendingExcptCR2 = 0;
1660}
1661
1662
1663/*
1664 * Record previous call instruction addresses
1665 *
1666 * @param env Pointer to the CPU environment.
1667 */
void remR3RecordCall(CPUState *env)
{
    /* Feed the current EIP into CSAM's call-address history. */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1672
1673
1674/**
1675 * Syncs the internal REM state with the VM.
1676 *
1677 * This must be called before REMR3Run() is invoked whenever when the REM
1678 * state is not up to date. Calling it several times in a row is not
1679 * permitted.
1680 *
1681 * @returns VBox status code.
1682 *
1683 * @param pVM VM Handle.
1684 *
1685 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1686 * no do this since the majority of the callers don't want any unnecessary of events
1687 * pending that would immediatly interrupt execution.
1688 */
1689REMR3DECL(int) REMR3State(PVM pVM)
1690{
1691 register const CPUMCTX *pCtx;
1692 register unsigned fFlags;
1693 bool fHiddenSelRegsValid;
1694 unsigned i;
1695 TRPMEVENT enmType;
1696 uint8_t u8TrapNo;
1697 int rc;
1698
1699 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1700 Log2(("REMR3State:\n"));
1701
1702 pCtx = pVM->rem.s.pCtx;
1703 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1704
1705 Assert(!pVM->rem.s.fInREM);
1706 pVM->rem.s.fInStateSync = true;
1707
1708 /*
1709 * If we have to flush TBs, do that immediately.
1710 */
1711 if (pVM->rem.s.fFlushTBs)
1712 {
1713 STAM_COUNTER_INC(&gStatFlushTBs);
1714 tb_flush(&pVM->rem.s.Env);
1715 pVM->rem.s.fFlushTBs = false;
1716 }
1717
1718 /*
1719 * Copy the registers which require no special handling.
1720 */
1721#ifdef TARGET_X86_64
1722 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1723 Assert(R_EAX == 0);
1724 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1725 Assert(R_ECX == 1);
1726 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1727 Assert(R_EDX == 2);
1728 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1729 Assert(R_EBX == 3);
1730 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1731 Assert(R_ESP == 4);
1732 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1733 Assert(R_EBP == 5);
1734 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1735 Assert(R_ESI == 6);
1736 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1737 Assert(R_EDI == 7);
1738 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1739 pVM->rem.s.Env.regs[8] = pCtx->r8;
1740 pVM->rem.s.Env.regs[9] = pCtx->r9;
1741 pVM->rem.s.Env.regs[10] = pCtx->r10;
1742 pVM->rem.s.Env.regs[11] = pCtx->r11;
1743 pVM->rem.s.Env.regs[12] = pCtx->r12;
1744 pVM->rem.s.Env.regs[13] = pCtx->r13;
1745 pVM->rem.s.Env.regs[14] = pCtx->r14;
1746 pVM->rem.s.Env.regs[15] = pCtx->r15;
1747
1748 pVM->rem.s.Env.eip = pCtx->rip;
1749
1750 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1751#else
1752 Assert(R_EAX == 0);
1753 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1754 Assert(R_ECX == 1);
1755 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1756 Assert(R_EDX == 2);
1757 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1758 Assert(R_EBX == 3);
1759 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1760 Assert(R_ESP == 4);
1761 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1762 Assert(R_EBP == 5);
1763 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1764 Assert(R_ESI == 6);
1765 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1766 Assert(R_EDI == 7);
1767 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1768 pVM->rem.s.Env.eip = pCtx->eip;
1769
1770 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1771#endif
1772
1773 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1774
1775 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1776 for (i=0;i<8;i++)
1777 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1778
1779 /*
1780 * Clear the halted hidden flag (the interrupt waking up the CPU can
1781 * have been dispatched in raw mode).
1782 */
1783 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1784
1785 /*
1786 * Replay invlpg?
1787 */
1788 if (pVM->rem.s.cInvalidatedPages)
1789 {
1790 RTUINT i;
1791
1792 pVM->rem.s.fIgnoreInvlPg = true;
1793 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1794 {
1795 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1796 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1797 }
1798 pVM->rem.s.fIgnoreInvlPg = false;
1799 pVM->rem.s.cInvalidatedPages = 0;
1800 }
1801
1802 /* Replay notification changes? */
1803 if (pVM->rem.s.cHandlerNotifications)
1804 REMR3ReplayHandlerNotifications(pVM);
1805
1806 /* Update MSRs; before CRx registers! */
1807 pVM->rem.s.Env.efer = pCtx->msrEFER;
1808 pVM->rem.s.Env.star = pCtx->msrSTAR;
1809 pVM->rem.s.Env.pat = pCtx->msrPAT;
1810#ifdef TARGET_X86_64
1811 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1812 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1813 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1814 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1815
1816 /* Update the internal long mode activate flag according to the new EFER value. */
1817 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1818 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1819 else
1820 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1821#endif
1822
1823 /*
1824 * Registers which are rarely changed and require special handling / order when changed.
1825 */
1826 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1827 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1828 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1829 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1830 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1831 {
1832 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1833 {
1834 pVM->rem.s.fIgnoreCR3Load = true;
1835 tlb_flush(&pVM->rem.s.Env, true);
1836 pVM->rem.s.fIgnoreCR3Load = false;
1837 }
1838
1839 /* CR4 before CR0! */
1840 if (fFlags & CPUM_CHANGED_CR4)
1841 {
1842 pVM->rem.s.fIgnoreCR3Load = true;
1843 pVM->rem.s.fIgnoreCpuMode = true;
1844 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1845 pVM->rem.s.fIgnoreCpuMode = false;
1846 pVM->rem.s.fIgnoreCR3Load = false;
1847 }
1848
1849 if (fFlags & CPUM_CHANGED_CR0)
1850 {
1851 pVM->rem.s.fIgnoreCR3Load = true;
1852 pVM->rem.s.fIgnoreCpuMode = true;
1853 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1854 pVM->rem.s.fIgnoreCpuMode = false;
1855 pVM->rem.s.fIgnoreCR3Load = false;
1856 }
1857
1858 if (fFlags & CPUM_CHANGED_CR3)
1859 {
1860 pVM->rem.s.fIgnoreCR3Load = true;
1861 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1862 pVM->rem.s.fIgnoreCR3Load = false;
1863 }
1864
1865 if (fFlags & CPUM_CHANGED_GDTR)
1866 {
1867 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1868 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1869 }
1870
1871 if (fFlags & CPUM_CHANGED_IDTR)
1872 {
1873 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1874 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1875 }
1876
1877 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1878 {
1879 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1880 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1881 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1882 }
1883
1884 if (fFlags & CPUM_CHANGED_LDTR)
1885 {
1886 if (fHiddenSelRegsValid)
1887 {
1888 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1889 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1890 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1891 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1892 }
1893 else
1894 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1895 }
1896
1897 if (fFlags & CPUM_CHANGED_CPUID)
1898 {
1899 uint32_t u32Dummy;
1900
1901 /*
1902 * Get the CPUID features.
1903 */
1904 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1905 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1906 }
1907
1908 /* Sync FPU state after CR4, CPUID and EFER (!). */
1909 if (fFlags & CPUM_CHANGED_FPU_REM)
1910 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1911 }
1912
1913 /*
1914 * Sync TR unconditionally to make life simpler.
1915 */
1916 pVM->rem.s.Env.tr.selector = pCtx->tr;
1917 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1918 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1919 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1920 /* Note! do_interrupt will fault if the busy flag is still set... */
1921 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1922
1923 /*
1924 * Update selector registers.
1925 * This must be done *after* we've synced gdt, ldt and crX registers
1926 * since we're reading the GDT/LDT om sync_seg. This will happen with
1927 * saved state which takes a quick dip into rawmode for instance.
1928 */
1929 /*
1930 * Stack; Note first check this one as the CPL might have changed. The
1931 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1932 */
1933
1934 if (fHiddenSelRegsValid)
1935 {
1936 /* The hidden selector registers are valid in the CPU context. */
1937 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1938
1939 /* Set current CPL */
1940 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1941
1942 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1943 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1944 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1945 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1946 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1947 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1948 }
1949 else
1950 {
1951 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1952 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1953 {
1954 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1955
1956 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1957 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1958#ifdef VBOX_WITH_STATISTICS
1959 if (pVM->rem.s.Env.segs[R_SS].newselector)
1960 {
1961 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1962 }
1963#endif
1964 }
1965 else
1966 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1967
1968 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1969 {
1970 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1971 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1972#ifdef VBOX_WITH_STATISTICS
1973 if (pVM->rem.s.Env.segs[R_ES].newselector)
1974 {
1975 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1976 }
1977#endif
1978 }
1979 else
1980 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1981
1982 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1983 {
1984 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1985 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1986#ifdef VBOX_WITH_STATISTICS
1987 if (pVM->rem.s.Env.segs[R_CS].newselector)
1988 {
1989 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1990 }
1991#endif
1992 }
1993 else
1994 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1995
1996 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1997 {
1998 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1999 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2000#ifdef VBOX_WITH_STATISTICS
2001 if (pVM->rem.s.Env.segs[R_DS].newselector)
2002 {
2003 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2004 }
2005#endif
2006 }
2007 else
2008 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2009
2010 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2011 * be the same but not the base/limit. */
2012 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2013 {
2014 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2015 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2016#ifdef VBOX_WITH_STATISTICS
2017 if (pVM->rem.s.Env.segs[R_FS].newselector)
2018 {
2019 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2020 }
2021#endif
2022 }
2023 else
2024 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2025
2026 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2027 {
2028 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2029 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2030#ifdef VBOX_WITH_STATISTICS
2031 if (pVM->rem.s.Env.segs[R_GS].newselector)
2032 {
2033 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2034 }
2035#endif
2036 }
2037 else
2038 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2039 }
2040
2041 /*
2042 * Check for traps.
2043 */
2044 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2045 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2046 if (RT_SUCCESS(rc))
2047 {
2048#ifdef DEBUG
2049 if (u8TrapNo == 0x80)
2050 {
2051 remR3DumpLnxSyscall(pVM);
2052 remR3DumpOBsdSyscall(pVM);
2053 }
2054#endif
2055
2056 pVM->rem.s.Env.exception_index = u8TrapNo;
2057 if (enmType != TRPM_SOFTWARE_INT)
2058 {
2059 pVM->rem.s.Env.exception_is_int = 0;
2060 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2061 }
2062 else
2063 {
2064 /*
2065 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2066 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2067 * for int03 and into.
2068 */
2069 pVM->rem.s.Env.exception_is_int = 1;
2070 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2071 /* int 3 may be generated by one-byte 0xcc */
2072 if (u8TrapNo == 3)
2073 {
2074 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2075 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2076 }
2077 /* int 4 may be generated by one-byte 0xce */
2078 else if (u8TrapNo == 4)
2079 {
2080 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2081 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2082 }
2083 }
2084
2085 /* get error code and cr2 if needed. */
2086 switch (u8TrapNo)
2087 {
2088 case 0x0e:
2089 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2090 /* fallthru */
2091 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2092 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2093 break;
2094
2095 case 0x11: case 0x08:
2096 default:
2097 pVM->rem.s.Env.error_code = 0;
2098 break;
2099 }
2100
2101 /*
2102 * We can now reset the active trap since the recompiler is gonna have a go at it.
2103 */
2104 rc = TRPMResetTrap(pVM);
2105 AssertRC(rc);
2106 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2107 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2108 }
2109
2110 /*
2111 * Clear old interrupt request flags; Check for pending hardware interrupts.
2112 * (See @remark for why we don't check for other FFs.)
2113 */
2114 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2115 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2116 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2117 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2118
2119 /*
2120 * We're now in REM mode.
2121 */
2122 pVM->rem.s.fInREM = true;
2123 pVM->rem.s.fInStateSync = false;
2124 pVM->rem.s.cCanExecuteRaw = 0;
2125 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2126 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2127 return VINF_SUCCESS;
2128}
2129
2130
2131/**
2132 * Syncs back changes in the REM state to the the VM state.
2133 *
2134 * This must be called after invoking REMR3Run().
2135 * Calling it several times in a row is not permitted.
2136 *
2137 * @returns VBox status code.
2138 *
2139 * @param pVM VM Handle.
2140 */
2141REMR3DECL(int) REMR3StateBack(PVM pVM)
2142{
2143 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2144 unsigned i;
2145
2146 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2147 Log2(("REMR3StateBack:\n"));
2148 Assert(pVM->rem.s.fInREM);
2149
2150 /*
2151 * Copy back the registers.
2152 * This is done in the order they are declared in the CPUMCTX structure.
2153 */
2154
2155 /** @todo FOP */
2156 /** @todo FPUIP */
2157 /** @todo CS */
2158 /** @todo FPUDP */
2159 /** @todo DS */
2160
2161 /** @todo check if FPU/XMM was actually used in the recompiler */
2162 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2163//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2164
2165#ifdef TARGET_X86_64
2166 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2167 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2168 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2169 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2170 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2171 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2172 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2173 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2174 pCtx->r8 = pVM->rem.s.Env.regs[8];
2175 pCtx->r9 = pVM->rem.s.Env.regs[9];
2176 pCtx->r10 = pVM->rem.s.Env.regs[10];
2177 pCtx->r11 = pVM->rem.s.Env.regs[11];
2178 pCtx->r12 = pVM->rem.s.Env.regs[12];
2179 pCtx->r13 = pVM->rem.s.Env.regs[13];
2180 pCtx->r14 = pVM->rem.s.Env.regs[14];
2181 pCtx->r15 = pVM->rem.s.Env.regs[15];
2182
2183 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2184
2185#else
2186 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2187 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2188 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2189 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2190 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2191 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2192 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2193
2194 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2195#endif
2196
2197 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2198
2199#ifdef VBOX_WITH_STATISTICS
2200 if (pVM->rem.s.Env.segs[R_SS].newselector)
2201 {
2202 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2203 }
2204 if (pVM->rem.s.Env.segs[R_GS].newselector)
2205 {
2206 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2207 }
2208 if (pVM->rem.s.Env.segs[R_FS].newselector)
2209 {
2210 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2211 }
2212 if (pVM->rem.s.Env.segs[R_ES].newselector)
2213 {
2214 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2215 }
2216 if (pVM->rem.s.Env.segs[R_DS].newselector)
2217 {
2218 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2219 }
2220 if (pVM->rem.s.Env.segs[R_CS].newselector)
2221 {
2222 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2223 }
2224#endif
2225 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2226 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2227 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2228 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2229 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2230
2231#ifdef TARGET_X86_64
2232 pCtx->rip = pVM->rem.s.Env.eip;
2233 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2234#else
2235 pCtx->eip = pVM->rem.s.Env.eip;
2236 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2237#endif
2238
2239 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2240 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2241 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2242 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2243 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2244 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2245
2246 for (i = 0; i < 8; i++)
2247 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2248
2249 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2250 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2251 {
2252 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2253 STAM_COUNTER_INC(&gStatREMGDTChange);
2254 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2255 }
2256
2257 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2258 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2259 {
2260 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2261 STAM_COUNTER_INC(&gStatREMIDTChange);
2262 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2263 }
2264
2265 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2266 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2267 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2268 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2269 {
2270 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2271 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2272 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2273 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2274 STAM_COUNTER_INC(&gStatREMLDTRChange);
2275 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2276 }
2277
2278 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2279 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2280 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2281 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2282 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2283 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2284 : 0) )
2285 {
2286 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2287 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2288 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2289 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2290 pCtx->tr = pVM->rem.s.Env.tr.selector;
2291 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2292 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2293 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2294 if (pCtx->trHid.Attr.u)
2295 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2296 STAM_COUNTER_INC(&gStatREMTRChange);
2297 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2298 }
2299
2300 /** @todo These values could still be out of sync! */
2301 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2302 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2303 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2304 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2305
2306 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2307 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2308 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2309
2310 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2311 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2312 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2313
2314 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2315 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2316 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2317
2318 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2319 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2320 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2321
2322 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2323 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2324 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2325
2326 /* Sysenter MSR */
2327 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2328 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2329 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2330
2331 /* System MSRs. */
2332 pCtx->msrEFER = pVM->rem.s.Env.efer;
2333 pCtx->msrSTAR = pVM->rem.s.Env.star;
2334 pCtx->msrPAT = pVM->rem.s.Env.pat;
2335#ifdef TARGET_X86_64
2336 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2337 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2338 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2339 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2340#endif
2341
2342 remR3TrapClear(pVM);
2343
2344 /*
2345 * Check for traps.
2346 */
2347 if ( pVM->rem.s.Env.exception_index >= 0
2348 && pVM->rem.s.Env.exception_index < 256)
2349 {
2350 int rc;
2351
2352 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2353 rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2354 AssertRC(rc);
2355 switch (pVM->rem.s.Env.exception_index)
2356 {
2357 case 0x0e:
2358 TRPMSetFaultAddress(pVM, pCtx->cr2);
2359 /* fallthru */
2360 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2361 case 0x11: case 0x08: /* 0 */
2362 TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
2363 break;
2364 }
2365
2366 }
2367
2368 /*
2369 * We're not longer in REM mode.
2370 */
2371 pVM->rem.s.fInREM = false;
2372 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2373 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2374 return VINF_SUCCESS;
2375}
2376
2377
2378/**
2379 * This is called by the disassembler when it wants to update the cpu state
2380 * before for instance doing a register dump.
2381 */
2382static void remR3StateUpdate(PVM pVM)
2383{
2384 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2385 unsigned i;
2386
2387 Assert(pVM->rem.s.fInREM);
2388
2389 /*
2390 * Copy back the registers.
2391 * This is done in the order they are declared in the CPUMCTX structure.
2392 */
2393
2394 /** @todo FOP */
2395 /** @todo FPUIP */
2396 /** @todo CS */
2397 /** @todo FPUDP */
2398 /** @todo DS */
2399 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2400 pCtx->fpu.MXCSR = 0;
2401 pCtx->fpu.MXCSR_MASK = 0;
2402
2403 /** @todo check if FPU/XMM was actually used in the recompiler */
2404 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2405//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2406
2407#ifdef TARGET_X86_64
2408 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2409 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2410 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2411 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2412 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2413 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2414 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2415 pCtx->r8 = pVM->rem.s.Env.regs[8];
2416 pCtx->r9 = pVM->rem.s.Env.regs[9];
2417 pCtx->r10 = pVM->rem.s.Env.regs[10];
2418 pCtx->r11 = pVM->rem.s.Env.regs[11];
2419 pCtx->r12 = pVM->rem.s.Env.regs[12];
2420 pCtx->r13 = pVM->rem.s.Env.regs[13];
2421 pCtx->r14 = pVM->rem.s.Env.regs[14];
2422 pCtx->r15 = pVM->rem.s.Env.regs[15];
2423
2424 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2425#else
2426 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2427 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2428 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2429 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2430 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2431 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2432 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2433
2434 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2435#endif
2436
2437 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2438
2439 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2440 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2441 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2442 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2443 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2444
2445#ifdef TARGET_X86_64
2446 pCtx->rip = pVM->rem.s.Env.eip;
2447 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2448#else
2449 pCtx->eip = pVM->rem.s.Env.eip;
2450 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2451#endif
2452
2453 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2454 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2455 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2456 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2457 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2458 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2459
2460 for (i = 0; i < 8; i++)
2461 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2462
2463 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2464 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2465 {
2466 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2467 STAM_COUNTER_INC(&gStatREMGDTChange);
2468 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2469 }
2470
2471 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2472 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2473 {
2474 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2475 STAM_COUNTER_INC(&gStatREMIDTChange);
2476 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2477 }
2478
2479 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2480 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2481 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2482 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2483 {
2484 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2485 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2486 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2487 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2488 STAM_COUNTER_INC(&gStatREMLDTRChange);
2489 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2490 }
2491
2492 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2493 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2494 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2495 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2496 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2497 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2498 : 0) )
2499 {
2500 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2501 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2502 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2503 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2504 pCtx->tr = pVM->rem.s.Env.tr.selector;
2505 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2506 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2507 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2508 if (pCtx->trHid.Attr.u)
2509 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2510 STAM_COUNTER_INC(&gStatREMTRChange);
2511 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2512 }
2513
2514 /** @todo These values could still be out of sync! */
2515 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2516 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2517 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2518 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2519
2520 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2521 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2522 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2523
2524 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2525 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2526 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2527
2528 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2529 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2530 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2531
2532 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2533 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2534 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2535
2536 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2537 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2538 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2539
2540 /* Sysenter MSR */
2541 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2542 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2543 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2544
2545 /* System MSRs. */
2546 pCtx->msrEFER = pVM->rem.s.Env.efer;
2547 pCtx->msrSTAR = pVM->rem.s.Env.star;
2548 pCtx->msrPAT = pVM->rem.s.Env.pat;
2549#ifdef TARGET_X86_64
2550 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2551 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2552 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2553 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2554#endif
2555
2556}
2557
2558
2559/**
2560 * Update the VMM state information if we're currently in REM.
2561 *
2562 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2563 * we're currently executing in REM and the VMM state is invalid. This method will of
2564 * course check that we're executing in REM before syncing any data over to the VMM.
2565 *
2566 * @param pVM The VM handle.
2567 */
2568REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2569{
2570 if (pVM->rem.s.fInREM)
2571 remR3StateUpdate(pVM);
2572}
2573
2574
2575#undef LOG_GROUP
2576#define LOG_GROUP LOG_GROUP_REM
2577
2578
2579/**
2580 * Notify the recompiler about Address Gate 20 state change.
2581 *
2582 * This notification is required since A20 gate changes are
2583 * initialized from a device driver and the VM might just as
2584 * well be in REM mode as in RAW mode.
2585 *
2586 * @param pVM VM handle.
2587 * @param fEnable True if the gate should be enabled.
2588 * False if the gate should be disabled.
2589 */
2590REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2591{
2592 bool fSaved;
2593
2594 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2595 VM_ASSERT_EMT(pVM);
2596
2597 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2598 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2599
2600 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2601
2602 pVM->rem.s.fIgnoreAll = fSaved;
2603}
2604
2605
2606/**
2607 * Replays the invalidated recorded pages.
2608 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2609 *
2610 * @param pVM VM handle.
2611 */
2612REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2613{
2614 RTUINT i;
2615
2616 VM_ASSERT_EMT(pVM);
2617
2618 /*
2619 * Sync the required registers.
2620 */
2621 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2622 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2623 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2624 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2625
2626 /*
2627 * Replay the flushes.
2628 */
2629 pVM->rem.s.fIgnoreInvlPg = true;
2630 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2631 {
2632 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2633 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2634 }
2635 pVM->rem.s.fIgnoreInvlPg = false;
2636 pVM->rem.s.cInvalidatedPages = 0;
2637}
2638
2639
2640/**
2641 * Replays the handler notification changes
2642 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2643 *
2644 * @param pVM VM handle.
2645 */
2646REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2647{
2648 /*
2649 * Replay the flushes.
2650 */
2651 RTUINT i;
2652 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2653
2654 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2655 VM_ASSERT_EMT(pVM);
2656
2657 pVM->rem.s.cHandlerNotifications = 0;
2658 for (i = 0; i < c; i++)
2659 {
2660 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2661 switch (pRec->enmKind)
2662 {
2663 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2664 REMR3NotifyHandlerPhysicalRegister(pVM,
2665 pRec->u.PhysicalRegister.enmType,
2666 pRec->u.PhysicalRegister.GCPhys,
2667 pRec->u.PhysicalRegister.cb,
2668 pRec->u.PhysicalRegister.fHasHCHandler);
2669 break;
2670
2671 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2672 REMR3NotifyHandlerPhysicalDeregister(pVM,
2673 pRec->u.PhysicalDeregister.enmType,
2674 pRec->u.PhysicalDeregister.GCPhys,
2675 pRec->u.PhysicalDeregister.cb,
2676 pRec->u.PhysicalDeregister.fHasHCHandler,
2677 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2678 break;
2679
2680 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2681 REMR3NotifyHandlerPhysicalModify(pVM,
2682 pRec->u.PhysicalModify.enmType,
2683 pRec->u.PhysicalModify.GCPhysOld,
2684 pRec->u.PhysicalModify.GCPhysNew,
2685 pRec->u.PhysicalModify.cb,
2686 pRec->u.PhysicalModify.fHasHCHandler,
2687 pRec->u.PhysicalModify.fRestoreAsRAM);
2688 break;
2689
2690 default:
2691 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2692 break;
2693 }
2694 }
2695 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2696}
2697
2698
2699/**
2700 * Notify REM about changed code page.
2701 *
2702 * @returns VBox status code.
2703 * @param pVM VM handle.
2704 * @param pvCodePage Code page address
2705 */
2706REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2707{
2708#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2709 int rc;
2710 RTGCPHYS PhysGC;
2711 uint64_t flags;
2712
2713 VM_ASSERT_EMT(pVM);
2714
2715 /*
2716 * Get the physical page address.
2717 */
2718 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2719 if (rc == VINF_SUCCESS)
2720 {
2721 /*
2722 * Sync the required registers and flush the whole page.
2723 * (Easier to do the whole page than notifying it about each physical
2724 * byte that was changed.
2725 */
2726 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2727 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2728 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2729 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2730
2731 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2732 }
2733#endif
2734 return VINF_SUCCESS;
2735}
2736
2737
2738/**
2739 * Notification about a successful MMR3PhysRegister() call.
2740 *
2741 * @param pVM VM handle.
2742 * @param GCPhys The physical address the RAM.
2743 * @param cb Size of the memory.
2744 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2745 */
2746REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2747{
2748 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2749 VM_ASSERT_EMT(pVM);
2750
2751 /*
2752 * Validate input - we trust the caller.
2753 */
2754 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2755 Assert(cb);
2756 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2757#ifdef VBOX_WITH_NEW_PHYS_CODE
2758 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2759#endif
2760
2761 /*
2762 * Base ram? Update GCPhysLastRam.
2763 */
2764#ifdef VBOX_WITH_NEW_PHYS_CODE
2765 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2766#else
2767 if (!GCPhys)
2768#endif
2769 {
2770 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2771 {
2772 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2773 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2774 }
2775 }
2776
2777 /*
2778 * Register the ram.
2779 */
2780 Assert(!pVM->rem.s.fIgnoreAll);
2781 pVM->rem.s.fIgnoreAll = true;
2782
2783#ifdef VBOX_WITH_NEW_PHYS_CODE
2784 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2785#else
2786 if (!GCPhys)
2787 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2788 else
2789 {
2790 if (fFlags & MM_RAM_FLAGS_RESERVED)
2791 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2792 else
2793 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2794 }
2795#endif
2796 Assert(pVM->rem.s.fIgnoreAll);
2797 pVM->rem.s.fIgnoreAll = false;
2798}
2799
2800#ifndef VBOX_WITH_NEW_PHYS_CODE
2801
2802/**
2803 * Notification about a successful PGMR3PhysRegisterChunk() call.
2804 *
2805 * @param pVM VM handle.
2806 * @param GCPhys The physical address the RAM.
2807 * @param cb Size of the memory.
2808 * @param pvRam The HC address of the RAM.
2809 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2810 */
2811REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2812{
2813 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2814 VM_ASSERT_EMT(pVM);
2815
2816 /*
2817 * Validate input - we trust the caller.
2818 */
2819 Assert(pvRam);
2820 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2821 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2822 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2823 Assert(fFlags == 0 /* normal RAM */);
2824 Assert(!pVM->rem.s.fIgnoreAll);
2825 pVM->rem.s.fIgnoreAll = true;
2826 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2827 Assert(pVM->rem.s.fIgnoreAll);
2828 pVM->rem.s.fIgnoreAll = false;
2829}
2830
2831
2832/**
2833 * Grows dynamically allocated guest RAM.
2834 * Will raise a fatal error if the operation fails.
2835 *
2836 * @param physaddr The physical address.
2837 */
2838void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2839{
2840 int rc;
2841 PVM pVM = cpu_single_env->pVM;
2842 const RTGCPHYS GCPhys = physaddr;
2843
2844 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2845 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2846 if (RT_SUCCESS(rc))
2847 return;
2848
2849 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2850 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2851 AssertFatalFailed();
2852}
2853
2854#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2855
2856/**
2857 * Notification about a successful MMR3PhysRomRegister() call.
2858 *
2859 * @param pVM VM handle.
2860 * @param GCPhys The physical address of the ROM.
2861 * @param cb The size of the ROM.
2862 * @param pvCopy Pointer to the ROM copy.
2863 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2864 * This function will be called when ever the protection of the
2865 * shadow ROM changes (at reset and end of POST).
2866 */
2867REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2868{
2869 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2870 VM_ASSERT_EMT(pVM);
2871
2872 /*
2873 * Validate input - we trust the caller.
2874 */
2875 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2876 Assert(cb);
2877 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2878
2879 /*
2880 * Register the rom.
2881 */
2882 Assert(!pVM->rem.s.fIgnoreAll);
2883 pVM->rem.s.fIgnoreAll = true;
2884
2885 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2886
2887 Assert(pVM->rem.s.fIgnoreAll);
2888 pVM->rem.s.fIgnoreAll = false;
2889}
2890
2891
2892/**
2893 * Notification about a successful memory deregistration or reservation.
2894 *
2895 * @param pVM VM Handle.
2896 * @param GCPhys Start physical address.
2897 * @param cb The size of the range.
2898 */
2899REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2900{
2901 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2902 VM_ASSERT_EMT(pVM);
2903
2904 /*
2905 * Validate input - we trust the caller.
2906 */
2907 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2908 Assert(cb);
2909 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2910
2911 /*
2912 * Unassigning the memory.
2913 */
2914 Assert(!pVM->rem.s.fIgnoreAll);
2915 pVM->rem.s.fIgnoreAll = true;
2916
2917 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2918
2919 Assert(pVM->rem.s.fIgnoreAll);
2920 pVM->rem.s.fIgnoreAll = false;
2921}
2922
2923
2924/**
2925 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2926 *
2927 * @param pVM VM Handle.
2928 * @param enmType Handler type.
2929 * @param GCPhys Handler range address.
2930 * @param cb Size of the handler range.
2931 * @param fHasHCHandler Set if the handler has a HC callback function.
2932 *
2933 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2934 * Handler memory type to memory which has no HC handler.
2935 */
2936REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2937{
2938 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2939 enmType, GCPhys, cb, fHasHCHandler));
2940 VM_ASSERT_EMT(pVM);
2941 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2942 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2943
2944 if (pVM->rem.s.cHandlerNotifications)
2945 REMR3ReplayHandlerNotifications(pVM);
2946
2947 Assert(!pVM->rem.s.fIgnoreAll);
2948 pVM->rem.s.fIgnoreAll = true;
2949
2950 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2951 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2952 else if (fHasHCHandler)
2953 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2954
2955 Assert(pVM->rem.s.fIgnoreAll);
2956 pVM->rem.s.fIgnoreAll = false;
2957}
2958
2959
2960/**
2961 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2962 *
2963 * @param pVM VM Handle.
2964 * @param enmType Handler type.
2965 * @param GCPhys Handler range address.
2966 * @param cb Size of the handler range.
2967 * @param fHasHCHandler Set if the handler has a HC callback function.
2968 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2969 */
2970REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2971{
2972 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2973 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2974 VM_ASSERT_EMT(pVM);
2975
2976 if (pVM->rem.s.cHandlerNotifications)
2977 REMR3ReplayHandlerNotifications(pVM);
2978
2979 Assert(!pVM->rem.s.fIgnoreAll);
2980 pVM->rem.s.fIgnoreAll = true;
2981
2982/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2983 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2984 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2985 else if (fHasHCHandler)
2986 {
2987 if (!fRestoreAsRAM)
2988 {
2989 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2990 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2991 }
2992 else
2993 {
2994 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2995 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2996 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2997 }
2998 }
2999
3000 Assert(pVM->rem.s.fIgnoreAll);
3001 pVM->rem.s.fIgnoreAll = false;
3002}
3003
3004
3005/**
3006 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3007 *
3008 * @param pVM VM Handle.
3009 * @param enmType Handler type.
3010 * @param GCPhysOld Old handler range address.
3011 * @param GCPhysNew New handler range address.
3012 * @param cb Size of the handler range.
3013 * @param fHasHCHandler Set if the handler has a HC callback function.
3014 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3015 */
3016REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3017{
3018 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3019 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3020 VM_ASSERT_EMT(pVM);
3021 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3022
3023 if (pVM->rem.s.cHandlerNotifications)
3024 REMR3ReplayHandlerNotifications(pVM);
3025
3026 if (fHasHCHandler)
3027 {
3028 Assert(!pVM->rem.s.fIgnoreAll);
3029 pVM->rem.s.fIgnoreAll = true;
3030
3031 /*
3032 * Reset the old page.
3033 */
3034 if (!fRestoreAsRAM)
3035 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3036 else
3037 {
3038 /* This is not perfect, but it'll do for PD monitoring... */
3039 Assert(cb == PAGE_SIZE);
3040 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3041 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3042 }
3043
3044 /*
3045 * Update the new page.
3046 */
3047 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3048 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3049 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3050
3051 Assert(pVM->rem.s.fIgnoreAll);
3052 pVM->rem.s.fIgnoreAll = false;
3053 }
3054}
3055
3056
3057/**
3058 * Checks if we're handling access to this page or not.
3059 *
3060 * @returns true if we're trapping access.
3061 * @returns false if we aren't.
3062 * @param pVM The VM handle.
3063 * @param GCPhys The physical address.
3064 *
3065 * @remark This function will only work correctly in VBOX_STRICT builds!
3066 */
3067REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3068{
3069#ifdef VBOX_STRICT
3070 unsigned long off;
3071 if (pVM->rem.s.cHandlerNotifications)
3072 REMR3ReplayHandlerNotifications(pVM);
3073
3074 off = get_phys_page_offset(GCPhys);
3075 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3076 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3077 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3078#else
3079 return false;
3080#endif
3081}
3082
3083
3084/**
3085 * Deals with a rare case in get_phys_addr_code where the code
3086 * is being monitored.
3087 *
3088 * It could also be an MMIO page, in which case we will raise a fatal error.
3089 *
3090 * @returns The physical address corresponding to addr.
3091 * @param env The cpu environment.
3092 * @param addr The virtual address.
3093 * @param pTLBEntry The TLB entry.
3094 */
3095target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3096 target_ulong addr,
3097 CPUTLBEntry* pTLBEntry,
3098 target_phys_addr_t ioTLBEntry)
3099{
3100 PVM pVM = env->pVM;
3101
3102 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3103 {
3104 /* If code memory is being monitored, appropriate IOTLB entry will have
3105 handler IO type, and addend will provide real physical address, no
3106 matter if we store VA in TLB or not, as handlers are always passed PA */
3107 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3108 return ret;
3109 }
3110 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3111 "*** handlers\n",
3112 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3113 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3114 LogRel(("*** mmio\n"));
3115 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3116 LogRel(("*** phys\n"));
3117 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3118 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3119 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3120 AssertFatalFailed();
3121}
3122
3123/**
3124 * Read guest RAM and ROM.
3125 *
3126 * @param SrcGCPhys The source address (guest physical).
3127 * @param pvDst The destination address.
3128 * @param cb Number of bytes
3129 */
3130void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3131{
3132 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3133 VBOX_CHECK_ADDR(SrcGCPhys);
3134 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3135#ifdef VBOX_DEBUG_PHYS
3136 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3137#endif
3138 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3139}
3140
3141
3142/**
3143 * Read guest RAM and ROM, unsigned 8-bit.
3144 *
3145 * @param SrcGCPhys The source address (guest physical).
3146 */
3147RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3148{
3149 uint8_t val;
3150 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3151 VBOX_CHECK_ADDR(SrcGCPhys);
3152 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3153 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3154#ifdef VBOX_DEBUG_PHYS
3155 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3156#endif
3157 return val;
3158}
3159
3160
3161/**
3162 * Read guest RAM and ROM, signed 8-bit.
3163 *
3164 * @param SrcGCPhys The source address (guest physical).
3165 */
3166RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3167{
3168 int8_t val;
3169 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3170 VBOX_CHECK_ADDR(SrcGCPhys);
3171 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3172 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3173#ifdef VBOX_DEBUG_PHYS
3174 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3175#endif
3176 return val;
3177}
3178
3179
3180/**
3181 * Read guest RAM and ROM, unsigned 16-bit.
3182 *
3183 * @param SrcGCPhys The source address (guest physical).
3184 */
3185RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3186{
3187 uint16_t val;
3188 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3189 VBOX_CHECK_ADDR(SrcGCPhys);
3190 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3191 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3192#ifdef VBOX_DEBUG_PHYS
3193 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3194#endif
3195 return val;
3196}
3197
3198
3199/**
3200 * Read guest RAM and ROM, signed 16-bit.
3201 *
3202 * @param SrcGCPhys The source address (guest physical).
3203 */
3204RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3205{
3206 int16_t val;
3207 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3208 VBOX_CHECK_ADDR(SrcGCPhys);
3209 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3210 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3211#ifdef VBOX_DEBUG_PHYS
3212 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3213#endif
3214 return val;
3215}
3216
3217
3218/**
3219 * Read guest RAM and ROM, unsigned 32-bit.
3220 *
3221 * @param SrcGCPhys The source address (guest physical).
3222 */
3223RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3224{
3225 uint32_t val;
3226 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3227 VBOX_CHECK_ADDR(SrcGCPhys);
3228 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3229 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3230#ifdef VBOX_DEBUG_PHYS
3231 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3232#endif
3233 return val;
3234}
3235
3236
3237/**
3238 * Read guest RAM and ROM, signed 32-bit.
3239 *
3240 * @param SrcGCPhys The source address (guest physical).
3241 */
3242RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3243{
3244 int32_t val;
3245 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3246 VBOX_CHECK_ADDR(SrcGCPhys);
3247 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3248 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3249#ifdef VBOX_DEBUG_PHYS
3250 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3251#endif
3252 return val;
3253}
3254
3255
3256/**
3257 * Read guest RAM and ROM, unsigned 64-bit.
3258 *
3259 * @param SrcGCPhys The source address (guest physical).
3260 */
3261uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3262{
3263 uint64_t val;
3264 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3265 VBOX_CHECK_ADDR(SrcGCPhys);
3266 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3267 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3268#ifdef VBOX_DEBUG_PHYS
3269 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3270#endif
3271 return val;
3272}
3273
3274
3275/**
3276 * Read guest RAM and ROM, signed 64-bit.
3277 *
3278 * @param SrcGCPhys The source address (guest physical).
3279 */
3280int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3281{
3282 int64_t val;
3283 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3284 VBOX_CHECK_ADDR(SrcGCPhys);
3285 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3286 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3287#ifdef VBOX_DEBUG_PHYS
3288 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3289#endif
3290 return val;
3291}
3292
3293
3294/**
3295 * Write guest RAM.
3296 *
3297 * @param DstGCPhys The destination address (guest physical).
3298 * @param pvSrc The source address.
3299 * @param cb Number of bytes to write
3300 */
3301void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3302{
3303 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3304 VBOX_CHECK_ADDR(DstGCPhys);
3305 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3306 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3307#ifdef VBOX_DEBUG_PHYS
3308 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3309#endif
3310}
3311
3312
3313/**
3314 * Write guest RAM, unsigned 8-bit.
3315 *
3316 * @param DstGCPhys The destination address (guest physical).
3317 * @param val Value
3318 */
3319void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3320{
3321 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3322 VBOX_CHECK_ADDR(DstGCPhys);
3323 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3324 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3325#ifdef VBOX_DEBUG_PHYS
3326 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3327#endif
3328}
3329
3330
3331/**
3332 * Write guest RAM, unsigned 8-bit.
3333 *
3334 * @param DstGCPhys The destination address (guest physical).
3335 * @param val Value
3336 */
3337void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3338{
3339 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3340 VBOX_CHECK_ADDR(DstGCPhys);
3341 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3342 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3343#ifdef VBOX_DEBUG_PHYS
3344 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3345#endif
3346}
3347
3348
3349/**
3350 * Write guest RAM, unsigned 32-bit.
3351 *
3352 * @param DstGCPhys The destination address (guest physical).
3353 * @param val Value
3354 */
3355void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3356{
3357 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3358 VBOX_CHECK_ADDR(DstGCPhys);
3359 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3360 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3361#ifdef VBOX_DEBUG_PHYS
3362 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3363#endif
3364}
3365
3366
3367/**
3368 * Write guest RAM, unsigned 64-bit.
3369 *
3370 * @param DstGCPhys The destination address (guest physical).
3371 * @param val Value
3372 */
3373void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3374{
3375 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3376 VBOX_CHECK_ADDR(DstGCPhys);
3377 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3378 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3379#ifdef VBOX_DEBUG_PHYS
3380 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3381#endif
3382}
3383
3384#undef LOG_GROUP
3385#define LOG_GROUP LOG_GROUP_REM_MMIO
3386
/** Read one byte of MMIO memory through IOM; asserts success, returns the byte (zero-extended). */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
    return u32;
}
3396
/** Read one word of MMIO memory through IOM; asserts success, returns the word (zero-extended). */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
    return u32;
}
3406
/** Read one dword of MMIO memory through IOM; asserts success, returns the dword. */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
    return u32;
}
3416
/** Write one byte to MMIO memory through IOM; asserts success. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3425
/** Write one word to MMIO memory through IOM; asserts success. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3434
/** Write one dword to MMIO memory through IOM; asserts success. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3443
3444
3445#undef LOG_GROUP
3446#define LOG_GROUP LOG_GROUP_REM_HANDLER
3447
3448/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3449
/** Handler-memory byte read: routed through PGMPhysRead so physical access handlers fire. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3457
/** Handler-memory word read: routed through PGMPhysRead so physical access handlers fire. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3465
/** Handler-memory dword read: routed through PGMPhysRead so physical access handlers fire. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3473
/** Handler-memory byte write: routed through PGMPhysWrite so physical access handlers fire.
 * NOTE(review): writes the first sizeof(uint8_t) bytes of u32 - this is the
 * low-order byte only on a little-endian host; verify if big-endian hosts matter. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3479
/** Handler-memory word write: routed through PGMPhysWrite so physical access handlers fire.
 * NOTE(review): writes the first sizeof(uint16_t) bytes of u32 - the low word
 * only on a little-endian host; verify if big-endian hosts matter. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3485
/** Handler-memory dword write: routed through PGMPhysWrite so physical access handlers fire. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3491
3492/* -+- disassembly -+- */
3493
3494#undef LOG_GROUP
3495#define LOG_GROUP LOG_GROUP_REM_DISAS
3496
3497
3498/**
3499 * Enables or disables singled stepped disassembly.
3500 *
3501 * @returns VBox status code.
3502 * @param pVM VM handle.
3503 * @param fEnable To enable set this flag, to disable clear it.
3504 */
3505static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3506{
3507 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3508 VM_ASSERT_EMT(pVM);
3509
3510 if (fEnable)
3511 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3512 else
3513 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3514 return VINF_SUCCESS;
3515}
3516
3517
3518/**
3519 * Enables or disables singled stepped disassembly.
3520 *
3521 * @returns VBox status code.
3522 * @param pVM VM handle.
3523 * @param fEnable To enable set this flag, to disable clear it.
3524 */
3525REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3526{
3527 PVMREQ pReq;
3528 int rc;
3529
3530 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3531 if (VM_IS_EMT(pVM))
3532 return remR3DisasEnableStepping(pVM, fEnable);
3533
3534 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3535 AssertRC(rc);
3536 if (RT_SUCCESS(rc))
3537 rc = pReq->iStatus;
3538 VMR3ReqFree(pReq);
3539 return rc;
3540}
3541
3542
3543#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3544/**
3545 * External Debugger Command: .remstep [on|off|1|0]
3546 */
3547static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3548{
3549 bool fEnable;
3550 int rc;
3551
3552 /* print status */
3553 if (cArgs == 0)
3554 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3555 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3556
3557 /* convert the argument and change the mode. */
3558 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3559 if (RT_FAILURE(rc))
3560 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3561 rc = REMR3DisasEnableStepping(pVM, fEnable);
3562 if (RT_FAILURE(rc))
3563 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3564 return rc;
3565}
3566#endif
3567
3568
3569/**
3570 * Disassembles one instruction and prints it to the log.
3571 *
3572 * @returns Success indicator.
3573 * @param env Pointer to the recompiler CPU structure.
3574 * @param f32BitCode Indicates that whether or not the code should
3575 * be disassembled as 16 or 32 bit. If -1 the CS
3576 * selector will be inspected.
3577 * @param pszPrefix
3578 */
3579bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3580{
3581 PVM pVM = env->pVM;
3582 const bool fLog = LogIsEnabled();
3583 const bool fLog2 = LogIs2Enabled();
3584 int rc = VINF_SUCCESS;
3585
3586 /*
3587 * Don't bother if there ain't any log output to do.
3588 */
3589 if (!fLog && !fLog2)
3590 return true;
3591
3592 /*
3593 * Update the state so DBGF reads the correct register values.
3594 */
3595 remR3StateUpdate(pVM);
3596
3597 /*
3598 * Log registers if requested.
3599 */
3600 if (!fLog2)
3601 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3602
3603 /*
3604 * Disassemble to log.
3605 */
3606 if (fLog)
3607 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3608
3609 return RT_SUCCESS(rc);
3610}
3611
3612
3613/**
3614 * Disassemble recompiled code.
3615 *
3616 * @param phFileIgnored Ignored, logfile usually.
3617 * @param pvCode Pointer to the code block.
3618 * @param cb Size of the code block.
3619 */
3620void disas(FILE *phFile, void *pvCode, unsigned long cb)
3621{
3622#ifdef DEBUG_TMP_LOGGING
3623# define DISAS_PRINTF(x...) fprintf(phFile, x)
3624#else
3625# define DISAS_PRINTF(x...) RTLogPrintf(x)
3626 if (LogIs2Enabled())
3627#endif
3628 {
3629 unsigned off = 0;
3630 char szOutput[256];
3631 DISCPUSTATE Cpu;
3632
3633 memset(&Cpu, 0, sizeof(Cpu));
3634#ifdef RT_ARCH_X86
3635 Cpu.mode = CPUMODE_32BIT;
3636#else
3637 Cpu.mode = CPUMODE_64BIT;
3638#endif
3639
3640 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3641 while (off < cb)
3642 {
3643 uint32_t cbInstr;
3644 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3645 DISAS_PRINTF("%s", szOutput);
3646 else
3647 {
3648 DISAS_PRINTF("disas error\n");
3649 cbInstr = 1;
3650#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3651 break;
3652#endif
3653 }
3654 off += cbInstr;
3655 }
3656 }
3657
3658#undef DISAS_PRINTF
3659}
3660
3661
3662/**
3663 * Disassemble guest code.
3664 *
3665 * @param phFileIgnored Ignored, logfile usually.
3666 * @param uCode The guest address of the code to disassemble. (flat?)
3667 * @param cb Number of bytes to disassemble.
3668 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3669 */
3670void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3671{
3672#ifdef DEBUG_TMP_LOGGING
3673# define DISAS_PRINTF(x...) fprintf(phFile, x)
3674#else
3675# define DISAS_PRINTF(x...) RTLogPrintf(x)
3676 if (LogIs2Enabled())
3677#endif
3678 {
3679 PVM pVM = cpu_single_env->pVM;
3680 RTSEL cs;
3681 RTGCUINTPTR eip;
3682
3683 /*
3684 * Update the state so DBGF reads the correct register values (flags).
3685 */
3686 remR3StateUpdate(pVM);
3687
3688 /*
3689 * Do the disassembling.
3690 */
3691 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3692 cs = cpu_single_env->segs[R_CS].selector;
3693 eip = uCode - cpu_single_env->segs[R_CS].base;
3694 for (;;)
3695 {
3696 char szBuf[256];
3697 uint32_t cbInstr;
3698 int rc = DBGFR3DisasInstrEx(pVM,
3699 cs,
3700 eip,
3701 0,
3702 szBuf, sizeof(szBuf),
3703 &cbInstr);
3704 if (RT_SUCCESS(rc))
3705 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3706 else
3707 {
3708 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3709 cbInstr = 1;
3710 }
3711
3712 /* next */
3713 if (cb <= cbInstr)
3714 break;
3715 cb -= cbInstr;
3716 uCode += cbInstr;
3717 eip += cbInstr;
3718 }
3719 }
3720#undef DISAS_PRINTF
3721}
3722
3723
3724/**
3725 * Looks up a guest symbol.
3726 *
3727 * @returns Pointer to symbol name. This is a static buffer.
3728 * @param orig_addr The address in question.
3729 */
3730const char *lookup_symbol(target_ulong orig_addr)
3731{
3732 RTGCINTPTR off = 0;
3733 DBGFSYMBOL Sym;
3734 PVM pVM = cpu_single_env->pVM;
3735 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3736 if (RT_SUCCESS(rc))
3737 {
3738 static char szSym[sizeof(Sym.szName) + 48];
3739 if (!off)
3740 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3741 else if (off > 0)
3742 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3743 else
3744 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3745 return szSym;
3746 }
3747 return "<N/A>";
3748}
3749
3750
3751#undef LOG_GROUP
3752#define LOG_GROUP LOG_GROUP_REM
3753
3754
3755/* -+- FF notifications -+- */
3756
3757
3758/**
3759 * Notification about a pending interrupt.
3760 *
3761 * @param pVM VM Handle.
3762 * @param u8Interrupt Interrupt
3763 * @thread The emulation thread.
3764 */
3765REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3766{
3767 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3768 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3769}
3770
3771/**
3772 * Notification about a pending interrupt.
3773 *
3774 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3775 * @param pVM VM Handle.
3776 * @thread The emulation thread.
3777 */
3778REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3779{
3780 return pVM->rem.s.u32PendingInterrupt;
3781}
3782
3783/**
3784 * Notification about the interrupt FF being set.
3785 *
3786 * @param pVM VM Handle.
3787 * @thread The emulation thread.
3788 */
3789REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3790{
3791 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3792 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3793 if (pVM->rem.s.fInREM)
3794 {
3795 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3796 CPU_INTERRUPT_EXTERNAL_HARD);
3797 }
3798}
3799
3800
3801/**
3802 * Notification about the interrupt FF being set.
3803 *
3804 * @param pVM VM Handle.
3805 * @thread Any.
3806 */
3807REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3808{
3809 LogFlow(("REMR3NotifyInterruptClear:\n"));
3810 if (pVM->rem.s.fInREM)
3811 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3812}
3813
3814
3815/**
3816 * Notification about pending timer(s).
3817 *
3818 * @param pVM VM Handle.
3819 * @thread Any.
3820 */
3821REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3822{
3823#ifndef DEBUG_bird
3824 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3825#endif
3826 if (pVM->rem.s.fInREM)
3827 {
3828 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3829 CPU_INTERRUPT_EXTERNAL_TIMER);
3830 }
3831}
3832
3833
3834/**
3835 * Notification about pending DMA transfers.
3836 *
3837 * @param pVM VM Handle.
3838 * @thread Any.
3839 */
3840REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3841{
3842 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3843 if (pVM->rem.s.fInREM)
3844 {
3845 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3846 CPU_INTERRUPT_EXTERNAL_DMA);
3847 }
3848}
3849
3850
3851/**
3852 * Notification about pending timer(s).
3853 *
3854 * @param pVM VM Handle.
3855 * @thread Any.
3856 */
3857REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3858{
3859 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3860 if (pVM->rem.s.fInREM)
3861 {
3862 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3863 CPU_INTERRUPT_EXTERNAL_EXIT);
3864 }
3865}
3866
3867
3868/**
3869 * Notification about pending FF set by an external thread.
3870 *
3871 * @param pVM VM handle.
3872 * @thread Any.
3873 */
3874REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3875{
3876 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3877 if (pVM->rem.s.fInREM)
3878 {
3879 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3880 CPU_INTERRUPT_EXTERNAL_EXIT);
3881 }
3882}
3883
3884
3885#ifdef VBOX_WITH_STATISTICS
3886void remR3ProfileStart(int statcode)
3887{
3888 STAMPROFILEADV *pStat;
3889 switch(statcode)
3890 {
3891 case STATS_EMULATE_SINGLE_INSTR:
3892 pStat = &gStatExecuteSingleInstr;
3893 break;
3894 case STATS_QEMU_COMPILATION:
3895 pStat = &gStatCompilationQEmu;
3896 break;
3897 case STATS_QEMU_RUN_EMULATED_CODE:
3898 pStat = &gStatRunCodeQEmu;
3899 break;
3900 case STATS_QEMU_TOTAL:
3901 pStat = &gStatTotalTimeQEmu;
3902 break;
3903 case STATS_QEMU_RUN_TIMERS:
3904 pStat = &gStatTimers;
3905 break;
3906 case STATS_TLB_LOOKUP:
3907 pStat= &gStatTBLookup;
3908 break;
3909 case STATS_IRQ_HANDLING:
3910 pStat= &gStatIRQ;
3911 break;
3912 case STATS_RAW_CHECK:
3913 pStat = &gStatRawCheck;
3914 break;
3915
3916 default:
3917 AssertMsgFailed(("unknown stat %d\n", statcode));
3918 return;
3919 }
3920 STAM_PROFILE_ADV_START(pStat, a);
3921}
3922
3923
3924void remR3ProfileStop(int statcode)
3925{
3926 STAMPROFILEADV *pStat;
3927 switch(statcode)
3928 {
3929 case STATS_EMULATE_SINGLE_INSTR:
3930 pStat = &gStatExecuteSingleInstr;
3931 break;
3932 case STATS_QEMU_COMPILATION:
3933 pStat = &gStatCompilationQEmu;
3934 break;
3935 case STATS_QEMU_RUN_EMULATED_CODE:
3936 pStat = &gStatRunCodeQEmu;
3937 break;
3938 case STATS_QEMU_TOTAL:
3939 pStat = &gStatTotalTimeQEmu;
3940 break;
3941 case STATS_QEMU_RUN_TIMERS:
3942 pStat = &gStatTimers;
3943 break;
3944 case STATS_TLB_LOOKUP:
3945 pStat= &gStatTBLookup;
3946 break;
3947 case STATS_IRQ_HANDLING:
3948 pStat= &gStatIRQ;
3949 break;
3950 case STATS_RAW_CHECK:
3951 pStat = &gStatRawCheck;
3952 break;
3953 default:
3954 AssertMsgFailed(("unknown stat %d\n", statcode));
3955 return;
3956 }
3957 STAM_PROFILE_ADV_STOP(pStat, a);
3958}
3959#endif
3960
3961/**
3962 * Raise an RC, force rem exit.
3963 *
3964 * @param pVM VM handle.
3965 * @param rc The rc.
3966 */
3967void remR3RaiseRC(PVM pVM, int rc)
3968{
3969 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
3970 Assert(pVM->rem.s.fInREM);
3971 VM_ASSERT_EMT(pVM);
3972 pVM->rem.s.rc = rc;
3973 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
3974}
3975
3976
3977/* -+- timers -+- */
3978
/**
 * Reads the guest TSC for the recompiler (QEMU callback).
 *
 * @returns The current guest TSC value as provided by TM.
 * @param   env     The recompiler CPU state (carries the VM handle).
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);   /* statistics only */
    return TMCpuTickGet(env->pVM);
}
3984
3985
3986/* -+- interrupts -+- */
3987
/**
 * Raises the legacy FPU error signal (QEMU callback).
 *
 * IRQ 13 is the traditional ISA line used for FERR# / coprocessor errors.
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
3993
3994int cpu_get_pic_interrupt(CPUState *env)
3995{
3996 uint8_t u8Interrupt;
3997 int rc;
3998
3999 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4000 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4001 * with the (a)pic.
4002 */
4003 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
4004 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4005 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4006 * remove this kludge. */
4007 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4008 {
4009 rc = VINF_SUCCESS;
4010 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4011 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4012 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4013 }
4014 else
4015 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
4016
4017 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4018 if (RT_SUCCESS(rc))
4019 {
4020 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
4021 env->interrupt_request |= CPU_INTERRUPT_HARD;
4022 return u8Interrupt;
4023 }
4024 return -1;
4025}
4026
4027
4028/* -+- local apic -+- */
4029
/**
 * Sets the local APIC base MSR via PDM (QEMU callback).
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   val     The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4035
4036uint64_t cpu_get_apic_base(CPUX86State *env)
4037{
4038 uint64_t u64;
4039 int rc = PDMApicGetBase(env->pVM, &u64);
4040 if (RT_SUCCESS(rc))
4041 {
4042 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4043 return u64;
4044 }
4045 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4046 return 0;
4047}
4048
/**
 * Sets the local APIC task priority register via PDM (QEMU callback).
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   val     The new TPR value.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4054
4055uint8_t cpu_get_apic_tpr(CPUX86State *env)
4056{
4057 uint8_t u8;
4058 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4059 if (RT_SUCCESS(rc))
4060 {
4061 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4062 return u8;
4063 }
4064 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4065 return 0;
4066}
4067
4068
4069uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4070{
4071 uint64_t value;
4072 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4073 if (RT_SUCCESS(rc))
4074 {
4075 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4076 return value;
4077 }
4078 /** @todo: exception ? */
4079 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4080 return value;
4081}
4082
/**
 * Writes an APIC MSR via PDM (QEMU callback).
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4089
/**
 * Reads a guest MSR via CPUM (QEMU callback).
 *
 * @returns The MSR value as reported by CPUM.
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    return CPUMGetGuestMsr(env->pVM, msr);
}
4094
/**
 * Writes a guest MSR via CPUM (QEMU callback).
 *
 * @param   env     The recompiler CPU state (carries the VM handle).
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    CPUMSetGuestMsr(env->pVM, msr, val);
}
4099
4100/* -+- I/O Ports -+- */
4101
4102#undef LOG_GROUP
4103#define LOG_GROUP LOG_GROUP_REM_IOPORT
4104
4105void cpu_outb(CPUState *env, int addr, int val)
4106{
4107 int rc;
4108
4109 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4110 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4111
4112 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4113 if (RT_LIKELY(rc == VINF_SUCCESS))
4114 return;
4115 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4116 {
4117 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4118 remR3RaiseRC(env->pVM, rc);
4119 return;
4120 }
4121 remAbort(rc, __FUNCTION__);
4122}
4123
4124void cpu_outw(CPUState *env, int addr, int val)
4125{
4126 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4127 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4128 if (RT_LIKELY(rc == VINF_SUCCESS))
4129 return;
4130 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4131 {
4132 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4133 remR3RaiseRC(env->pVM, rc);
4134 return;
4135 }
4136 remAbort(rc, __FUNCTION__);
4137}
4138
4139void cpu_outl(CPUState *env, int addr, int val)
4140{
4141 int rc;
4142 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4143 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4144 if (RT_LIKELY(rc == VINF_SUCCESS))
4145 return;
4146 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4147 {
4148 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4149 remR3RaiseRC(env->pVM, rc);
4150 return;
4151 }
4152 remAbort(rc, __FUNCTION__);
4153}
4154
4155int cpu_inb(CPUState *env, int addr)
4156{
4157 uint32_t u32 = 0;
4158 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4159 if (RT_LIKELY(rc == VINF_SUCCESS))
4160 {
4161 if (/*addr != 0x61 && */addr != 0x71)
4162 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4163 return (int)u32;
4164 }
4165 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4166 {
4167 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4168 remR3RaiseRC(env->pVM, rc);
4169 return (int)u32;
4170 }
4171 remAbort(rc, __FUNCTION__);
4172 return 0xff;
4173}
4174
4175int cpu_inw(CPUState *env, int addr)
4176{
4177 uint32_t u32 = 0;
4178 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4179 if (RT_LIKELY(rc == VINF_SUCCESS))
4180 {
4181 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4182 return (int)u32;
4183 }
4184 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4185 {
4186 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4187 remR3RaiseRC(env->pVM, rc);
4188 return (int)u32;
4189 }
4190 remAbort(rc, __FUNCTION__);
4191 return 0xffff;
4192}
4193
4194int cpu_inl(CPUState *env, int addr)
4195{
4196 uint32_t u32 = 0;
4197 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4198 if (RT_LIKELY(rc == VINF_SUCCESS))
4199 {
4200//if (addr==0x01f0 && u32 == 0x6b6d)
4201// loglevel = ~0;
4202 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4203 return (int)u32;
4204 }
4205 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4206 {
4207 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4208 remR3RaiseRC(env->pVM, rc);
4209 return (int)u32;
4210 }
4211 remAbort(rc, __FUNCTION__);
4212 return 0xffffffff;
4213}
4214
4215#undef LOG_GROUP
4216#define LOG_GROUP LOG_GROUP_REM
4217
4218
4219/* -+- helpers and misc other interfaces -+- */
4220
4221/**
4222 * Perform the CPUID instruction.
4223 *
4224 * ASMCpuId cannot be invoked from some source files where this is used because of global
4225 * register allocations.
4226 *
4227 * @param env Pointer to the recompiler CPU structure.
4228 * @param uOperator CPUID operation (eax).
4229 * @param pvEAX Where to store eax.
4230 * @param pvEBX Where to store ebx.
4231 * @param pvECX Where to store ecx.
4232 * @param pvEDX Where to store edx.
4233 */
4234void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4235{
4236 CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4237}
4238
4239
4240#if 0 /* not used */
4241/**
4242 * Interface for qemu hardware to report back fatal errors.
4243 */
4244void hw_error(const char *pszFormat, ...)
4245{
4246 /*
4247 * Bitch about it.
4248 */
4249 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4250 * this in my Odin32 tree at home! */
4251 va_list args;
4252 va_start(args, pszFormat);
4253 RTLogPrintf("fatal error in virtual hardware:");
4254 RTLogPrintfV(pszFormat, args);
4255 va_end(args);
4256 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4257
4258 /*
4259 * If we're in REM context we'll sync back the state before 'jumping' to
4260 * the EMs failure handling.
4261 */
4262 PVM pVM = cpu_single_env->pVM;
4263 if (pVM->rem.s.fInREM)
4264 REMR3StateBack(pVM);
4265 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4266 AssertMsgFailed(("EMR3FatalError returned!\n"));
4267}
4268#endif
4269
4270/**
4271 * Interface for the qemu cpu to report unhandled situation
4272 * raising a fatal VM error.
4273 */
4274void cpu_abort(CPUState *env, const char *pszFormat, ...)
4275{
4276 va_list args;
4277 PVM pVM;
4278
4279 /*
4280 * Bitch about it.
4281 */
4282#ifndef _MSC_VER
4283 /** @todo: MSVC is right - it's not valid C */
4284 RTLogFlags(NULL, "nodisabled nobuffered");
4285#endif
4286 va_start(args, pszFormat);
4287 RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
4288 va_end(args);
4289 va_start(args, pszFormat);
4290 AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
4291 va_end(args);
4292
4293 /*
4294 * If we're in REM context we'll sync back the state before 'jumping' to
4295 * the EMs failure handling.
4296 */
4297 pVM = cpu_single_env->pVM;
4298 if (pVM->rem.s.fInREM)
4299 REMR3StateBack(pVM);
4300 EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
4301 AssertMsgFailed(("EMR3FatalError returned!\n"));
4302}
4303
4304
4305/**
4306 * Aborts the VM.
4307 *
4308 * @param rc VBox error code.
4309 * @param pszTip Hint about why/when this happend.
4310 */
4311void remAbort(int rc, const char *pszTip)
4312{
4313 PVM pVM;
4314
4315 /*
4316 * Bitch about it.
4317 */
4318 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4319 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4320
4321 /*
4322 * Jump back to where we entered the recompiler.
4323 */
4324 pVM = cpu_single_env->pVM;
4325 if (pVM->rem.s.fInREM)
4326 REMR3StateBack(pVM);
4327 EMR3FatalError(pVM, rc);
4328 AssertMsgFailed(("EMR3FatalError returned!\n"));
4329}
4330
4331
4332/**
4333 * Dumps a linux system call.
4334 * @param pVM VM handle.
4335 */
4336void remR3DumpLnxSyscall(PVM pVM)
4337{
4338 static const char *apsz[] =
4339 {
4340 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4341 "sys_exit",
4342 "sys_fork",
4343 "sys_read",
4344 "sys_write",
4345 "sys_open", /* 5 */
4346 "sys_close",
4347 "sys_waitpid",
4348 "sys_creat",
4349 "sys_link",
4350 "sys_unlink", /* 10 */
4351 "sys_execve",
4352 "sys_chdir",
4353 "sys_time",
4354 "sys_mknod",
4355 "sys_chmod", /* 15 */
4356 "sys_lchown16",
4357 "sys_ni_syscall", /* old break syscall holder */
4358 "sys_stat",
4359 "sys_lseek",
4360 "sys_getpid", /* 20 */
4361 "sys_mount",
4362 "sys_oldumount",
4363 "sys_setuid16",
4364 "sys_getuid16",
4365 "sys_stime", /* 25 */
4366 "sys_ptrace",
4367 "sys_alarm",
4368 "sys_fstat",
4369 "sys_pause",
4370 "sys_utime", /* 30 */
4371 "sys_ni_syscall", /* old stty syscall holder */
4372 "sys_ni_syscall", /* old gtty syscall holder */
4373 "sys_access",
4374 "sys_nice",
4375 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4376 "sys_sync",
4377 "sys_kill",
4378 "sys_rename",
4379 "sys_mkdir",
4380 "sys_rmdir", /* 40 */
4381 "sys_dup",
4382 "sys_pipe",
4383 "sys_times",
4384 "sys_ni_syscall", /* old prof syscall holder */
4385 "sys_brk", /* 45 */
4386 "sys_setgid16",
4387 "sys_getgid16",
4388 "sys_signal",
4389 "sys_geteuid16",
4390 "sys_getegid16", /* 50 */
4391 "sys_acct",
4392 "sys_umount", /* recycled never used phys() */
4393 "sys_ni_syscall", /* old lock syscall holder */
4394 "sys_ioctl",
4395 "sys_fcntl", /* 55 */
4396 "sys_ni_syscall", /* old mpx syscall holder */
4397 "sys_setpgid",
4398 "sys_ni_syscall", /* old ulimit syscall holder */
4399 "sys_olduname",
4400 "sys_umask", /* 60 */
4401 "sys_chroot",
4402 "sys_ustat",
4403 "sys_dup2",
4404 "sys_getppid",
4405 "sys_getpgrp", /* 65 */
4406 "sys_setsid",
4407 "sys_sigaction",
4408 "sys_sgetmask",
4409 "sys_ssetmask",
4410 "sys_setreuid16", /* 70 */
4411 "sys_setregid16",
4412 "sys_sigsuspend",
4413 "sys_sigpending",
4414 "sys_sethostname",
4415 "sys_setrlimit", /* 75 */
4416 "sys_old_getrlimit",
4417 "sys_getrusage",
4418 "sys_gettimeofday",
4419 "sys_settimeofday",
4420 "sys_getgroups16", /* 80 */
4421 "sys_setgroups16",
4422 "old_select",
4423 "sys_symlink",
4424 "sys_lstat",
4425 "sys_readlink", /* 85 */
4426 "sys_uselib",
4427 "sys_swapon",
4428 "sys_reboot",
4429 "old_readdir",
4430 "old_mmap", /* 90 */
4431 "sys_munmap",
4432 "sys_truncate",
4433 "sys_ftruncate",
4434 "sys_fchmod",
4435 "sys_fchown16", /* 95 */
4436 "sys_getpriority",
4437 "sys_setpriority",
4438 "sys_ni_syscall", /* old profil syscall holder */
4439 "sys_statfs",
4440 "sys_fstatfs", /* 100 */
4441 "sys_ioperm",
4442 "sys_socketcall",
4443 "sys_syslog",
4444 "sys_setitimer",
4445 "sys_getitimer", /* 105 */
4446 "sys_newstat",
4447 "sys_newlstat",
4448 "sys_newfstat",
4449 "sys_uname",
4450 "sys_iopl", /* 110 */
4451 "sys_vhangup",
4452 "sys_ni_syscall", /* old "idle" system call */
4453 "sys_vm86old",
4454 "sys_wait4",
4455 "sys_swapoff", /* 115 */
4456 "sys_sysinfo",
4457 "sys_ipc",
4458 "sys_fsync",
4459 "sys_sigreturn",
4460 "sys_clone", /* 120 */
4461 "sys_setdomainname",
4462 "sys_newuname",
4463 "sys_modify_ldt",
4464 "sys_adjtimex",
4465 "sys_mprotect", /* 125 */
4466 "sys_sigprocmask",
4467 "sys_ni_syscall", /* old "create_module" */
4468 "sys_init_module",
4469 "sys_delete_module",
4470 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4471 "sys_quotactl",
4472 "sys_getpgid",
4473 "sys_fchdir",
4474 "sys_bdflush",
4475 "sys_sysfs", /* 135 */
4476 "sys_personality",
4477 "sys_ni_syscall", /* reserved for afs_syscall */
4478 "sys_setfsuid16",
4479 "sys_setfsgid16",
4480 "sys_llseek", /* 140 */
4481 "sys_getdents",
4482 "sys_select",
4483 "sys_flock",
4484 "sys_msync",
4485 "sys_readv", /* 145 */
4486 "sys_writev",
4487 "sys_getsid",
4488 "sys_fdatasync",
4489 "sys_sysctl",
4490 "sys_mlock", /* 150 */
4491 "sys_munlock",
4492 "sys_mlockall",
4493 "sys_munlockall",
4494 "sys_sched_setparam",
4495 "sys_sched_getparam", /* 155 */
4496 "sys_sched_setscheduler",
4497 "sys_sched_getscheduler",
4498 "sys_sched_yield",
4499 "sys_sched_get_priority_max",
4500 "sys_sched_get_priority_min", /* 160 */
4501 "sys_sched_rr_get_interval",
4502 "sys_nanosleep",
4503 "sys_mremap",
4504 "sys_setresuid16",
4505 "sys_getresuid16", /* 165 */
4506 "sys_vm86",
4507 "sys_ni_syscall", /* Old sys_query_module */
4508 "sys_poll",
4509 "sys_nfsservctl",
4510 "sys_setresgid16", /* 170 */
4511 "sys_getresgid16",
4512 "sys_prctl",
4513 "sys_rt_sigreturn",
4514 "sys_rt_sigaction",
4515 "sys_rt_sigprocmask", /* 175 */
4516 "sys_rt_sigpending",
4517 "sys_rt_sigtimedwait",
4518 "sys_rt_sigqueueinfo",
4519 "sys_rt_sigsuspend",
4520 "sys_pread64", /* 180 */
4521 "sys_pwrite64",
4522 "sys_chown16",
4523 "sys_getcwd",
4524 "sys_capget",
4525 "sys_capset", /* 185 */
4526 "sys_sigaltstack",
4527 "sys_sendfile",
4528 "sys_ni_syscall", /* reserved for streams1 */
4529 "sys_ni_syscall", /* reserved for streams2 */
4530 "sys_vfork", /* 190 */
4531 "sys_getrlimit",
4532 "sys_mmap2",
4533 "sys_truncate64",
4534 "sys_ftruncate64",
4535 "sys_stat64", /* 195 */
4536 "sys_lstat64",
4537 "sys_fstat64",
4538 "sys_lchown",
4539 "sys_getuid",
4540 "sys_getgid", /* 200 */
4541 "sys_geteuid",
4542 "sys_getegid",
4543 "sys_setreuid",
4544 "sys_setregid",
4545 "sys_getgroups", /* 205 */
4546 "sys_setgroups",
4547 "sys_fchown",
4548 "sys_setresuid",
4549 "sys_getresuid",
4550 "sys_setresgid", /* 210 */
4551 "sys_getresgid",
4552 "sys_chown",
4553 "sys_setuid",
4554 "sys_setgid",
4555 "sys_setfsuid", /* 215 */
4556 "sys_setfsgid",
4557 "sys_pivot_root",
4558 "sys_mincore",
4559 "sys_madvise",
4560 "sys_getdents64", /* 220 */
4561 "sys_fcntl64",
4562 "sys_ni_syscall", /* reserved for TUX */
4563 "sys_ni_syscall",
4564 "sys_gettid",
4565 "sys_readahead", /* 225 */
4566 "sys_setxattr",
4567 "sys_lsetxattr",
4568 "sys_fsetxattr",
4569 "sys_getxattr",
4570 "sys_lgetxattr", /* 230 */
4571 "sys_fgetxattr",
4572 "sys_listxattr",
4573 "sys_llistxattr",
4574 "sys_flistxattr",
4575 "sys_removexattr", /* 235 */
4576 "sys_lremovexattr",
4577 "sys_fremovexattr",
4578 "sys_tkill",
4579 "sys_sendfile64",
4580 "sys_futex", /* 240 */
4581 "sys_sched_setaffinity",
4582 "sys_sched_getaffinity",
4583 "sys_set_thread_area",
4584 "sys_get_thread_area",
4585 "sys_io_setup", /* 245 */
4586 "sys_io_destroy",
4587 "sys_io_getevents",
4588 "sys_io_submit",
4589 "sys_io_cancel",
4590 "sys_fadvise64", /* 250 */
4591 "sys_ni_syscall",
4592 "sys_exit_group",
4593 "sys_lookup_dcookie",
4594 "sys_epoll_create",
4595 "sys_epoll_ctl", /* 255 */
4596 "sys_epoll_wait",
4597 "sys_remap_file_pages",
4598 "sys_set_tid_address",
4599 "sys_timer_create",
4600 "sys_timer_settime", /* 260 */
4601 "sys_timer_gettime",
4602 "sys_timer_getoverrun",
4603 "sys_timer_delete",
4604 "sys_clock_settime",
4605 "sys_clock_gettime", /* 265 */
4606 "sys_clock_getres",
4607 "sys_clock_nanosleep",
4608 "sys_statfs64",
4609 "sys_fstatfs64",
4610 "sys_tgkill", /* 270 */
4611 "sys_utimes",
4612 "sys_fadvise64_64",
4613 "sys_ni_syscall" /* sys_vserver */
4614 };
4615
4616 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4617 switch (uEAX)
4618 {
4619 default:
4620 if (uEAX < RT_ELEMENTS(apsz))
4621 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4622 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4623 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4624 else
4625 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4626 break;
4627
4628 }
4629}
4630
4631
4632/**
4633 * Dumps an OpenBSD system call.
4634 * @param pVM VM handle.
4635 */
4636void remR3DumpOBsdSyscall(PVM pVM)
4637{
4638 static const char *apsz[] =
4639 {
4640 "SYS_syscall", //0
4641 "SYS_exit", //1
4642 "SYS_fork", //2
4643 "SYS_read", //3
4644 "SYS_write", //4
4645 "SYS_open", //5
4646 "SYS_close", //6
4647 "SYS_wait4", //7
4648 "SYS_8",
4649 "SYS_link", //9
4650 "SYS_unlink", //10
4651 "SYS_11",
4652 "SYS_chdir", //12
4653 "SYS_fchdir", //13
4654 "SYS_mknod", //14
4655 "SYS_chmod", //15
4656 "SYS_chown", //16
4657 "SYS_break", //17
4658 "SYS_18",
4659 "SYS_19",
4660 "SYS_getpid", //20
4661 "SYS_mount", //21
4662 "SYS_unmount", //22
4663 "SYS_setuid", //23
4664 "SYS_getuid", //24
4665 "SYS_geteuid", //25
4666 "SYS_ptrace", //26
4667 "SYS_recvmsg", //27
4668 "SYS_sendmsg", //28
4669 "SYS_recvfrom", //29
4670 "SYS_accept", //30
4671 "SYS_getpeername", //31
4672 "SYS_getsockname", //32
4673 "SYS_access", //33
4674 "SYS_chflags", //34
4675 "SYS_fchflags", //35
4676 "SYS_sync", //36
4677 "SYS_kill", //37
4678 "SYS_38",
4679 "SYS_getppid", //39
4680 "SYS_40",
4681 "SYS_dup", //41
4682 "SYS_opipe", //42
4683 "SYS_getegid", //43
4684 "SYS_profil", //44
4685 "SYS_ktrace", //45
4686 "SYS_sigaction", //46
4687 "SYS_getgid", //47
4688 "SYS_sigprocmask", //48
4689 "SYS_getlogin", //49
4690 "SYS_setlogin", //50
4691 "SYS_acct", //51
4692 "SYS_sigpending", //52
4693 "SYS_osigaltstack", //53
4694 "SYS_ioctl", //54
4695 "SYS_reboot", //55
4696 "SYS_revoke", //56
4697 "SYS_symlink", //57
4698 "SYS_readlink", //58
4699 "SYS_execve", //59
4700 "SYS_umask", //60
4701 "SYS_chroot", //61
4702 "SYS_62",
4703 "SYS_63",
4704 "SYS_64",
4705 "SYS_65",
4706 "SYS_vfork", //66
4707 "SYS_67",
4708 "SYS_68",
4709 "SYS_sbrk", //69
4710 "SYS_sstk", //70
4711 "SYS_61",
4712 "SYS_vadvise", //72
4713 "SYS_munmap", //73
4714 "SYS_mprotect", //74
4715 "SYS_madvise", //75
4716 "SYS_76",
4717 "SYS_77",
4718 "SYS_mincore", //78
4719 "SYS_getgroups", //79
4720 "SYS_setgroups", //80
4721 "SYS_getpgrp", //81
4722 "SYS_setpgid", //82
4723 "SYS_setitimer", //83
4724 "SYS_84",
4725 "SYS_85",
4726 "SYS_getitimer", //86
4727 "SYS_87",
4728 "SYS_88",
4729 "SYS_89",
4730 "SYS_dup2", //90
4731 "SYS_91",
4732 "SYS_fcntl", //92
4733 "SYS_select", //93
4734 "SYS_94",
4735 "SYS_fsync", //95
4736 "SYS_setpriority", //96
4737 "SYS_socket", //97
4738 "SYS_connect", //98
4739 "SYS_99",
4740 "SYS_getpriority", //100
4741 "SYS_101",
4742 "SYS_102",
4743 "SYS_sigreturn", //103
4744 "SYS_bind", //104
4745 "SYS_setsockopt", //105
4746 "SYS_listen", //106
4747 "SYS_107",
4748 "SYS_108",
4749 "SYS_109",
4750 "SYS_110",
4751 "SYS_sigsuspend", //111
4752 "SYS_112",
4753 "SYS_113",
4754 "SYS_114",
4755 "SYS_115",
4756 "SYS_gettimeofday", //116
4757 "SYS_getrusage", //117
4758 "SYS_getsockopt", //118
4759 "SYS_119",
4760 "SYS_readv", //120
4761 "SYS_writev", //121
4762 "SYS_settimeofday", //122
4763 "SYS_fchown", //123
4764 "SYS_fchmod", //124
4765 "SYS_125",
4766 "SYS_setreuid", //126
4767 "SYS_setregid", //127
4768 "SYS_rename", //128
4769 "SYS_129",
4770 "SYS_130",
4771 "SYS_flock", //131
4772 "SYS_mkfifo", //132
4773 "SYS_sendto", //133
4774 "SYS_shutdown", //134
4775 "SYS_socketpair", //135
4776 "SYS_mkdir", //136
4777 "SYS_rmdir", //137
4778 "SYS_utimes", //138
4779 "SYS_139",
4780 "SYS_adjtime", //140
4781 "SYS_141",
4782 "SYS_142",
4783 "SYS_143",
4784 "SYS_144",
4785 "SYS_145",
4786 "SYS_146",
4787 "SYS_setsid", //147
4788 "SYS_quotactl", //148
4789 "SYS_149",
4790 "SYS_150",
4791 "SYS_151",
4792 "SYS_152",
4793 "SYS_153",
4794 "SYS_154",
4795 "SYS_nfssvc", //155
4796 "SYS_156",
4797 "SYS_157",
4798 "SYS_158",
4799 "SYS_159",
4800 "SYS_160",
4801 "SYS_getfh", //161
4802 "SYS_162",
4803 "SYS_163",
4804 "SYS_164",
4805 "SYS_sysarch", //165
4806 "SYS_166",
4807 "SYS_167",
4808 "SYS_168",
4809 "SYS_169",
4810 "SYS_170",
4811 "SYS_171",
4812 "SYS_172",
4813 "SYS_pread", //173
4814 "SYS_pwrite", //174
4815 "SYS_175",
4816 "SYS_176",
4817 "SYS_177",
4818 "SYS_178",
4819 "SYS_179",
4820 "SYS_180",
4821 "SYS_setgid", //181
4822 "SYS_setegid", //182
4823 "SYS_seteuid", //183
4824 "SYS_lfs_bmapv", //184
4825 "SYS_lfs_markv", //185
4826 "SYS_lfs_segclean", //186
4827 "SYS_lfs_segwait", //187
4828 "SYS_188",
4829 "SYS_189",
4830 "SYS_190",
4831 "SYS_pathconf", //191
4832 "SYS_fpathconf", //192
4833 "SYS_swapctl", //193
4834 "SYS_getrlimit", //194
4835 "SYS_setrlimit", //195
4836 "SYS_getdirentries", //196
4837 "SYS_mmap", //197
4838 "SYS___syscall", //198
4839 "SYS_lseek", //199
4840 "SYS_truncate", //200
4841 "SYS_ftruncate", //201
4842 "SYS___sysctl", //202
4843 "SYS_mlock", //203
4844 "SYS_munlock", //204
4845 "SYS_205",
4846 "SYS_futimes", //206
4847 "SYS_getpgid", //207
4848 "SYS_xfspioctl", //208
4849 "SYS_209",
4850 "SYS_210",
4851 "SYS_211",
4852 "SYS_212",
4853 "SYS_213",
4854 "SYS_214",
4855 "SYS_215",
4856 "SYS_216",
4857 "SYS_217",
4858 "SYS_218",
4859 "SYS_219",
4860 "SYS_220",
4861 "SYS_semget", //221
4862 "SYS_222",
4863 "SYS_223",
4864 "SYS_224",
4865 "SYS_msgget", //225
4866 "SYS_msgsnd", //226
4867 "SYS_msgrcv", //227
4868 "SYS_shmat", //228
4869 "SYS_229",
4870 "SYS_shmdt", //230
4871 "SYS_231",
4872 "SYS_clock_gettime", //232
4873 "SYS_clock_settime", //233
4874 "SYS_clock_getres", //234
4875 "SYS_235",
4876 "SYS_236",
4877 "SYS_237",
4878 "SYS_238",
4879 "SYS_239",
4880 "SYS_nanosleep", //240
4881 "SYS_241",
4882 "SYS_242",
4883 "SYS_243",
4884 "SYS_244",
4885 "SYS_245",
4886 "SYS_246",
4887 "SYS_247",
4888 "SYS_248",
4889 "SYS_249",
4890 "SYS_minherit", //250
4891 "SYS_rfork", //251
4892 "SYS_poll", //252
4893 "SYS_issetugid", //253
4894 "SYS_lchown", //254
4895 "SYS_getsid", //255
4896 "SYS_msync", //256
4897 "SYS_257",
4898 "SYS_258",
4899 "SYS_259",
4900 "SYS_getfsstat", //260
4901 "SYS_statfs", //261
4902 "SYS_fstatfs", //262
4903 "SYS_pipe", //263
4904 "SYS_fhopen", //264
4905 "SYS_265",
4906 "SYS_fhstatfs", //266
4907 "SYS_preadv", //267
4908 "SYS_pwritev", //268
4909 "SYS_kqueue", //269
4910 "SYS_kevent", //270
4911 "SYS_mlockall", //271
4912 "SYS_munlockall", //272
4913 "SYS_getpeereid", //273
4914 "SYS_274",
4915 "SYS_275",
4916 "SYS_276",
4917 "SYS_277",
4918 "SYS_278",
4919 "SYS_279",
4920 "SYS_280",
4921 "SYS_getresuid", //281
4922 "SYS_setresuid", //282
4923 "SYS_getresgid", //283
4924 "SYS_setresgid", //284
4925 "SYS_285",
4926 "SYS_mquery", //286
4927 "SYS_closefrom", //287
4928 "SYS_sigaltstack", //288
4929 "SYS_shmget", //289
4930 "SYS_semop", //290
4931 "SYS_stat", //291
4932 "SYS_fstat", //292
4933 "SYS_lstat", //293
4934 "SYS_fhstat", //294
4935 "SYS___semctl", //295
4936 "SYS_shmctl", //296
4937 "SYS_msgctl", //297
4938 "SYS_MAXSYSCALL", //298
4939 //299
4940 //300
4941 };
4942 uint32_t uEAX;
4943 if (!LogIsEnabled())
4944 return;
4945 uEAX = CPUMGetGuestEAX(pVM);
4946 switch (uEAX)
4947 {
4948 default:
4949 if (uEAX < RT_ELEMENTS(apsz))
4950 {
4951 uint32_t au32Args[8] = {0};
4952 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
4953 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
4954 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
4955 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
4956 }
4957 else
4958 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
4959 break;
4960 }
4961}
4962
4963
4964#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
4965/**
4966 * The Dll main entry point (stub).
4967 */
4968bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
4969{
4970 return true;
4971}
4972
/**
 * Minimal memcpy replacement for the no-CRT x86 Windows build.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer; must not overlap dst (standard memcpy contract).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* fix: old code dropped the const qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
4980
4981#endif
4982
/**
 * SMM state-change callback required by the recompiler core.
 *
 * Intentionally a no-op: the VBox glue keeps no state here that depends on
 * SMM transitions.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette