VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 46278

Last change on this file since 46278 was 46160, checked in by vboxsync, 12 years ago

nits

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 183.1 KB
/* $Id: VBoxRecompiler.c 46160 2013-05-19 13:30:08Z vboxsync $ */
/** @file
 * VBox Recompiler - QEMU.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_REM
#include <stdio.h>      /* FILE */
#include "osdep.h"
#include "config.h"
#include "cpu.h"
#include "exec-all.h"
#include "ioport.h"

#include <VBox/vmm/rem.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/dbg.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include "REMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/param.h>
#include <VBox/err.h>

#include <VBox/log.h>
#include <iprt/semaphore.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/thread.h>
#include <iprt/string.h>

/* Don't wanna include everything. */
extern void cpu_exec_init_all(uintptr_t tb_size);
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUX86State *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
ram_addr_t get_phys_page_offset(target_ulong addr);
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** When defined, remR3RunLoggingStep drives single stepping via QEMU's
 *  cpu_single_step() facility instead of the CPU_INTERRUPT_SINGLE_INSTR flag. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static void     remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
static int      remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);

static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMCOUNTER    gStatCpuGetTSC;
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
static STAMCOUNTER    gaStatRefuseStale[6];
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;

/*
 * Global stuff.
 */

/** MMIO read callbacks. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};


#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static FNDBGCCMD remR3CmdDisasEnableStepping;

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0,          ~0U,       DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable single stepping with logged disassembly. "
                          "If no argument is given, the current state is shown."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warnings. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/**
 * Initializes the REM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if 0 /* just an annoyance at the moment. */
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default. */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",   STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",       STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",       STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",       STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",        STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",      STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",           STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",      STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",       STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",      STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",          STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",    STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",        STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",      STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",         STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",    STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",         STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",      STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",         STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",    STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS",     STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",            STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",     STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",        STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",     STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",          STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
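    /* Note (added): StatTbFlush is updated via 64-bit STAM profiling operations,
       so it presumably must be naturally aligned within CPUX86State; the
       asserts below enforce 4- and 8-byte alignment. */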
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    STAM_REL_REG(pVM, &tb_flush_count,           STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,          STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

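    /* The notification records form an index-based free list: chain every
       record to its successor here; the final record is terminated with
       UINT32_MAX right after the loop. */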
    for (i = 0; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}


/**
 * Finalizes the REM initialization.
 *
 * This is called after all components, devices and drivers have
 * been initialized. Its main purpose is to finish the RAM-related
 * initialization.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 */
REMR3DECL(int) REMR3InitFinalize(PVM pVM)
{
    int rc;

    /*
     * Ram size & dirty bit map.
     */
    Assert(!pVM->rem.s.fGCPhysLastRamFixed);
    pVM->rem.s.fGCPhysLastRamFixed = true;
#ifdef RT_STRICT
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
#else
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
#endif
    return rc;
}

/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int      rc = VINF_SUCCESS;
    RTGCPHYS cb;

    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

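    /* The dirty map keeps one byte of state per guest page, hence the
       RAM size >> PAGE_SHIFT sizing below. */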
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Round the map up to cover the nearest 4GB of RAM and leave at least 64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

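        /* Shift phys_dirty to the end of the accessible part of the allocation
           so that overrunning the map lands in the PROT_NONE guard area. */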
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}


/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources; the VM
 * itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gaStatRefuseStale[0]);
    STAM_DEREG(pVM, &gaStatRefuseStale[1]);
    STAM_DEREG(pVM, &gaStatRefuseStale[2]);
    STAM_DEREG(pVM, &gaStatRefuseStale[3]);
    STAM_DEREG(pVM, &gaStatRefuseStale[4]);
    STAM_DEREG(pVM, &gaStatRefuseStale[5]);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}


/**
 * The VM is being reset.
 *
 * For the REM component this means calling cpu_reset() and
 * reinitializing some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);              /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);       /* terminator */
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int      rc;
    PREM     pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (   uVersion != REM_SAVED_STATE_VERSION
        && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);    /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}



#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM_RUN

/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int     rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool    fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enable single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, it has to be disabled before we start stepping.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves.
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HM:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}


/**
 * Set a breakpoint using the REM facilities.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
    {
        LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
    return VERR_REM_NO_MORE_BP_SLOTS;
}


/**
 * Clears a breakpoint set by REMR3BreakpointSet().
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
    {
        LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
    return VERR_REM_BP_NOT_FOUND;
}


/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. It is intended to be called while in raw mode and
 * thus takes care of all the state syncing between REM and
 * the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HM;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint  *pBP;
                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HM\n"));
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}


/**
 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The Virtual CPU handle.
 */
static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    Assert(pVM->rem.s.fInREM);
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    cpu_single_step(&pVM->rem.s.Env, 1);
#else
    Assert(!pVM->rem.s.Env.singlestep_enabled);
#endif

    /*
     * Now we set the execute single instruction flag and enter the cpu_exec loop.
     */
    for (;;)
    {
        char szBuf[256];

        /*
         * Log the current registers state and instruction.
         */
        remR3StateUpdate(pVM, pVCpu);
        DBGFR3Info(pVM->pUVM, "cpumguest", NULL, NULL);
        szBuf[0] = '\0';
        rc = DBGFR3DisasInstrEx(pVM->pUVM,
                                pVCpu->idCpu,
                                0, /* Sel */  0, /* GCPtr */
                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                szBuf,
                                sizeof(szBuf),
                                NULL);
        if (RT_FAILURE(rc))
            RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
        RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);

        /*
         * Execute the instruction.
         */
        TMNotifyStartOfExecution(pVCpu);

        if (   pVM->rem.s.Env.exception_index < 0
            || pVM->rem.s.Env.exception_index > 256)
            pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        pVM->rem.s.Env.interrupt_request = 0;
#else
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
#endif
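        /* Mirror any pending PIC/APIC (or REM-queued) interrupt into the
           recompiler's hard interrupt request so this step can deliver it. */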
        if (   VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
            || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
            pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
        RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n",
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        rc = cpu_exec(&pVM->rem.s.Env);

        RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        TMNotifyEndOfExecution(pVCpu);

        switch (rc)
        {
#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
            /*
             * The normal exit.
             */
            case EXCP_SINGLE_INSTR:
                if (   !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
                    && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                    continue;
                RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                            pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                rc = VINF_SUCCESS;
                break;

#else
            /*
             * The normal exit, check for breakpoints at PC just to be sure.
             */
#endif
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint  *pBP;
                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
                if (rc == VINF_EM_DBG_STEPPED)
                {
                    if (   !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
                        && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                        continue;

                    RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                                pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                    rc = VINF_SUCCESS;
                }
#endif
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
                rc = VINF_SUCCESS;
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HM rc=VINF_EM_RESCHEDULE_HM\n");
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
                rc = VINF_EM_RESCHEDULE;
                break;
        }
        break;
    }

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
//    cpu_single_step(&pVM->rem.s.Env, 0);
#else
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
#endif
    return rc;
}


/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                CPUBreakpoint  *pBP;
                RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HM:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HM\n"));
            rc = VINF_EM_RESCHEDULE_HM;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}


/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context.
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    return false;
#endif

    /* Update the call counter (its value doubles as a first-call flag; see the == 1 check near the end). */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HM;

        /*
         * The simple check first...
         */
        if (!EMIsHwVirtExecutionEnabled(env->pVM))
            return false;

        /*
         * Create partial context for HMR3CanExecuteGuest
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

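        /* Note (added): QEMU stores the raw descriptor attribute bits shifted
           left by 8 in the segment 'flags' field; ">> 8" with the 0xF0FF mask
           presumably recovers the x86 attribute layout while dropping the
           limit 19:16 nibble kept in those bits. */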
        Ctx.tr.Sel      = env->tr.selector;
        Ctx.tr.ValidSel = env->tr.selector;
        Ctx.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
        Ctx.tr.u64Base  = env->tr.base;
        Ctx.tr.u32Limit = env->tr.limit;
        Ctx.tr.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.ldtr.Sel      = env->ldt.selector;
        Ctx.ldtr.ValidSel = env->ldt.selector;
        Ctx.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
        Ctx.ldtr.u64Base  = env->ldt.base;
        Ctx.ldtr.u32Limit = env->ldt.limit;
        Ctx.ldtr.Attr.u   = (env->ldt.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt  = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt  = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs.Sel      = env->segs[R_CS].selector;
        Ctx.cs.ValidSel = env->segs[R_CS].selector;
        Ctx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;
        Ctx.cs.u64Base  = env->segs[R_CS].base;
        Ctx.cs.u32Limit = env->segs[R_CS].limit;
        Ctx.cs.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds.Sel      = env->segs[R_DS].selector;
        Ctx.ds.ValidSel = env->segs[R_DS].selector;
        Ctx.ds.fFlags   = CPUMSELREG_FLAGS_VALID;
        Ctx.ds.u64Base  = env->segs[R_DS].base;
        Ctx.ds.u32Limit = env->segs[R_DS].limit;
        Ctx.ds.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es.Sel      = env->segs[R_ES].selector;
        Ctx.es.ValidSel = env->segs[R_ES].selector;
        Ctx.es.fFlags   = CPUMSELREG_FLAGS_VALID;
        Ctx.es.u64Base  = env->segs[R_ES].base;
        Ctx.es.u32Limit = env->segs[R_ES].limit;
        Ctx.es.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs.Sel      = env->segs[R_FS].selector;
        Ctx.fs.ValidSel = env->segs[R_FS].selector;
        Ctx.fs.fFlags   = CPUMSELREG_FLAGS_VALID;
        Ctx.fs.u64Base  = env->segs[R_FS].base;
        Ctx.fs.u32Limit = env->segs[R_FS].limit;
        Ctx.fs.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs.Sel      = env->segs[R_GS].selector;
        Ctx.gs.ValidSel = env->segs[R_GS].selector;
        Ctx.gs.fFlags   = CPUMSELREG_FLAGS_VALID;
        Ctx.gs.u64Base  = env->segs[R_GS].base;
        Ctx.gs.u32Limit = env->segs[R_GS].limit;
        Ctx.gs.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss.Sel      = env->segs[R_SS].selector;
        Ctx.ss.ValidSel = env->segs[R_SS].selector;
        Ctx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
        Ctx.ss.u64Base  = env->segs[R_SS].base;
        Ctx.ss.u32Limit = env->segs[R_SS].limit;
        Ctx.ss.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bit protected mode code, with paging enabled, is allowed here.
         */
        if (HMR3CanExecuteGuest(env->pVM, &Ctx))
        {
            *piException = EXCP_EXECUTE_HM;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bit protected mode ring-3 code that has no IO privileges,
     * or 32-bit protected mode ring-0 code.
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK)
    {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->breakpoints))
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->watchpoints))
    {
        //Log2(("raw mode refused: Watchpoints\n"));
        return false;
    }

    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

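    /* The CPL is kept in the low bits of hflags; (fFlags >> HF_CPL_SHIFT) & 3
       extracts it, with 3 meaning ring-3 (user mode) code. */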
    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        if (EMIsRawRing1Enabled(env->pVM))
        {
            /* Only ring 0 and 1 supervisor code. */
            if (((fFlags >> HF_CPL_SHIFT) & 3) == 2)    /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
            {
                Log2(("raw r0 mode refused: CPL %d\n", (fFlags >> HF_CPL_SHIFT) & 3));
                return false;
            }
        }
        /* Only R0. */
        else if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

#ifdef VBOX_WITH_RAW_MODE
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }
#endif

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

#ifndef VBOX_WITH_RAW_RING1
        if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
        {
            Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
            return false;
        }
#endif
        env->state |= CPU_RAW_RING0;
    }

    /*
1687 * Don't reschedule the first time we're called, because there might be
1688 * special reasons why we're here that are not covered by the above checks.
1689 */
1690 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1691 {
1692 Log2(("raw mode refused: first scheduling\n"));
1693 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1694 return false;
1695 }
1696
1697 /*
1698 * Stale hidden selectors mean raw-mode is unsafe (we're being very careful).
1699 */
1700 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1701 {
1702 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1703 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1704 return false;
1705 }
1706 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1707 {
1708 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1709 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1710 return false;
1711 }
1712 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1713 {
1714 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1715 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1716 return false;
1717 }
1718 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1719 {
1720 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1721 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1722 return false;
1723 }
1724 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1725 {
1726 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1727 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1728 return false;
1729 }
1730 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1731 {
1732 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1733 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1734 return false;
1735 }
1736
1737/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1738 *piException = EXCP_EXECUTE_RAW;
1739 return true;
1740}
1741
1742
1743#ifdef VBOX_WITH_RAW_MODE
1744/**
1745 * Fetches a code byte.
1746 *
1747 * @returns Success indicator (bool) for ease of use.
1748 * @param env The CPU environment structure.
1749 * @param GCPtrInstr Where to fetch code.
1750 * @param pu8Byte Where to store the byte on success.
1751 */
1752bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1753{
1754 return RT_SUCCESS(PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte));
1758}
1759#endif /* VBOX_WITH_RAW_MODE */
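
/* Illustrative use of remR3GetOpcode (a sketch, not a call site from this
 * file): the recompiler's byte fetcher can route reads through PATM so that
 * patched guest code is seen with its original opcode bytes:
 *
 * @code
 *      uint8_t bOpcode;
 *      if (remR3GetOpcode(env, GCPtrInstr, &bOpcode))
 *          Log2(("original byte at %RGv: %#x\n", GCPtrInstr, bOpcode));
 * @endcode
 */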
1760
1761
1762/**
1763 * Flush (or invalidate if you like) page table/dir entry.
1764 *
1765 * (invlpg instruction; tlb_flush_page)
1766 *
1767 * @param env Pointer to cpu environment.
1768 * @param GCPtr The virtual address whose page table/dir entry should be invalidated.
1769 */
1770void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1771{
1772 PVM pVM = env->pVM;
1773 PCPUMCTX pCtx;
1774 int rc;
1775
1776 Assert(EMRemIsLockOwner(env->pVM));
1777
1778 /*
1779 * When we're replaying invlpg instructions or restoring a saved
1780 * state we disable this path.
1781 */
1782 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1783 return;
1784 LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1785 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1786
1787 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1788
1789 /*
1790 * Update the control registers before calling PGMFlushPage.
1791 */
1792 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1793 Assert(pCtx);
1794 pCtx->cr0 = env->cr[0];
1795 pCtx->cr3 = env->cr[3];
1796#ifdef VBOX_WITH_RAW_MODE
1797 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1798 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1799#endif
1800 pCtx->cr4 = env->cr[4];
1801
1802 /*
1803 * Let PGM do the rest.
1804 */
1805 Assert(env->pVCpu);
1806 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1807 if (RT_FAILURE(rc))
1808 {
1809 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1810 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1811 }
1812 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1813}
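
/* A note on the fIgnore* guards (illustrative): when REMR3State replays
 * queued invalidations it calls tlb_flush_page() itself, and that path would
 * otherwise come right back through this function. The flags break the cycle:
 *
 * @code
 *      pVM->rem.s.fIgnoreInvlPg = true;    // remR3FlushPage becomes a no-op
 *      tlb_flush_page(&pVM->rem.s.Env, GCPtrPage);
 *      pVM->rem.s.fIgnoreInvlPg = false;
 * @endcode
 */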
1814
1815
1816#ifndef REM_PHYS_ADDR_IN_TLB
1817/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
1818void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
1819{
1820 void *pv;
1821 int rc;
1822
1823
1824 /* Address must be aligned enough to fiddle with lower bits */
1825 Assert((physAddr & 0x3) == 0);
1826 /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/
1827
1828 STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
1829 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1830 STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
1831 Assert( rc == VINF_SUCCESS
1832 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1833 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1834 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
1835 if (RT_FAILURE(rc))
1836 return (void *)1;
1837 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1838 return (void *)((uintptr_t)pv | 2);
1839 return pv;
1840}
1841#endif /* REM_PHYS_ADDR_IN_TLB */
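
/* The value returned above is a tagged pointer; a sketch of how a caller is
 * assumed to decode it (physAddr is at least 4 byte aligned, so the low two
 * bits are free to carry the PGM TLB status; the handler functions named
 * here are hypothetical):
 *
 * @code
 *      void *pv = remR3TlbGCPhys2Ptr(env, physAddr, fWritable);
 *      if ((uintptr_t)pv & 1)
 *          handleViaAccessHandler();   // VERR_PGM_PHYS_TLB_* - no direct mapping
 *      else if ((uintptr_t)pv & 2)
 *          readOnlyAccess((void *)((uintptr_t)pv & ~(uintptr_t)3)); // writes trap
 *      else
 *          directAccess(pv);           // plain RAM, read and write directly
 * @endcode
 */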
1842
1843
1844/**
1845 * Called from tlb_protect_code in order to write monitor a code page.
1846 *
1847 * @param env Pointer to the CPU environment.
1848 * @param GCPtr Code page to monitor
1849 */
1850void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1851{
1852#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1853 Assert(env->pVM->rem.s.fInREM);
1854 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1855 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1856 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1857 && !(env->eflags & VM_MASK) /* no V86 mode */
1858 && !HMIsEnabled(env->pVM))
1859 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1860#endif
1861}
1862
1863
1864/**
1865 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1866 *
1867 * @param env Pointer to the CPU environment.
1868 * @param GCPtr Code page to monitor
1869 */
1870void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1871{
1872 Assert(env->pVM->rem.s.fInREM);
1873#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1874 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1875 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1876 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1877 && !(env->eflags & VM_MASK) /* no V86 mode */
1878 && !HMIsEnabled(env->pVM))
1879 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1880#endif
1881}
1882
1883
1884/**
1885 * Called when the CPU is initialized, any of the CRx registers are changed or
1886 * when the A20 line is modified.
1887 *
1888 * @param env Pointer to the CPU environment.
1889 * @param fGlobal Set if the flush is global.
1890 */
1891void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1892{
1893 PVM pVM = env->pVM;
1894 PCPUMCTX pCtx;
1895 Assert(EMRemIsLockOwner(pVM));
1896
1897 /*
1898 * When we're replaying invlpg instructions or restoring a saved
1899 * state we disable this path.
1900 */
1901 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1902 return;
1903 Assert(pVM->rem.s.fInREM);
1904
1905 /*
1906 * The caller doesn't check cr4, so we have to do that for ourselves.
1907 */
1908 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1909 fGlobal = true;
1910 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1911
1912 /*
1913 * Update the control registers before calling PGMR3FlushTLB.
1914 */
1915 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1916 Assert(pCtx);
1917 pCtx->cr0 = env->cr[0];
1918 pCtx->cr3 = env->cr[3];
1919#ifdef VBOX_WITH_RAW_MODE
1920 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1921 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1922#endif
1923 pCtx->cr4 = env->cr[4];
1924
1925 /*
1926 * Let PGM do the rest.
1927 */
1928 Assert(env->pVCpu);
1929 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1930}
1931
1932
1933/**
1934 * Called when any of the cr0, cr4 or efer registers is updated.
1935 *
1936 * @param env Pointer to the CPU environment.
1937 */
1938void remR3ChangeCpuMode(CPUX86State *env)
1939{
1940 PVM pVM = env->pVM;
1941 uint64_t efer;
1942 PCPUMCTX pCtx;
1943 int rc;
1944
1945 /*
1946 * When we're replaying loads or restoring a saved
1947 * state this path is disabled.
1948 */
1949 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1950 return;
1951 Assert(pVM->rem.s.fInREM);
1952
1953 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1954 Assert(pCtx);
1955
1956 /*
1957 * Notify PGM about WP0 being enabled (like CPUSetGuestCR0 does).
1958 */
1959 if (((env->cr[0] ^ pCtx->cr0) & X86_CR0_WP) && (env->cr[0] & X86_CR0_WP))
1960 PGMCr0WpEnabled(env->pVCpu);
1961
1962 /*
1963 * Update the control registers before calling PGMChangeMode()
1964 * as it may need to map whatever cr3 is pointing to.
1965 */
1966 pCtx->cr0 = env->cr[0];
1967 pCtx->cr3 = env->cr[3];
1968#ifdef VBOX_WITH_RAW_MODE
1969 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1970 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1971#endif
1972 pCtx->cr4 = env->cr[4];
1973#ifdef TARGET_X86_64
1974 efer = env->efer;
1975 pCtx->msrEFER = efer;
1976#else
1977 efer = 0;
1978#endif
1979 Assert(env->pVCpu);
1980 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1981 if (rc != VINF_SUCCESS)
1982 {
1983 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1984 {
1985 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1986 remR3RaiseRC(env->pVM, rc);
1987 }
1988 else
1989 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1990 }
1991}
1992
1993
1994/**
1995 * Called from compiled code to run dma.
1996 *
1997 * @param env Pointer to the CPU environment.
1998 */
1999void remR3DmaRun(CPUX86State *env)
2000{
2001 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
2002 PDMR3DmaRun(env->pVM);
2003 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
2004}
2005
2006
2007/**
2008 * Called from compiled code to schedule pending timers in VMM
2009 *
2010 * @param env Pointer to the CPU environment.
2011 */
2012void remR3TimersRun(CPUX86State *env)
2013{
2014 LogFlow(("remR3TimersRun:\n"));
2015 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
2016 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
2017 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
2018 TMR3TimerQueuesDo(env->pVM);
2019 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
2020 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
2021}
2022
2023
2024/**
2025 * Record trap occurrence.
2026 *
2027 * @returns VBox status code
2028 * @param env Pointer to the CPU environment.
2029 * @param uTrap Trap nr
2030 * @param uErrorCode Error code
2031 * @param pvNextEIP Next EIP
2032 */
2033int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
2034{
2035 PVM pVM = env->pVM;
2036#ifdef VBOX_WITH_STATISTICS
2037 static STAMCOUNTER s_aStatTrap[255];
2038 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
2039#endif
2040
2041#ifdef VBOX_WITH_STATISTICS
2042 if (uTrap < 255)
2043 {
2044 if (!s_aRegisters[uTrap])
2045 {
2046 char szStatName[64];
2047 s_aRegisters[uTrap] = true;
2048 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
2049 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
2050 }
2051 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
2052 }
2053#endif
2054 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2055 if ( uTrap < 0x20
2056 && (env->cr[0] & X86_CR0_PE)
2057 && !(env->eflags & X86_EFL_VM))
2058 {
2059#ifdef DEBUG
2060 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
2061#endif
2062 if (pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
2063 {
2064 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2065 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
2066 return VERR_REM_TOO_MANY_TRAPS;
2067 }
2068 if (pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
2069 {
2070 Log(("remR3NotifyTrap: uTrap=%#x set as pending\n", uTrap));
2071 pVM->rem.s.cPendingExceptions = 1;
2072 }
2073 pVM->rem.s.uPendingException = uTrap;
2074 pVM->rem.s.uPendingExcptEIP = env->eip;
2075 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2076 }
2077 else
2078 {
2079 pVM->rem.s.cPendingExceptions = 0;
2080 pVM->rem.s.uPendingException = uTrap;
2081 pVM->rem.s.uPendingExcptEIP = env->eip;
2082 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2083 }
2084 return VINF_SUCCESS;
2085}
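
/* A note on the throttle above (sketch of the intent): the same exception at
 * the same EIP/CR2 may legitimately repeat, e.g. a #PF that gets retried
 * after PGM syncs the page in, but 512 identical occurrences in a row is
 * taken to mean the guest or the recompiler is stuck, so
 * VERR_REM_TOO_MANY_TRAPS is raised to bail out instead of spinning. */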
2086
2087
2088/**
2089 * Clear current active trap.
2090 *
2091 * @param pVM VM Handle.
2092 */
2093void remR3TrapClear(PVM pVM)
2094{
2095 pVM->rem.s.cPendingExceptions = 0;
2096 pVM->rem.s.uPendingException = 0;
2097 pVM->rem.s.uPendingExcptEIP = 0;
2098 pVM->rem.s.uPendingExcptCR2 = 0;
2099}
2100
2101
2102/**
2103 * Record previous call instruction addresses.
2104 *
2105 * @param env Pointer to the CPU environment.
2106 */
2107void remR3RecordCall(CPUX86State *env)
2108{
2109#ifdef VBOX_WITH_RAW_MODE
2110 CSAMR3RecordCallAddress(env->pVM, env->eip);
2111#endif
2112}
2113
2114
2115/**
2116 * Syncs the internal REM state with the VM.
2117 *
2118 * This must be called before REMR3Run() is invoked whenever the REM
2119 * state is not up to date. Calling it several times in a row is not
2120 * permitted.
2121 *
2122 * @returns VBox status code.
2123 *
2124 * @param pVM VM Handle.
2125 * @param pVCpu VMCPU Handle.
2126 *
2127 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2128 * not do this since the majority of the callers don't want any unnecessary events
2129 * pending that would immediately interrupt execution.
2130 */
2131REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2132{
2133 register const CPUMCTX *pCtx;
2134 register unsigned fFlags;
2135 unsigned i;
2136 TRPMEVENT enmType;
2137 uint8_t u8TrapNo;
2138 uint32_t uCpl;
2139 int rc;
2140
2141 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2142 Log2(("REMR3State:\n"));
2143
2144 pVM->rem.s.Env.pVCpu = pVCpu;
2145 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2146
2147 Assert(!pVM->rem.s.fInREM);
2148 pVM->rem.s.fInStateSync = true;
2149
2150 /*
2151 * If we have to flush TBs, do that immediately.
2152 */
2153 if (pVM->rem.s.fFlushTBs)
2154 {
2155 STAM_COUNTER_INC(&gStatFlushTBs);
2156 tb_flush(&pVM->rem.s.Env);
2157 pVM->rem.s.fFlushTBs = false;
2158 }
2159
2160 /*
2161 * Copy the registers which require no special handling.
2162 */
2163#ifdef TARGET_X86_64
2164 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2165 Assert(R_EAX == 0);
2166 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2167 Assert(R_ECX == 1);
2168 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2169 Assert(R_EDX == 2);
2170 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2171 Assert(R_EBX == 3);
2172 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2173 Assert(R_ESP == 4);
2174 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2175 Assert(R_EBP == 5);
2176 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2177 Assert(R_ESI == 6);
2178 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2179 Assert(R_EDI == 7);
2180 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2181 pVM->rem.s.Env.regs[8] = pCtx->r8;
2182 pVM->rem.s.Env.regs[9] = pCtx->r9;
2183 pVM->rem.s.Env.regs[10] = pCtx->r10;
2184 pVM->rem.s.Env.regs[11] = pCtx->r11;
2185 pVM->rem.s.Env.regs[12] = pCtx->r12;
2186 pVM->rem.s.Env.regs[13] = pCtx->r13;
2187 pVM->rem.s.Env.regs[14] = pCtx->r14;
2188 pVM->rem.s.Env.regs[15] = pCtx->r15;
2189
2190 pVM->rem.s.Env.eip = pCtx->rip;
2191
2192 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2193#else
2194 Assert(R_EAX == 0);
2195 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2196 Assert(R_ECX == 1);
2197 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2198 Assert(R_EDX == 2);
2199 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2200 Assert(R_EBX == 3);
2201 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2202 Assert(R_ESP == 4);
2203 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2204 Assert(R_EBP == 5);
2205 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2206 Assert(R_ESI == 6);
2207 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2208 Assert(R_EDI == 7);
2209 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2210 pVM->rem.s.Env.eip = pCtx->eip;
2211
2212 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2213#endif
2214
2215 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2216
2217 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2218 for (i = 0; i < 8; i++)
2219 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2220
2221#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2222 /*
2223 * Clear the halted hidden flag (the interrupt waking up the CPU can
2224 * have been dispatched in raw mode).
2225 */
2226 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2227#endif
2228
2229 /*
2230 * Replay invlpg? Only if we're not flushing the TLB.
2231 */
2232 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2233 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2234 if (pVM->rem.s.cInvalidatedPages)
2235 {
2236 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2237 {
2238 RTUINT i;
2239
2240 pVM->rem.s.fIgnoreCR3Load = true;
2241 pVM->rem.s.fIgnoreInvlPg = true;
2242 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2243 {
2244 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2245 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2246 }
2247 pVM->rem.s.fIgnoreInvlPg = false;
2248 pVM->rem.s.fIgnoreCR3Load = false;
2249 }
2250 pVM->rem.s.cInvalidatedPages = 0;
2251 }
2252
2253 /* Replay notification changes. */
2254 REMR3ReplayHandlerNotifications(pVM);
2255
2256 /* Update MSRs; before CRx registers! */
2257 pVM->rem.s.Env.efer = pCtx->msrEFER;
2258 pVM->rem.s.Env.star = pCtx->msrSTAR;
2259 pVM->rem.s.Env.pat = pCtx->msrPAT;
2260#ifdef TARGET_X86_64
2261 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2262 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2263 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2264 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2265
2266 /* Update the internal long mode activate flag according to the new EFER value. */
2267 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2268 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2269 else
2270 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2271#endif
2272
2273 /* Update the inhibit IRQ mask. */
2274 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2275 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2276 {
2277 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2278 if (InhibitPC == pCtx->rip)
2279 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2280 else
2281 {
2282 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2283 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2284 }
2285 }
2286
2287 /*
2288 * Sync the A20 gate.
2289 */
2290 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2291 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2292 {
2293 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2294 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2295 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2296 }
2297
2298 /*
2299 * Registers which are rarely changed and require special handling / order when changed.
2300 */
2301 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2302 | CPUM_CHANGED_CR4
2303 | CPUM_CHANGED_CR0
2304 | CPUM_CHANGED_CR3
2305 | CPUM_CHANGED_GDTR
2306 | CPUM_CHANGED_IDTR
2307 | CPUM_CHANGED_SYSENTER_MSR
2308 | CPUM_CHANGED_LDTR
2309 | CPUM_CHANGED_CPUID
2310 | CPUM_CHANGED_FPU_REM
2311 )
2312 )
2313 {
2314 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2315 {
2316 pVM->rem.s.fIgnoreCR3Load = true;
2317 tlb_flush(&pVM->rem.s.Env, true);
2318 pVM->rem.s.fIgnoreCR3Load = false;
2319 }
2320
2321 /* CR4 before CR0! */
2322 if (fFlags & CPUM_CHANGED_CR4)
2323 {
2324 pVM->rem.s.fIgnoreCR3Load = true;
2325 pVM->rem.s.fIgnoreCpuMode = true;
2326 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2327 pVM->rem.s.fIgnoreCpuMode = false;
2328 pVM->rem.s.fIgnoreCR3Load = false;
2329 }
2330
2331 if (fFlags & CPUM_CHANGED_CR0)
2332 {
2333 pVM->rem.s.fIgnoreCR3Load = true;
2334 pVM->rem.s.fIgnoreCpuMode = true;
2335 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2336 pVM->rem.s.fIgnoreCpuMode = false;
2337 pVM->rem.s.fIgnoreCR3Load = false;
2338 }
2339
2340 if (fFlags & CPUM_CHANGED_CR3)
2341 {
2342 pVM->rem.s.fIgnoreCR3Load = true;
2343 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2344 pVM->rem.s.fIgnoreCR3Load = false;
2345 }
2346
2347 if (fFlags & CPUM_CHANGED_GDTR)
2348 {
2349 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2350 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2351 }
2352
2353 if (fFlags & CPUM_CHANGED_IDTR)
2354 {
2355 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2356 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2357 }
2358
2359 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2360 {
2361 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2362 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2363 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2364 }
2365
2366 if (fFlags & CPUM_CHANGED_LDTR)
2367 {
2368 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2369 {
2370 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2371 pVM->rem.s.Env.ldt.newselector = 0;
2372 pVM->rem.s.Env.ldt.fVBoxFlags = pCtx->ldtr.fFlags;
2373 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2374 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2375 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u << 8) & 0xFFFFFF;
2376 }
2377 else
2378 {
2379 AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
2380 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2381 }
2382 }
2383
2384 if (fFlags & CPUM_CHANGED_CPUID)
2385 {
2386 uint32_t u32Dummy;
2387
2388 /*
2389 * Get the CPUID features.
2390 */
2391 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2392 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2393 }
2394
2395 /* Sync FPU state after CR4, CPUID and EFER (!). */
2396 if (fFlags & CPUM_CHANGED_FPU_REM)
2397 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2398 }
2399
2400 /*
2401 * Sync TR unconditionally to make life simpler.
2402 */
2403 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2404 pVM->rem.s.Env.tr.newselector = 0;
2405 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags;
2406 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2407 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2408 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u << 8) & 0xFFFFFF;
2409 /* Note! do_interrupt will fault if the busy flag is still set... */
2410 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2411
2412 /*
2413 * Update selector registers.
2414 *
2415 * This must be done *after* we've synced gdt, ldt and crX registers
2416 * since we're reading the GDT/LDT in sync_seg. This will happen with
2417 * a saved state that takes a quick dip into raw-mode, for instance.
2418 *
2419 * CPL/Stack: Note! Check this one first, as the CPL might have changed.
2420 * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
2421 */
2422 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2423 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2424#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
2425 do \
2426 { \
2427 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
2428 { \
2429 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
2430 (a_pVBoxSReg)->Sel, \
2431 (a_pVBoxSReg)->u64Base, \
2432 (a_pVBoxSReg)->u32Limit, \
2433 ((a_pVBoxSReg)->Attr.u << 8) & 0xFFFFFF); \
2434 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
2435 } \
2436 /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
2437 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
2438 { \
2439 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
2440 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
2441 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
2442 if ((a_pRemSReg)->newselector) \
2443 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
2444 } \
2445 else \
2446 (a_pRemSReg)->newselector = 0; \
2447 } while (0)
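
/* The attribute conversions used above and in SYNC_BACK_SREG, spelled out
 * (illustrative): CPUM keeps the 16-bit attribute word while QEmu keeps the
 * shifted second descriptor dword in its flags field, so:
 *
 * @code
 *      flags  = (Attr.u << 8) & 0xFFFFFF;  // attr -> flags; drops base 31:24
 *      Attr.u = (flags >> 8) & 0xF0FF;     // flags -> attr; 0xF0FF masks out
 *                                          // the limit 19:16 bits
 * @endcode
 */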
2448
2449 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
2450 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
2451 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
2452 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
2453 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
2454 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
2455 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2456 * be the same but not the base/limit. */
2457
2458 /*
2459 * Check for traps.
2460 */
2461 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2462 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2463 if (RT_SUCCESS(rc))
2464 {
2465#ifdef DEBUG
2466 if (u8TrapNo == 0x80)
2467 {
2468 remR3DumpLnxSyscall(pVCpu);
2469 remR3DumpOBsdSyscall(pVCpu);
2470 }
2471#endif
2472
2473 pVM->rem.s.Env.exception_index = u8TrapNo;
2474 if (enmType != TRPM_SOFTWARE_INT)
2475 {
2476 pVM->rem.s.Env.exception_is_int = 0;
2477 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2478 }
2479 else
2480 {
2481 /*
2482 * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2483 * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2484 * for int3 and into.
2485 */
2486 pVM->rem.s.Env.exception_is_int = 1;
2487 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2488 /* int 3 may be generated by one-byte 0xcc */
2489 if (u8TrapNo == 3)
2490 {
2491 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2492 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2493 }
2494 /* int 4 may be generated by one-byte 0xce */
2495 else if (u8TrapNo == 4)
2496 {
2497 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2498 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2499 }
2500 }
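        /* For reference, the opcode encodings being compensated for above:
         *      0xCD ib     int imm8  - 2 bytes (the assumed default)
         *      0xCC        int3      - 1 byte
         *      0xCE        into      - 1 byte */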
2501
2502 /* get error code and cr2 if needed. */
2503 if (enmType == TRPM_TRAP)
2504 {
2505 switch (u8TrapNo)
2506 {
2507 case X86_XCPT_PF:
2508 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2509 /* fallthru */
2510 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2511 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2512 break;
2513
2514 case X86_XCPT_AC: case X86_XCPT_DF:
2515 default:
2516 pVM->rem.s.Env.error_code = 0;
2517 break;
2518 }
2519 }
2520 else
2521 pVM->rem.s.Env.error_code = 0;
2522
2523 /*
2524 * We can now reset the active trap since the recompiler is gonna have a go at it.
2525 */
2526 rc = TRPMResetTrap(pVCpu);
2527 AssertRC(rc);
2528 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2529 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2530 }
2531
2532 /*
2533 * Clear old interrupt request flags; Check for pending hardware interrupts.
2534 * (See @remark for why we don't check for other FFs.)
2535 */
2536 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2537 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2538 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2539 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2540
2541 /*
2542 * We're now in REM mode.
2543 */
2544 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2545 pVM->rem.s.fInREM = true;
2546 pVM->rem.s.fInStateSync = false;
2547 pVM->rem.s.cCanExecuteRaw = 0;
2548 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2549 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2550 return VINF_SUCCESS;
2551}
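
/* How the three-call protocol is assumed to be driven (a sketch; the real
 * loop lives in EM and handles many more status codes and forced actions):
 *
 * @code
 *      int rc = REMR3State(pVM, pVCpu);        // sync VMM -> REM, once
 *      if (RT_SUCCESS(rc))
 *      {
 *          int rc2;
 *          rc  = REMR3Run(pVM, pVCpu);         // execute recompiled code
 *          rc2 = REMR3StateBack(pVM, pVCpu);   // sync REM -> VMM, once
 *          if (RT_SUCCESS(rc))
 *              rc = rc2;
 *      }
 * @endcode
 */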
2552
2553
2554/**
2555 * Syncs back changes in the REM state to the VM state.
2556 *
2557 * This must be called after invoking REMR3Run().
2558 * Calling it several times in a row is not permitted.
2559 *
2560 * @returns VBox status code.
2561 *
2562 * @param pVM VM Handle.
2563 * @param pVCpu VMCPU Handle.
2564 */
2565REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2566{
2567 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2568 unsigned i;
2569 Assert(pCtx);
2570
2571 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2572 Log2(("REMR3StateBack:\n"));
2573 Assert(pVM->rem.s.fInREM);
2574
2575 /*
2576 * Copy back the registers.
2577 * This is done in the order they are declared in the CPUMCTX structure.
2578 */
2579
2580 /** @todo FOP */
2581 /** @todo FPUIP */
2582 /** @todo CS */
2583 /** @todo FPUDP */
2584 /** @todo DS */
2585
2586 /** @todo check if FPU/XMM was actually used in the recompiler */
2587 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2588//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2589
2590#ifdef TARGET_X86_64
2591 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2592 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2593 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2594 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2595 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2596 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2597 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2598 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2599 pCtx->r8 = pVM->rem.s.Env.regs[8];
2600 pCtx->r9 = pVM->rem.s.Env.regs[9];
2601 pCtx->r10 = pVM->rem.s.Env.regs[10];
2602 pCtx->r11 = pVM->rem.s.Env.regs[11];
2603 pCtx->r12 = pVM->rem.s.Env.regs[12];
2604 pCtx->r13 = pVM->rem.s.Env.regs[13];
2605 pCtx->r14 = pVM->rem.s.Env.regs[14];
2606 pCtx->r15 = pVM->rem.s.Env.regs[15];
2607
2608 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2609
2610#else
2611 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2612 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2613 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2614 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2615 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2616 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2617 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2618
2619 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2620#endif
2621
2622#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2623 do \
2624 { \
2625 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2626 if (!pVM->rem.s.Env.segs[R_##a_SREG].newselector) \
2627 { \
2628 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2629 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2630 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2631 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2632 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */ \
2633 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> 8) & 0xF0FF; \
2634 } \
2635 else \
2636 { \
2637 pCtx->a_sreg.fFlags = 0; \
2638 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2639 } \
2640 } while (0)
2641
2642 SYNC_BACK_SREG(es, ES);
2643 SYNC_BACK_SREG(cs, CS);
2644 SYNC_BACK_SREG(ss, SS);
2645 SYNC_BACK_SREG(ds, DS);
2646 SYNC_BACK_SREG(fs, FS);
2647 SYNC_BACK_SREG(gs, GS);
2648
2649#ifdef TARGET_X86_64
2650 pCtx->rip = pVM->rem.s.Env.eip;
2651 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2652#else
2653 pCtx->eip = pVM->rem.s.Env.eip;
2654 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2655#endif
2656
2657 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2658 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2659 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2660#ifdef VBOX_WITH_RAW_MODE
2661 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2662 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2663#endif
2664 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2665
2666 for (i = 0; i < 8; i++)
2667 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2668
2669 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2670 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2671 {
2672 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2673 STAM_COUNTER_INC(&gStatREMGDTChange);
2674#ifdef VBOX_WITH_RAW_MODE
2675 if (!HMIsEnabled(pVM))
2676 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2677#endif
2678 }
2679
2680 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2681 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2682 {
2683 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2684 STAM_COUNTER_INC(&gStatREMIDTChange);
2685#ifdef VBOX_WITH_RAW_MODE
2686 if (!HMIsEnabled(pVM))
2687 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2688#endif
2689 }
2690
2691 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2692 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2693 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2694 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2695 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2696 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2697 )
2698 {
2699 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2700 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2701 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2702 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2703 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2704 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2705 STAM_COUNTER_INC(&gStatREMLDTRChange);
2706#ifdef VBOX_WITH_RAW_MODE
2707 if (!HMIsEnabled(pVM))
2708 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2709#endif
2710 }
2711
2712 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2713 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2714 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2715 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2716 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2717 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2718 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2719 : 0)
2720 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2721 )
2722 {
2723 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2724 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2725 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2726 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2727 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2728 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2729 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2730 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2731 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2732 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2733 if (pCtx->tr.Attr.u)
2734 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2735 STAM_COUNTER_INC(&gStatREMTRChange);
2736#ifdef VBOX_WITH_RAW_MODE
2737 if (!HMIsEnabled(pVM))
2738 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2739#endif
2740 }
2741
2742 /* Sysenter MSR */
2743 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2744 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2745 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2746
2747 /* System MSRs. */
2748 pCtx->msrEFER = pVM->rem.s.Env.efer;
2749 pCtx->msrSTAR = pVM->rem.s.Env.star;
2750 pCtx->msrPAT = pVM->rem.s.Env.pat;
2751#ifdef TARGET_X86_64
2752 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2753 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2754 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2755 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2756#endif
2757
2758 /* Inhibit interrupt flag. */
2759 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2760 {
2761 Log(("Settings VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2762 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2763 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2764 }
2765 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2766 {
2767 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2768 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2769 }
2770
2771 remR3TrapClear(pVM);
2772
2773 /*
2774 * Check for traps.
2775 */
2776 if ( pVM->rem.s.Env.exception_index >= 0
2777 && pVM->rem.s.Env.exception_index < 256)
2778 {
2779 /* This cannot be a hardware-interrupt because exception_index < EXCP_INTERRUPT. */
2780 int rc;
2781
2782 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2783 TRPMEVENT enmType = pVM->rem.s.Env.exception_is_int ? TRPM_SOFTWARE_INT : TRPM_TRAP;
2784 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, enmType);
2785 AssertRC(rc);
2786 if (enmType == TRPM_TRAP)
2787 {
2788 switch (pVM->rem.s.Env.exception_index)
2789 {
2790 case X86_XCPT_PF:
2791 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2792 /* fallthru */
2793 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2794 case X86_XCPT_AC: case X86_XCPT_DF: /* 0 */
2795 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2796 break;
2797 }
2798 }
2799 }
2800
2801 /*
2802 * We're no longer in REM mode.
2803 */
2804 CPUMR3RemLeave(pVCpu,
2805 HMIsEnabled(pVM)
2806 || ( pVM->rem.s.Env.segs[R_SS].newselector
2807 | pVM->rem.s.Env.segs[R_GS].newselector
2808 | pVM->rem.s.Env.segs[R_FS].newselector
2809 | pVM->rem.s.Env.segs[R_ES].newselector
2810 | pVM->rem.s.Env.segs[R_DS].newselector
2811 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2812 );
2813 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2814 pVM->rem.s.fInREM = false;
2815 pVM->rem.s.pCtx = NULL;
2816 pVM->rem.s.Env.pVCpu = NULL;
2817 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2818 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2819 return VINF_SUCCESS;
2820}
2821
2822
2823/**
2824 * This is called by the disassembler when it wants to update the cpu state
2825 * before for instance doing a register dump.
2826 */
2827static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2828{
2829 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2830 unsigned i;
2831
2832 Assert(pVM->rem.s.fInREM);
2833
2834 /*
2835 * Copy back the registers.
2836 * This is done in the order they are declared in the CPUMCTX structure.
2837 */
2838
2839 /** @todo FOP */
2840 /** @todo FPUIP */
2841 /** @todo CS */
2842 /** @todo FPUDP */
2843 /** @todo DS */
2844 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2845 pCtx->fpu.MXCSR = 0;
2846 pCtx->fpu.MXCSR_MASK = 0;
2847
2848 /** @todo check if FPU/XMM was actually used in the recompiler */
2849 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2850//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2851
2852#ifdef TARGET_X86_64
2853 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2854 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2855 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2856 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2857 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2858 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2859 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2860 pCtx->r8 = pVM->rem.s.Env.regs[8];
2861 pCtx->r9 = pVM->rem.s.Env.regs[9];
2862 pCtx->r10 = pVM->rem.s.Env.regs[10];
2863 pCtx->r11 = pVM->rem.s.Env.regs[11];
2864 pCtx->r12 = pVM->rem.s.Env.regs[12];
2865 pCtx->r13 = pVM->rem.s.Env.regs[13];
2866 pCtx->r14 = pVM->rem.s.Env.regs[14];
2867 pCtx->r15 = pVM->rem.s.Env.regs[15];
2868
2869 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2870#else
2871 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2872 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2873 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2874 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2875 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2876 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2877 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2878
2879 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2880#endif
2881
2882 SYNC_BACK_SREG(es, ES);
2883 SYNC_BACK_SREG(cs, CS);
2884 SYNC_BACK_SREG(ss, SS);
2885 SYNC_BACK_SREG(ds, DS);
2886 SYNC_BACK_SREG(fs, FS);
2887 SYNC_BACK_SREG(gs, GS);
2888
2889#ifdef TARGET_X86_64
2890 pCtx->rip = pVM->rem.s.Env.eip;
2891 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2892#else
2893 pCtx->eip = pVM->rem.s.Env.eip;
2894 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2895#endif
2896
2897 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2898 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2899 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2900#ifdef VBOX_WITH_RAW_MODE
2901 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2902 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2903#endif
2904 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2905
2906 for (i = 0; i < 8; i++)
2907 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2908
2909 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2910 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2911 {
2912 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2913 STAM_COUNTER_INC(&gStatREMGDTChange);
2914#ifdef VBOX_WITH_RAW_MODE
2915 if (!HMIsEnabled(pVM))
2916 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2917#endif
2918 }
2919
2920 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2921 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2922 {
2923 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2924 STAM_COUNTER_INC(&gStatREMIDTChange);
2925#ifdef VBOX_WITH_RAW_MODE
2926 if (!HMIsEnabled(pVM))
2927 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2928#endif
2929 }
2930
2931 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2932 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2933 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2934 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2935 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2936 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2937 )
2938 {
2939 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2940 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2941 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2942 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2943 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2944 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2945 STAM_COUNTER_INC(&gStatREMLDTRChange);
2946#ifdef VBOX_WITH_RAW_MODE
2947 if (!HMIsEnabled(pVM))
2948 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2949#endif
2950 }
2951
2952 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2953 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2954 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2955 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2956 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2957 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2958 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2959 : 0)
2960 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2961 )
2962 {
2963 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2964 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2965 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2966 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2967 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2968 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2969 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2970 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2971 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2972 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2973 if (pCtx->tr.Attr.u)
2974 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2975 STAM_COUNTER_INC(&gStatREMTRChange);
2976#ifdef VBOX_WITH_RAW_MODE
2977 if (!HMIsEnabled(pVM))
2978 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2979#endif
2980 }
2981
2982 /* Sysenter MSR */
2983 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2984 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2985 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2986
2987 /* System MSRs. */
2988 pCtx->msrEFER = pVM->rem.s.Env.efer;
2989 pCtx->msrSTAR = pVM->rem.s.Env.star;
2990 pCtx->msrPAT = pVM->rem.s.Env.pat;
2991#ifdef TARGET_X86_64
2992 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2993 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2994 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2995 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2996#endif
2997
2998}
2999
3000
3001/**
3002 * Update the VMM state information if we're currently in REM.
3003 *
3004 * This method is used by DBGF and the PDM devices when there is any uncertainty about whether
3005 * we're currently executing in REM and the VMM state is invalid. This method will of
3006 * course check that we're executing in REM before syncing any data over to the VMM.
3007 *
3008 * @param pVM The VM handle.
3009 * @param pVCpu The VMCPU handle.
3010 */
3011REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
3012{
3013 if (pVM->rem.s.fInREM)
3014 remR3StateUpdate(pVM, pVCpu);
3015}
3016
3017
3018#undef LOG_GROUP
3019#define LOG_GROUP LOG_GROUP_REM
3020
3021
3022/**
3023 * Notify the recompiler about Address Gate 20 state change.
3024 *
3025 * This notification is required since A20 gate changes are
3026 * initiated from a device driver and the VM might just as
3027 * well be in REM mode as in RAW mode.
3028 *
3029 * @param pVM VM handle.
3030 * @param pVCpu VMCPU handle.
3031 * @param fEnable True if the gate should be enabled.
3032 * False if the gate should be disabled.
3033 */
3034REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
3035{
3036 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
3037 VM_ASSERT_EMT(pVM);
3038
3039 /** @todo SMP and the A20 gate... */
3040 if (pVM->rem.s.Env.pVCpu == pVCpu)
3041 {
3042 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3043 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
3044 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3045 }
3046}
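
/* What flipping the gate means inside QEmu (illustrative): a20_mask is ANDed
 * into physical addresses, so with the gate disabled bit 20 is forced to
 * zero and the classic 1MB wraparound is reproduced:
 *
 * @code
 *      GCPhys &= env->a20_mask;    // fEnable=false => mask lacks RT_BIT(20)
 * @endcode
 */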
3047
3048
3049/**
3050 * Replays the handler notification changes.
3051 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
3052 *
3053 * @param pVM VM handle.
3054 */
3055REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
3056{
3057 /*
3058 * Replay the flushes.
3059 */
3060 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3061 VM_ASSERT_EMT(pVM);
3062
3063 /** @todo this isn't ensuring correct replay order. */
3064 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3065 {
3066 uint32_t idxNext;
3067 uint32_t idxRevHead;
3068 uint32_t idxHead;
3069#ifdef VBOX_STRICT
3070 int32_t c = 0;
3071#endif
3072
3073 /* Lockless purging of pending notifications. */
3074 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3075 if (idxHead == UINT32_MAX)
3076 return;
3077 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3078
3079 /*
3080 * Reverse the list to process it in FIFO order.
3081 */
3082 idxRevHead = UINT32_MAX;
3083 do
3084 {
3085 /* Save the index of the next rec. */
3086 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3087 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3088 /* Push the record onto the reversed list. */
3089 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3090 idxRevHead = idxHead;
3091 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3092 /* Advance. */
3093 idxHead = idxNext;
3094 } while (idxHead != UINT32_MAX);
3095
3096 /*
3097 * Loop thru the list, reinserting the records into the free list as they are
3098 * processed to avoid having other EMTs run out of entries while we're flushing.
3099 */
3100 idxHead = idxRevHead;
3101 do
3102 {
3103 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3104 uint32_t idxCur;
3105 Assert(--c >= 0);
3106
3107 switch (pCur->enmKind)
3108 {
3109 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3110 remR3NotifyHandlerPhysicalRegister(pVM,
3111 pCur->u.PhysicalRegister.enmType,
3112 pCur->u.PhysicalRegister.GCPhys,
3113 pCur->u.PhysicalRegister.cb,
3114 pCur->u.PhysicalRegister.fHasHCHandler);
3115 break;
3116
3117 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3118 remR3NotifyHandlerPhysicalDeregister(pVM,
3119 pCur->u.PhysicalDeregister.enmType,
3120 pCur->u.PhysicalDeregister.GCPhys,
3121 pCur->u.PhysicalDeregister.cb,
3122 pCur->u.PhysicalDeregister.fHasHCHandler,
3123 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3124 break;
3125
3126 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3127 remR3NotifyHandlerPhysicalModify(pVM,
3128 pCur->u.PhysicalModify.enmType,
3129 pCur->u.PhysicalModify.GCPhysOld,
3130 pCur->u.PhysicalModify.GCPhysNew,
3131 pCur->u.PhysicalModify.cb,
3132 pCur->u.PhysicalModify.fHasHCHandler,
3133 pCur->u.PhysicalModify.fRestoreAsRAM);
3134 break;
3135
3136 default:
3137 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3138 break;
3139 }
3140
3141 /*
3142 * Advance idxHead.
3143 */
3144 idxCur = idxHead;
3145 idxHead = pCur->idxNext;
3146 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3147
3148 /*
3149 * Put the record back into the free list.
3150 */
3151 do
3152 {
3153 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3154 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3155 ASMCompilerBarrier();
3156 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3157 } while (idxHead != UINT32_MAX);
3158
3159#ifdef VBOX_STRICT
3160 if (pVM->cCpus == 1)
3161 {
3162 unsigned c;
3163 /* Check that all records are now on the free list. */
3164 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3165 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3166 c++;
3167 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3168 }
3169#endif
3170 }
3171}
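
/* The producer side of the lockless queue drained above is assumed to be the
 * mirror image (hypothetical helper; the names and exact shape are invented
 * for this sketch): pop a record off idxFreeList, fill it in, push it onto
 * idxPendingList, each with a compare-and-swap loop, then raise the FF:
 *
 * @code
 *      uint32_t idx, idxNext;
 *      do
 *      {
 *          idx = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
 *          AssertReturnVoid(idx != UINT32_MAX);
 *          idxNext = pVM->rem.s.aHandlerNotifications[idx].idxNext;
 *      } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxNext, idx));
 *
 *      // ... fill in aHandlerNotifications[idx] ...
 *
 *      do
 *      {
 *          idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxPendingList);
 *          ASMAtomicWriteU32(&pVM->rem.s.aHandlerNotifications[idx].idxNext, idxNext);
 *      } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxPendingList, idx, idxNext));
 *      VM_FF_SET(pVM, VM_FF_REM_HANDLER_NOTIFY);
 * @endcode
 *
 * Pushing is LIFO, which is why the drain loop reverses the list before
 * processing it to recover FIFO order. */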
3172
3173
3174/**
3175 * Notify REM about changed code page.
3176 *
3177 * @returns VBox status code.
3178 * @param pVM VM handle.
3179 * @param pVCpu VMCPU handle.
3180 * @param pvCodePage Code page address
3181 */
3182REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3183{
3184#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3185 int rc;
3186 RTGCPHYS PhysGC;
3187 uint64_t flags;
3188
3189 VM_ASSERT_EMT(pVM);
3190
3191 /*
3192 * Get the physical page address.
3193 */
3194 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3195 if (rc == VINF_SUCCESS)
3196 {
3197 /*
3198 * Sync the required registers and flush the whole page.
3199 * (Easier to do the whole page than notifying it about each physical
3200 * byte that was changed.
3201 */
3202 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3203 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3204 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3205 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3206
3207 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3208 }
3209#endif
3210 return VINF_SUCCESS;
3211}
3212
3213
3214/**
3215 * Notification about a successful MMR3PhysRegister() call.
3216 *
3217 * @param pVM VM handle.
3218 * @param GCPhys The physical address the RAM.
3219 * @param cb Size of the memory.
3220 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3221 */
3222REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3223{
3224 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3225 VM_ASSERT_EMT(pVM);
3226
3227 /*
3228 * Validate input - we trust the caller.
3229 */
3230 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3231 Assert(cb);
3232 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3233 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3234
3235 /*
3236 * Base ram? Update GCPhysLastRam.
3237 */
3238 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3239 {
3240 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3241 {
3242 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3243 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3244 }
3245 }
3246
3247 /*
3248 * Register the ram.
3249 */
3250 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3251
3252 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3253 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3254 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3255
3256 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3257}
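
/* Illustrative call (hypothetical values; PGM is the real caller): telling
 * REM about a 128 MB base RAM range starting at guest-physical zero:
 *
 * @code
 *      REMR3NotifyPhysRamRegister(pVM, 0, 128 * _1M,   // GCPhys = 0
 *                                 REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
 * @endcode
 */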
3258
3259
3260/**
3261 * Notification about a successful MMR3PhysRomRegister() call.
3262 *
3263 * @param pVM VM handle.
3264 * @param GCPhys The physical address of the ROM.
3265 * @param cb The size of the ROM.
3266 * @param pvCopy Pointer to the ROM copy.
3267 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3268 * This function will be called whenever the protection of the
3269 * shadow ROM changes (at reset and end of POST).
3270 */
3271REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3272{
3273 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3274 VM_ASSERT_EMT(pVM);
3275
3276 /*
3277 * Validate input - we trust the caller.
3278 */
3279 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3280 Assert(cb);
3281 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3282
3283 /*
3284 * Register the rom.
3285 */
3286 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3287
3288 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3289 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3290 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3291
3292 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3293}
3294
3295
3296/**
3297 * Notification about a successful memory deregistration or reservation.
3298 *
3299 * @param pVM VM Handle.
3300 * @param GCPhys Start physical address.
3301 * @param cb The size of the range.
3302 */
3303REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3304{
3305 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3306 VM_ASSERT_EMT(pVM);
3307
3308 /*
3309 * Validate input - we trust the caller.
3310 */
3311 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3312 Assert(cb);
3313 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3314
3315 /*
3316 * Unassign the memory.
3317 */
3318 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3319
3320 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3321 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3322 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3323
3324 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3325}
3326
3327
3328/**
3329 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3330 *
3331 * @param pVM VM Handle.
3332 * @param enmType Handler type.
3333 * @param GCPhys Handler range address.
3334 * @param cb Size of the handler range.
3335 * @param fHasHCHandler Set if the handler has a HC callback function.
3336 *
3337 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3338 * Handler memory type to memory which has no HC handler.
3339 */
3340static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3341{
3342 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3343 enmType, GCPhys, cb, fHasHCHandler));
3344
3345 VM_ASSERT_EMT(pVM);
3346 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3347 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3348
3349
3350 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3351
3352 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3353 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3354 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3355 else if (fHasHCHandler)
3356 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3357 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3358
3359 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3360}
3361
3362/**
3363 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3364 *
3365 * @param pVM VM Handle.
3366 * @param enmType Handler type.
3367 * @param GCPhys Handler range address.
3368 * @param cb Size of the handler range.
3369 * @param fHasHCHandler Set if the handler has a HC callback function.
3370 *
3371 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3372 * Handler memory type to memory which has no HC handler.
3373 */
3374REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3375{
3376 REMR3ReplayHandlerNotifications(pVM);
3377
3378 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3379}
3380
3381/**
3382 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3383 *
3384 * @param pVM VM Handle.
3385 * @param enmType Handler type.
3386 * @param GCPhys Handler range address.
3387 * @param cb Size of the handler range.
3388 * @param fHasHCHandler Set if the handler has a HC callback function.
3389 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3390 */
3391static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3392{
3393 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3394 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3395 VM_ASSERT_EMT(pVM);
3396
3397
3398 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3399
3400 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3401 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3402 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3403 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3404 else if (fHasHCHandler)
3405 {
3406 if (!fRestoreAsRAM)
3407 {
3408 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3409 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3410 }
3411 else
3412 {
3413 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3414 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3415 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3416 }
3417 }
3418 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3419
3420 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3421}
3422
3423/**
3424 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3425 *
3426 * @param pVM VM Handle.
3427 * @param enmType Handler type.
3428 * @param GCPhys Handler range address.
3429 * @param cb Size of the handler range.
3430 * @param fHasHCHandler Set if the handler has a HC callback function.
3431 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3432 */
3433REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3434{
3435 REMR3ReplayHandlerNotifications(pVM);
3436 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3437}
3438
3439
3440/**
3441 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3442 *
3443 * @param pVM VM Handle.
3444 * @param enmType Handler type.
3445 * @param GCPhysOld Old handler range address.
3446 * @param GCPhysNew New handler range address.
3447 * @param cb Size of the handler range.
3448 * @param fHasHCHandler Set if the handler has a HC callback function.
3449 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3450 */
3451static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3452{
3453 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3454 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3455 VM_ASSERT_EMT(pVM);
3456 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3457
3458 if (fHasHCHandler)
3459 {
3460 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3461
3462 /*
3463 * Reset the old page.
3464 */
3465 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3466 if (!fRestoreAsRAM)
3467 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3468 else
3469 {
3470 /* This is not perfect, but it'll do for PD monitoring... */
3471 Assert(cb == PAGE_SIZE);
3472 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3473 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3474 }
3475
3476 /*
3477 * Update the new page.
3478 */
3479 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3480 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3481 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3482 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3483
3484 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3485 }
3486}
3487
3488/**
3489 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3490 *
3491 * @param pVM VM Handle.
3492 * @param enmType Handler type.
3493 * @param GCPhysOld Old handler range address.
3494 * @param GCPhysNew New handler range address.
3495 * @param cb Size of the handler range.
3496 * @param fHasHCHandler Set if the handler has a HC callback function.
3497 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3498 */
3499REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3500{
3501 REMR3ReplayHandlerNotifications(pVM);
3502
3503 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3504}
3505
3506/**
3507 * Checks if we're handling access to this page or not.
3508 *
3509 * @returns true if we're trapping access.
3510 * @returns false if we aren't.
3511 * @param pVM The VM handle.
3512 * @param GCPhys The physical address.
3513 *
3514 * @remark This function will only work correctly in VBOX_STRICT builds!
3515 */
3516REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3517{
3518#ifdef VBOX_STRICT
3519 ram_addr_t off;
3520 REMR3ReplayHandlerNotifications(pVM);
3521
3522 off = get_phys_page_offset(GCPhys);
3523 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3524 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3525 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3526#else
3527 return false;
3528#endif
3529}
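
/* A small sketch (not compiled) of the intended strict-build use: asserting
 * that a page we believe is monitored really traps. The call site is assumed.
 */
#if 0 /* illustration only */
static void exampleAssertPageHandled(PVM pVM, RTGCPHYS GCPhys)
{
# ifdef VBOX_STRICT
    AssertMsg(REMR3IsPageAccessHandled(pVM, GCPhys), ("GCPhys=%RGp is not trapped\n", GCPhys));
# else
    NOREF(pVM); NOREF(GCPhys); /* the query always returns false in non-strict builds */
# endif
}
#endif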
3530
3531
3532/**
3533 * Deals with a rare case in get_phys_addr_code where the code
3534 * is being monitored.
3535 *
3536 * It could also be an MMIO page, in which case we will raise a fatal error.
3537 *
3538 * @returns The physical address corresponding to addr.
3539 * @param env The cpu environment.
3540 * @param addr The virtual address.
3541 * @param pTLBEntry The TLB entry.
 * @param ioTLBEntry The I/O TLB entry (memory type in the low bits, addend in the page bits).
3542 */
3543target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3544 target_ulong addr,
3545 CPUTLBEntry *pTLBEntry,
3546 target_phys_addr_t ioTLBEntry)
3547{
3548 PVM pVM = env->pVM;
3549
3550 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3551 {
3552 /* If code memory is being monitored, the corresponding IOTLB entry has the
3553 handler IO type, and its addend provides the real physical address whether
3554 or not we store the VA in the TLB, as handlers are always passed the PA. */
3555 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3556 return ret;
3557 }
3558 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3559 "*** handlers\n",
3560 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3561 DBGFR3Info(pVM->pUVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3562 LogRel(("*** mmio\n"));
3563 DBGFR3Info(pVM->pUVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3564 LogRel(("*** phys\n"));
3565 DBGFR3Info(pVM->pUVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3566 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3567 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3568 AssertFatalFailed();
3569}
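
/* A worked sketch (not compiled) of the ioTLB math above, with made-up
 * numbers: the low bits of the entry carry the memory type, while the page
 * bits act as an addend chosen so that addend + VA = PA (assumed layout,
 * matching the comment in the function).
 */
#if 0 /* illustration only */
static void exampleIoTlbAddend(void)
{
    target_ulong addr = UINT32_C(0x00321123); /* virtual PC, assumed */
    target_phys_addr_t ioTLBEntry = (UINT32_C(0x00fe2000) - UINT32_C(0x00321000)) /* addend: PA page - VA page */
                                  | 42 /* iHandlerMemType, assumed value */;
    target_ulong GCPhysPC = (target_ulong)((ioTLBEntry & TARGET_PAGE_MASK) + addr); /* -> 0x00fe2123 */
    NOREF(GCPhysPC);
}
#endif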
3570
3571/**
3572 * Read guest RAM and ROM.
3573 *
3574 * @param SrcGCPhys The source address (guest physical).
3575 * @param pvDst The destination address.
3576 * @param cb Number of bytes to read.
3577 */
3578void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3579{
3580 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3581 VBOX_CHECK_ADDR(SrcGCPhys);
3582 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3583#ifdef VBOX_DEBUG_PHYS
3584 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3585#endif
3586 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3587}
3588
3589
3590/**
3591 * Read guest RAM and ROM, unsigned 8-bit.
3592 *
3593 * @param SrcGCPhys The source address (guest physical).
3594 */
3595RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3596{
3597 uint8_t val;
3598 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3599 VBOX_CHECK_ADDR(SrcGCPhys);
3600 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3601 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3602#ifdef VBOX_DEBUG_PHYS
3603 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3604#endif
3605 return val;
3606}
3607
3608
3609/**
3610 * Read guest RAM and ROM, signed 8-bit.
3611 *
3612 * @param SrcGCPhys The source address (guest physical).
3613 */
3614RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3615{
3616 int8_t val;
3617 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3618 VBOX_CHECK_ADDR(SrcGCPhys);
3619 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3620 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3621#ifdef VBOX_DEBUG_PHYS
3622 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3623#endif
3624 return val;
3625}
3626
3627
3628/**
3629 * Read guest RAM and ROM, unsigned 16-bit.
3630 *
3631 * @param SrcGCPhys The source address (guest physical).
3632 */
3633RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3634{
3635 uint16_t val;
3636 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3637 VBOX_CHECK_ADDR(SrcGCPhys);
3638 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3639 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3640#ifdef VBOX_DEBUG_PHYS
3641 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3642#endif
3643 return val;
3644}
3645
3646
3647/**
3648 * Read guest RAM and ROM, signed 16-bit.
3649 *
3650 * @param SrcGCPhys The source address (guest physical).
3651 */
3652RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3653{
3654 int16_t val;
3655 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3656 VBOX_CHECK_ADDR(SrcGCPhys);
3657 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3658 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3659#ifdef VBOX_DEBUG_PHYS
3660 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3661#endif
3662 return val;
3663}
3664
3665
3666/**
3667 * Read guest RAM and ROM, unsigned 32-bit.
3668 *
3669 * @param SrcGCPhys The source address (guest physical).
3670 */
3671RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3672{
3673 uint32_t val;
3674 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3675 VBOX_CHECK_ADDR(SrcGCPhys);
3676 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3677 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3678#ifdef VBOX_DEBUG_PHYS
3679 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3680#endif
3681 return val;
3682}
3683
3684
3685/**
3686 * Read guest RAM and ROM, signed 32-bit.
3687 *
3688 * @param SrcGCPhys The source address (guest physical).
3689 */
3690RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3691{
3692 int32_t val;
3693 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3694 VBOX_CHECK_ADDR(SrcGCPhys);
3695 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3696 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3697#ifdef VBOX_DEBUG_PHYS
3698 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3699#endif
3700 return val;
3701}
3702
3703
3704/**
3705 * Read guest RAM and ROM, unsigned 64-bit.
3706 *
3707 * @param SrcGCPhys The source address (guest physical).
3708 */
3709uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3710{
3711 uint64_t val;
3712 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3713 VBOX_CHECK_ADDR(SrcGCPhys);
3714 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3715 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3716#ifdef VBOX_DEBUG_PHYS
3717 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3718#endif
3719 return val;
3720}
3721
3722
3723/**
3724 * Read guest RAM and ROM, signed 64-bit.
3725 *
3726 * @param SrcGCPhys The source address (guest physical).
3727 */
3728int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3729{
3730 int64_t val;
3731 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3732 VBOX_CHECK_ADDR(SrcGCPhys);
3733 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3734 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3735#ifdef VBOX_DEBUG_PHYS
3736 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3737#endif
3738 return val;
3739}
3740
3741
3742/**
3743 * Write guest RAM.
3744 *
3745 * @param DstGCPhys The destination address (guest physical).
3746 * @param pvSrc The source address.
3747 * @param cb Number of bytes to write
3748 */
3749void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3750{
3751 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3752 VBOX_CHECK_ADDR(DstGCPhys);
3753 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3754 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3755#ifdef VBOX_DEBUG_PHYS
3756 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3757#endif
3758}
3759
3760
3761/**
3762 * Write guest RAM, unsigned 8-bit.
3763 *
3764 * @param DstGCPhys The destination address (guest physical).
3765 * @param val The value to write.
3766 */
3767void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3768{
3769 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3770 VBOX_CHECK_ADDR(DstGCPhys);
3771 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3772 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3773#ifdef VBOX_DEBUG_PHYS
3774 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3775#endif
3776}
3777
3778
3779/**
3780 * Write guest RAM, unsigned 16-bit.
3781 *
3782 * @param DstGCPhys The destination address (guest physical).
3783 * @param val The value to write.
3784 */
3785void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3786{
3787 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3788 VBOX_CHECK_ADDR(DstGCPhys);
3789 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3790 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3791#ifdef VBOX_DEBUG_PHYS
3792 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3793#endif
3794}
3795
3796
3797/**
3798 * Write guest RAM, unsigned 32-bit.
3799 *
3800 * @param DstGCPhys The destination address (guest physical).
3801 * @param val The value to write.
3802 */
3803void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3804{
3805 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3806 VBOX_CHECK_ADDR(DstGCPhys);
3807 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3808 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3809#ifdef VBOX_DEBUG_PHYS
3810 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3811#endif
3812}
3813
3814
3815/**
3816 * Write guest RAM, unsigned 64-bit.
3817 *
3818 * @param DstGCPhys The destination address (guest physical).
3819 * @param val The value to write.
3820 */
3821void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3822{
3823 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3824 VBOX_CHECK_ADDR(DstGCPhys);
3825 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3826 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3827#ifdef VBOX_DEBUG_PHYS
3828 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3829#endif
3830}
3831
3832#undef LOG_GROUP
3833#define LOG_GROUP LOG_GROUP_REM_MMIO
3834
3835/** Read MMIO memory. */
3836static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys)
3837{
3838 CPUX86State *env = (CPUX86State *)pvEnv;
3839 uint32_t u32 = 0;
3840 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 1);
3841 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3842 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3843 return u32;
3844}
3845
3846/** Read MMIO memory. */
3847static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys)
3848{
3849 CPUX86State *env = (CPUX86State *)pvEnv;
3850 uint32_t u32 = 0;
3851 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 2);
3852 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3853 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3854 return u32;
3855}
3856
3857/** Read MMIO memory. */
3858static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys)
3859{
3860 CPUX86State *env = (CPUX86State *)pvEnv;
3861 uint32_t u32 = 0;
3862 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 4);
3863 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3864 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3865 return u32;
3866}
3867
3868/** Write to MMIO memory. */
3869static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3870{
3871 CPUX86State *env = (CPUX86State *)pvEnv;
3872 int rc;
3873 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3874 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 1);
3875 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3876}
3877
3878/** Write to MMIO memory. */
3879static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3880{
3881 CPUX86State *env = (CPUX86State *)pvEnv;
3882 int rc;
3883 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3884 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 2);
3885 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3886}
3887
3888/** Write to MMIO memory. */
3889static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3890{
3891 CPUX86State *env = (CPUX86State *)pvEnv;
3892 int rc;
3893 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3894 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 4);
3895 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3896}
3897
3898
3899#undef LOG_GROUP
3900#define LOG_GROUP LOG_GROUP_REM_HANDLER
3901
3902/* !!!WARNING!!! This is extremely hackish right now; we assume it's only for LFB access! !!!WARNING!!! */
3903
3904static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3905{
3906 uint8_t u8;
3907 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3908 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3909 return u8;
3910}
3911
3912static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3913{
3914 uint16_t u16;
3915 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3916 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3917 return u16;
3918}
3919
3920static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3921{
3922 uint32_t u32;
3923 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3924 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3925 return u32;
3926}
3927
3928static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3929{
3930 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3931 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3932}
3933
3934static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3935{
3936 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3937 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3938}
3939
3940static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3941{
3942 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3943 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3944}
3945
3946/* -+- disassembly -+- */
3947
3948#undef LOG_GROUP
3949#define LOG_GROUP LOG_GROUP_REM_DISAS
3950
3951
3952/**
3953 * Enables or disables single-stepped disassembly.
3954 *
3955 * @returns VBox status code.
3956 * @param pVM VM handle.
3957 * @param fEnable Pass true to enable, false to disable.
3958 */
3959static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3960{
3961 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3962 VM_ASSERT_EMT(pVM);
3963
3964 if (fEnable)
3965 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3966 else
3967 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3968#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3969 cpu_single_step(&pVM->rem.s.Env, fEnable);
3970#endif
3971 return VINF_SUCCESS;
3972}
3973
3974
3975/**
3976 * Enables or disables single-stepped disassembly.
3977 *
3978 * @returns VBox status code.
3979 * @param pVM VM handle.
3980 * @param fEnable Pass true to enable, false to disable.
3981 */
3982REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3983{
3984 int rc;
3985
3986 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3987 if (VM_IS_EMT(pVM))
3988 return remR3DisasEnableStepping(pVM, fEnable);
3989
3990 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3991 AssertRC(rc);
3992 return rc;
3993}
3994
3995
3996#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3997/**
3998 * External Debugger Command: .remstep [on|off|1|0]
3999 */
4000static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
4001{
4002 int rc;
4003 PVM pVM = pUVM->pVM;
4004
4005 if (cArgs == 0)
4006 /*
4007 * Print the current status.
4008 */
4009 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
4010 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
4011 else
4012 {
4013 /*
4014 * Convert the argument and change the mode.
4015 */
4016 bool fEnable;
4017 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
4018 if (RT_SUCCESS(rc))
4019 {
4020 rc = REMR3DisasEnableStepping(pVM, fEnable);
4021 if (RT_SUCCESS(rc))
4022 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
4023 else
4024 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
4025 }
4026 else
4027 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
4028 }
4029 return rc;
4030}
4031#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
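
/* Example console usage; a sketch, with the output wording taken from the
 * DBGCCmdHlpPrintf strings above:
 *     .remstep        -> DisasStepping is disabled
 *     .remstep on     -> DisasStepping was enabled
 *     .remstep 0      -> DisasStepping was disabled
 */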
4032
4033
4034/**
4035 * Disassembles one instruction and prints it to the log.
4036 *
4037 * @returns Success indicator.
4038 * @param env Pointer to the recompiler CPU structure.
4039 * @param f32BitCode Indicates whether the code should be
4040 * disassembled as 16- or 32-bit. If -1, the CS
4041 * selector will be inspected.
4042 * @param pszPrefix String to prefix the log output with.
4043 */
4044bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
4045{
4046 PVM pVM = env->pVM;
4047 const bool fLog = LogIsEnabled();
4048 const bool fLog2 = LogIs2Enabled();
4049 int rc = VINF_SUCCESS;
4050
4051 /*
4052 * Don't bother if there ain't any log output to do.
4053 */
4054 if (!fLog && !fLog2)
4055 return true;
4056
4057 /*
4058 * Update the state so DBGF reads the correct register values.
4059 */
4060 remR3StateUpdate(pVM, env->pVCpu);
4061
4062 /*
4063 * Log registers if requested.
4064 */
4065 if (fLog2)
4066 DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
4067
4068 /*
4069 * Disassemble to log.
4070 */
4071 if (fLog)
4072 {
4073 PVMCPU pVCpu = VMMGetCpu(pVM);
4074 char szBuf[256];
4075 szBuf[0] = '\0';
4076 rc = DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, /* assign the outer rc so failures reach the return value */
4077 pVCpu->idCpu,
4078 0, /* Sel */ 0, /* GCPtr */
4079 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4080 szBuf,
4081 sizeof(szBuf),
4082 NULL);
4083 if (RT_FAILURE(rc))
4084 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4085 if (pszPrefix && *pszPrefix)
4086 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4087 else
4088 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4089 }
4090
4091 return RT_SUCCESS(rc);
4092}
4093
4094
4095/**
4096 * Disassemble recompiled code.
4097 *
4098 * @param phFile Ignored (usually a log file).
4099 * @param pvCode Pointer to the code block.
4100 * @param cb Size of the code block.
4101 */
4102void disas(FILE *phFile, void *pvCode, unsigned long cb)
4103{
4104 if (LogIs2Enabled())
4105 {
4106 unsigned off = 0;
4107 char szOutput[256];
4108 DISCPUSTATE Cpu;
4109#ifdef RT_ARCH_X86
4110 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4111#else
4112 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4113#endif
4114
4115 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4116 while (off < cb)
4117 {
4118 uint32_t cbInstr;
4119 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4120 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4121 if (RT_SUCCESS(rc))
4122 RTLogPrintf("%s", szOutput);
4123 else
4124 {
4125 RTLogPrintf("disas error %Rrc\n", rc);
4126 cbInstr = 1;
4127 }
4128 off += cbInstr;
4129 }
4130 }
4131}
4132
4133
4134/**
4135 * Disassemble guest code.
4136 *
4137 * @param phFile Ignored (usually a log file).
4138 * @param uCode The guest (flat) address of the code to disassemble.
4139 * @param cb Number of bytes to disassemble.
4140 * @param fFlags Flags, probably indicating whether this is 16-, 32- or 64-bit code.
4141 */
4142void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4143{
4144 if (LogIs2Enabled())
4145 {
4146 PVM pVM = cpu_single_env->pVM;
4147 PVMCPU pVCpu = cpu_single_env->pVCpu;
4148 RTSEL cs;
4149 RTGCUINTPTR eip;
4150
4151 Assert(pVCpu);
4152
4153 /*
4154 * Update the state so DBGF reads the correct register values (flags).
4155 */
4156 remR3StateUpdate(pVM, pVCpu);
4157
4158 /*
4159 * Do the disassembling.
4160 */
4161 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4162 cs = cpu_single_env->segs[R_CS].selector;
4163 eip = uCode - cpu_single_env->segs[R_CS].base;
4164 for (;;)
4165 {
4166 char szBuf[256] = ""; /* keep it defined for the failure path below */
4167 uint32_t cbInstr;
4168 int rc = DBGFR3DisasInstrEx(pVM->pUVM,
4169 pVCpu->idCpu,
4170 cs,
4171 eip,
4172 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4173 szBuf, sizeof(szBuf),
4174 &cbInstr);
4175 if (RT_SUCCESS(rc))
4176 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4177 else
4178 {
4179 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4180 cbInstr = 1;
4181 }
4182
4183 /* next */
4184 if (cb <= cbInstr)
4185 break;
4186 cb -= cbInstr;
4187 uCode += cbInstr;
4188 eip += cbInstr;
4189 }
4190 }
4191}
4192
4193
4194/**
4195 * Looks up a guest symbol.
4196 *
4197 * @returns Pointer to symbol name. This is a static buffer.
4198 * @param orig_addr The address in question.
4199 */
4200const char *lookup_symbol(target_ulong orig_addr)
4201{
4202 PVM pVM = cpu_single_env->pVM;
4203 RTGCINTPTR off = 0;
4204 RTDBGSYMBOL Sym;
4205 DBGFADDRESS Addr;
4206
4207 int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, orig_addr),
4208 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL, &off, &Sym, NULL /*phMod*/);
4209 if (RT_SUCCESS(rc))
4210 {
4211 static char szSym[sizeof(Sym.szName) + 48];
4212 if (!off)
4213 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4214 else if (off > 0)
4215 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4216 else
4217 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4218 return szSym;
4219 }
4220 return "<N/A>";
4221}
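
/* A small sketch (not compiled) of the static-buffer caveat: the returned
 * name is overwritten by the next call, so copy it if it must survive.
 * The call site is assumed.
 */
#if 0 /* illustration only */
static void exampleLookupSymbol(target_ulong uPC)
{
    char szName[64];
    RTStrCopy(szName, sizeof(szName), lookup_symbol(uPC));
}
#endif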
4222
4223
4224#undef LOG_GROUP
4225#define LOG_GROUP LOG_GROUP_REM
4226
4227
4228/* -+- FF notifications -+- */
4229
4230
4231/**
4232 * Notification about a pending interrupt.
4233 *
4234 * @param pVM VM Handle.
4235 * @param pVCpu VMCPU Handle.
4236 * @param u8Interrupt The interrupt vector.
4237 * @thread The emulation thread.
4238 */
4239REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4240{
4241 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4242 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4243}
4244
4245/**
4246 * Queries the pending interrupt.
4247 *
4248 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4249 * @param pVM VM Handle.
4250 * @param pVCpu VMCPU Handle.
4251 * @thread The emulation thread.
4252 */
4253REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4254{
4255 return pVM->rem.s.u32PendingInterrupt;
4256}
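
/* A minimal sketch (not compiled) of the assumed notify/query pairing on the
 * EM side: the vector is stashed here and later consumed by
 * cpu_get_pic_interrupt() below.
 */
#if 0 /* illustration only */
static void exampleForwardIrqToRem(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    REMR3NotifyPendingInterrupt(pVM, pVCpu, u8Interrupt);
    Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == u8Interrupt);
}
#endif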
4257
4258/**
4259 * Notification about the interrupt FF being set.
4260 *
4261 * @param pVM VM Handle.
4262 * @param pVCpu VMCPU Handle.
4263 * @thread The emulation thread.
4264 */
4265REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4266{
4267#ifndef IEM_VERIFICATION_MODE
4268 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4269 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4270 if (pVM->rem.s.fInREM)
4271 {
4272 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4273 CPU_INTERRUPT_EXTERNAL_HARD);
4274 }
4275#endif
4276}
4277
4278
4279/**
4280 * Notification about the interrupt FF being cleared.
4281 *
4282 * @param pVM VM Handle.
4283 * @param pVCpu VMCPU Handle.
4284 * @thread Any.
4285 */
4286REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4287{
4288 LogFlow(("REMR3NotifyInterruptClear:\n"));
4289 if (pVM->rem.s.fInREM)
4290 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4291}
4292
4293
4294/**
4295 * Notification about pending timer(s).
4296 *
4297 * @param pVM VM Handle.
4298 * @param pVCpuDst The target cpu for this notification.
4299 * TM will not broadcast pending timer events, but use
4300 * a dedicated EMT for them. So, only interrupt REM
4301 * execution if the given CPU is executing in REM.
4302 * @thread Any.
4303 */
4304REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4305{
4306#ifndef IEM_VERIFICATION_MODE
4307#ifndef DEBUG_bird
4308 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4309#endif
4310 if (pVM->rem.s.fInREM)
4311 {
4312 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4313 {
4314 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4315 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4316 CPU_INTERRUPT_EXTERNAL_TIMER);
4317 }
4318 else
4319 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4320 }
4321 else
4322 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4323#endif
4324}
4325
4326
4327/**
4328 * Notification about pending DMA transfers.
4329 *
4330 * @param pVM VM Handle.
4331 * @thread Any.
4332 */
4333REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4334{
4335#ifndef IEM_VERIFICATION_MODE
4336 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4337 if (pVM->rem.s.fInREM)
4338 {
4339 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4340 CPU_INTERRUPT_EXTERNAL_DMA);
4341 }
4342#endif
4343}
4344
4345
4346/**
4347 * Notification about pending queues.
4348 *
4349 * @param pVM VM Handle.
4350 * @thread Any.
4351 */
4352REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4353{
4354#ifndef IEM_VERIFICATION_MODE
4355 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4356 if (pVM->rem.s.fInREM)
4357 {
4358 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4359 CPU_INTERRUPT_EXTERNAL_EXIT);
4360 }
4361#endif
4362}
4363
4364
4365/**
4366 * Notification about pending FF set by an external thread.
4367 *
4368 * @param pVM VM handle.
4369 * @thread Any.
4370 */
4371REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4372{
4373#ifndef IEM_VERIFICATION_MODE
4374 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4375 if (pVM->rem.s.fInREM)
4376 {
4377 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4378 CPU_INTERRUPT_EXTERNAL_EXIT);
4379 }
4380#endif
4381}
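
/* A minimal sketch (not compiled) of how an external thread kicks REM: set
 * the force-action flag first, then poke the recompiler so its inner loop
 * exits and notices it. VM_FF_REQUEST is just an example flag.
 */
#if 0 /* illustration only */
static void exampleKickRem(PVM pVM)
{
    VM_FF_SET(pVM, VM_FF_REQUEST);
    REMR3NotifyFF(pVM);
}
#endif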
4382
4383
4384#ifdef VBOX_WITH_STATISTICS
4385void remR3ProfileStart(int statcode)
4386{
4387 STAMPROFILEADV *pStat;
4388 switch (statcode)
4389 {
4390 case STATS_EMULATE_SINGLE_INSTR:
4391 pStat = &gStatExecuteSingleInstr;
4392 break;
4393 case STATS_QEMU_COMPILATION:
4394 pStat = &gStatCompilationQEmu;
4395 break;
4396 case STATS_QEMU_RUN_EMULATED_CODE:
4397 pStat = &gStatRunCodeQEmu;
4398 break;
4399 case STATS_QEMU_TOTAL:
4400 pStat = &gStatTotalTimeQEmu;
4401 break;
4402 case STATS_QEMU_RUN_TIMERS:
4403 pStat = &gStatTimers;
4404 break;
4405 case STATS_TLB_LOOKUP:
4406 pStat = &gStatTBLookup;
4407 break;
4408 case STATS_IRQ_HANDLING:
4409 pStat = &gStatIRQ;
4410 break;
4411 case STATS_RAW_CHECK:
4412 pStat = &gStatRawCheck;
4413 break;
4414
4415 default:
4416 AssertMsgFailed(("unknown stat %d\n", statcode));
4417 return;
4418 }
4419 STAM_PROFILE_ADV_START(pStat, a);
4420}
4421
4422
4423void remR3ProfileStop(int statcode)
4424{
4425 STAMPROFILEADV *pStat;
4426 switch (statcode)
4427 {
4428 case STATS_EMULATE_SINGLE_INSTR:
4429 pStat = &gStatExecuteSingleInstr;
4430 break;
4431 case STATS_QEMU_COMPILATION:
4432 pStat = &gStatCompilationQEmu;
4433 break;
4434 case STATS_QEMU_RUN_EMULATED_CODE:
4435 pStat = &gStatRunCodeQEmu;
4436 break;
4437 case STATS_QEMU_TOTAL:
4438 pStat = &gStatTotalTimeQEmu;
4439 break;
4440 case STATS_QEMU_RUN_TIMERS:
4441 pStat = &gStatTimers;
4442 break;
4443 case STATS_TLB_LOOKUP:
4444 pStat = &gStatTBLookup;
4445 break;
4446 case STATS_IRQ_HANDLING:
4447 pStat = &gStatIRQ;
4448 break;
4449 case STATS_RAW_CHECK:
4450 pStat = &gStatRawCheck;
4451 break;
4452 default:
4453 AssertMsgFailed(("unknown stat %d\n", statcode));
4454 return;
4455 }
4456 STAM_PROFILE_ADV_STOP(pStat, a);
4457}
4458#endif
4459
4460/**
4461 * Raise an RC, force rem exit.
4462 *
4463 * @param pVM VM handle.
4464 * @param rc The rc.
4465 */
4466void remR3RaiseRC(PVM pVM, int rc)
4467{
4468 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4469 Assert(pVM->rem.s.fInREM);
4470 VM_ASSERT_EMT(pVM);
4471 pVM->rem.s.rc = rc;
4472 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4473}
4474
4475
4476/* -+- timers -+- */
4477
4478uint64_t cpu_get_tsc(CPUX86State *env)
4479{
4480 STAM_COUNTER_INC(&gStatCpuGetTSC);
4481 return TMCpuTickGet(env->pVCpu);
4482}
4483
4484
4485/* -+- interrupts -+- */
4486
4487void cpu_set_ferr(CPUX86State *env)
4488{
4489 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4490 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4491}
4492
4493int cpu_get_pic_interrupt(CPUX86State *env)
4494{
4495 uint8_t u8Interrupt;
4496 int rc;
4497
4498 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4499 * In that case we can't call PDMGetInterrupt anymore, because it has already acknowledged
4500 * the interrupt with the (A)PIC.
4501 */
4502 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4503 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4504 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4505 * remove this kludge. */
4506 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4507 {
4508 rc = VINF_SUCCESS;
4509 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4510 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4511 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4512 }
4513 else
4514 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4515
4516 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4517 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4518 if (RT_SUCCESS(rc))
4519 {
4520 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4521 env->interrupt_request |= CPU_INTERRUPT_HARD;
4522 return u8Interrupt;
4523 }
4524 return -1;
4525}
4526
4527
4528/* -+- local apic -+- */
4529
4530#if 0 /* CPUMSetGuestMsr does this now. */
4531void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4532{
4533 int rc = PDMApicSetBase(env->pVM, val);
4534 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4535}
4536#endif
4537
4538uint64_t cpu_get_apic_base(CPUX86State *env)
4539{
4540 uint64_t u64;
4541 int rc = CPUMQueryGuestMsr(env->pVCpu, MSR_IA32_APICBASE, &u64);
4542 if (RT_SUCCESS(rc))
4543 {
4544 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4545 return u64;
4546 }
4547 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4548 return 0;
4549}
4550
4551void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4552{
4553 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4554 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4555}
4556
4557uint8_t cpu_get_apic_tpr(CPUX86State *env)
4558{
4559 uint8_t u8;
4560 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL, NULL);
4561 if (RT_SUCCESS(rc))
4562 {
4563 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4564 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4565 }
4566 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4567 return 0;
4568}
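
/* A worked sketch (not compiled) of the CR8 <-> TPR mapping noted above:
 * CR8 bits 3-0 map to bits 7-4 of the task priority MMIO register.
 */
#if 0 /* illustration only */
static void exampleTprMapping(CPUX86State *env)
{
    uint8_t u8Cr8;
    cpu_set_apic_tpr(env, 0x9);         /* TPR register byte becomes 0x90 */
    u8Cr8 = cpu_get_apic_tpr(env);      /* 0x90 >> 4 == 0x9 */
    Assert(u8Cr8 == 0x9);
}
#endif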
4569
4570/**
4571 * Read an MSR.
4572 *
4573 * @retval 0 success.
4574 * @retval -1 failure, raise \#GP(0).
4575 * @param env The cpu state.
4576 * @param idMsr The MSR to read.
4577 * @param puValue Where to return the value.
4578 */
4579int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4580{
4581 Assert(env->pVCpu);
4582 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4583}
4584
4585/**
4586 * Write to an MSR.
4587 *
4588 * @retval 0 success.
4589 * @retval -1 failure, raise \#GP(0).
4590 * @param env The cpu state.
4591 * @param idMsr The MSR to write.
4592 * @param uValue The value to write.
4593 */
4594int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4595{
4596 Assert(env->pVCpu);
4597 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4598}
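
/* A minimal sketch (not compiled) of the 0 / -1 convention shared by
 * cpu_rdmsr and cpu_wrmsr: on -1 the recompiler raises #GP(0).
 */
#if 0 /* illustration only */
static void exampleMsrAccess(CPUX86State *env)
{
    uint64_t uValue;
    if (cpu_rdmsr(env, MSR_IA32_APICBASE, &uValue) != 0)
        return; /* caller raises #GP(0) */
    if (cpu_wrmsr(env, MSR_IA32_APICBASE, uValue) != 0)
        return; /* likewise for writes */
}
#endif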
4599
4600/* -+- I/O Ports -+- */
4601
4602#undef LOG_GROUP
4603#define LOG_GROUP LOG_GROUP_REM_IOPORT
4604
4605void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4606{
4607 int rc;
4608
4609 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4610 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4611
4612 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 1);
4613 if (RT_LIKELY(rc == VINF_SUCCESS))
4614 return;
4615 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4616 {
4617 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4618 remR3RaiseRC(env->pVM, rc);
4619 return;
4620 }
4621 remAbort(rc, __FUNCTION__);
4622}
4623
4624void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4625{
4626 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4627 int rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 2);
4628 if (RT_LIKELY(rc == VINF_SUCCESS))
4629 return;
4630 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4631 {
4632 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4633 remR3RaiseRC(env->pVM, rc);
4634 return;
4635 }
4636 remAbort(rc, __FUNCTION__);
4637}
4638
4639void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4640{
4641 int rc;
4642 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4643 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 4);
4644 if (RT_LIKELY(rc == VINF_SUCCESS))
4645 return;
4646 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4647 {
4648 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4649 remR3RaiseRC(env->pVM, rc);
4650 return;
4651 }
4652 remAbort(rc, __FUNCTION__);
4653}
4654
4655uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4656{
4657 uint32_t u32 = 0;
4658 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 1);
4659 if (RT_LIKELY(rc == VINF_SUCCESS))
4660 {
4661 if (/*addr != 0x61 && */addr != 0x71)
4662 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4663 return (uint8_t)u32;
4664 }
4665 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4666 {
4667 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4668 remR3RaiseRC(env->pVM, rc);
4669 return (uint8_t)u32;
4670 }
4671 remAbort(rc, __FUNCTION__);
4672 return UINT8_C(0xff);
4673}
4674
4675uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4676{
4677 uint32_t u32 = 0;
4678 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 2);
4679 if (RT_LIKELY(rc == VINF_SUCCESS))
4680 {
4681 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4682 return (uint16_t)u32;
4683 }
4684 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4685 {
4686 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4687 remR3RaiseRC(env->pVM, rc);
4688 return (uint16_t)u32;
4689 }
4690 remAbort(rc, __FUNCTION__);
4691 return UINT16_C(0xffff);
4692}
4693
4694uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4695{
4696 uint32_t u32 = 0;
4697 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 4);
4698 if (RT_LIKELY(rc == VINF_SUCCESS))
4699 {
4700//if (addr==0x01f0 && u32 == 0x6b6d)
4701// loglevel = ~0;
4702 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4703 return u32;
4704 }
4705 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4706 {
4707 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4708 remR3RaiseRC(env->pVM, rc);
4709 return u32;
4710 }
4711 remAbort(rc, __FUNCTION__);
4712 return UINT32_C(0xffffffff);
4713}
4714
4715#undef LOG_GROUP
4716#define LOG_GROUP LOG_GROUP_REM
4717
4718
4719/* -+- helpers and misc other interfaces -+- */
4720
4721/**
4722 * Perform the CPUID instruction.
4723 *
4724 * @param env Pointer to the recompiler CPU structure.
4725 * @param idx The CPUID leaf (eax).
4726 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4727 * @param pEAX Where to store eax.
4728 * @param pEBX Where to store ebx.
4729 * @param pECX Where to store ecx.
4730 * @param pEDX Where to store edx.
4731 */
4732void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4733 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4734{
4735 NOREF(idxSub);
4736 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4737}
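
/* A minimal sketch (not compiled) of calling the CPUID wrapper: note that
 * idxSub is accepted but ignored (NOREF above); CPUM resolves the leaf.
 */
#if 0 /* illustration only */
static void exampleCpuid(CPUX86State *env)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    cpu_x86_cpuid(env, 1 /* leaf */, 0 /* sub-leaf */, &uEAX, &uEBX, &uECX, &uEDX);
    NOREF(uEAX); NOREF(uEBX); NOREF(uECX); NOREF(uEDX);
}
#endif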
4738
4739
4740#if 0 /* not used */
4741/**
4742 * Interface for qemu hardware to report back fatal errors.
4743 */
4744void hw_error(const char *pszFormat, ...)
4745{
4746 /*
4747 * Bitch about it.
4748 */
4749 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4750 * this in my Odin32 tree at home! */
4751 va_list args;
4752 va_start(args, pszFormat);
4753 RTLogPrintf("fatal error in virtual hardware:");
4754 RTLogPrintfV(pszFormat, args);
4755 va_end(args);
4756 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4757
4758 /*
4759 * If we're in REM context we'll sync back the state before 'jumping' to
4760 * the EMs failure handling.
4761 */
4762 PVM pVM = cpu_single_env->pVM;
4763 if (pVM->rem.s.fInREM)
4764 REMR3StateBack(pVM);
4765 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4766 AssertMsgFailed(("EMR3FatalError returned!\n"));
4767}
4768#endif
4769
4770/**
4771 * Interface for the qemu cpu to report unhandled situation
4772 * raising a fatal VM error.
4773 */
4774void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4775{
4776 va_list va;
4777 PVM pVM;
4778 PVMCPU pVCpu;
4779 char szMsg[256];
4780
4781 /*
4782 * Bitch about it.
4783 */
4784 RTLogFlags(NULL, "nodisabled nobuffered");
4785 RTLogFlush(NULL);
4786
4787 va_start(va, pszFormat);
4788#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4789 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4790 unsigned cArgs = 0;
4791 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4792 const char *psz = strchr(pszFormat, '%');
4793 while (psz && cArgs < 6)
4794 {
4795 auArgs[cArgs++] = va_arg(va, uintptr_t);
4796 psz = strchr(psz + 1, '%');
4797 }
4798 switch (cArgs)
4799 {
4800 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4801 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4802 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4803 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4804 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4805 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4806 default:
4807 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4808 }
4809#else
4810 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4811#endif
4812 va_end(va);
4813
4814 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4815 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4816
4817 /*
4818 * If we're in REM context we'll sync back the state before 'jumping' to
4819 * the EMs failure handling.
4820 */
4821 pVM = cpu_single_env->pVM;
4822 pVCpu = cpu_single_env->pVCpu;
4823 Assert(pVCpu);
4824
4825 if (pVM->rem.s.fInREM)
4826 REMR3StateBack(pVM, pVCpu);
4827 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4828 AssertMsgFailed(("EMR3FatalError returned!\n"));
4829}
4830
4831
4832/**
4833 * Aborts the VM.
4834 *
4835 * @param rc VBox error code.
4836 * @param pszTip Hint about why/when this happened.
4837 */
4838void remAbort(int rc, const char *pszTip)
4839{
4840 PVM pVM;
4841 PVMCPU pVCpu;
4842
4843 /*
4844 * Bitch about it.
4845 */
4846 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4847 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4848
4849 /*
4850 * Jump back to where we entered the recompiler.
4851 */
4852 pVM = cpu_single_env->pVM;
4853 pVCpu = cpu_single_env->pVCpu;
4854 Assert(pVCpu);
4855
4856 if (pVM->rem.s.fInREM)
4857 REMR3StateBack(pVM, pVCpu);
4858
4859 EMR3FatalError(pVCpu, rc);
4860 AssertMsgFailed(("EMR3FatalError returned!\n"));
4861}
4862
4863
4864/**
4865 * Dumps a linux system call.
4866 * @param pVCpu VMCPU handle.
4867 */
4868void remR3DumpLnxSyscall(PVMCPU pVCpu)
4869{
4870 static const char *apsz[] =
4871 {
4872 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4873 "sys_exit",
4874 "sys_fork",
4875 "sys_read",
4876 "sys_write",
4877 "sys_open", /* 5 */
4878 "sys_close",
4879 "sys_waitpid",
4880 "sys_creat",
4881 "sys_link",
4882 "sys_unlink", /* 10 */
4883 "sys_execve",
4884 "sys_chdir",
4885 "sys_time",
4886 "sys_mknod",
4887 "sys_chmod", /* 15 */
4888 "sys_lchown16",
4889 "sys_ni_syscall", /* old break syscall holder */
4890 "sys_stat",
4891 "sys_lseek",
4892 "sys_getpid", /* 20 */
4893 "sys_mount",
4894 "sys_oldumount",
4895 "sys_setuid16",
4896 "sys_getuid16",
4897 "sys_stime", /* 25 */
4898 "sys_ptrace",
4899 "sys_alarm",
4900 "sys_fstat",
4901 "sys_pause",
4902 "sys_utime", /* 30 */
4903 "sys_ni_syscall", /* old stty syscall holder */
4904 "sys_ni_syscall", /* old gtty syscall holder */
4905 "sys_access",
4906 "sys_nice",
4907 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4908 "sys_sync",
4909 "sys_kill",
4910 "sys_rename",
4911 "sys_mkdir",
4912 "sys_rmdir", /* 40 */
4913 "sys_dup",
4914 "sys_pipe",
4915 "sys_times",
4916 "sys_ni_syscall", /* old prof syscall holder */
4917 "sys_brk", /* 45 */
4918 "sys_setgid16",
4919 "sys_getgid16",
4920 "sys_signal",
4921 "sys_geteuid16",
4922 "sys_getegid16", /* 50 */
4923 "sys_acct",
4924 "sys_umount", /* recycled never used phys() */
4925 "sys_ni_syscall", /* old lock syscall holder */
4926 "sys_ioctl",
4927 "sys_fcntl", /* 55 */
4928 "sys_ni_syscall", /* old mpx syscall holder */
4929 "sys_setpgid",
4930 "sys_ni_syscall", /* old ulimit syscall holder */
4931 "sys_olduname",
4932 "sys_umask", /* 60 */
4933 "sys_chroot",
4934 "sys_ustat",
4935 "sys_dup2",
4936 "sys_getppid",
4937 "sys_getpgrp", /* 65 */
4938 "sys_setsid",
4939 "sys_sigaction",
4940 "sys_sgetmask",
4941 "sys_ssetmask",
4942 "sys_setreuid16", /* 70 */
4943 "sys_setregid16",
4944 "sys_sigsuspend",
4945 "sys_sigpending",
4946 "sys_sethostname",
4947 "sys_setrlimit", /* 75 */
4948 "sys_old_getrlimit",
4949 "sys_getrusage",
4950 "sys_gettimeofday",
4951 "sys_settimeofday",
4952 "sys_getgroups16", /* 80 */
4953 "sys_setgroups16",
4954 "old_select",
4955 "sys_symlink",
4956 "sys_lstat",
4957 "sys_readlink", /* 85 */
4958 "sys_uselib",
4959 "sys_swapon",
4960 "sys_reboot",
4961 "old_readdir",
4962 "old_mmap", /* 90 */
4963 "sys_munmap",
4964 "sys_truncate",
4965 "sys_ftruncate",
4966 "sys_fchmod",
4967 "sys_fchown16", /* 95 */
4968 "sys_getpriority",
4969 "sys_setpriority",
4970 "sys_ni_syscall", /* old profil syscall holder */
4971 "sys_statfs",
4972 "sys_fstatfs", /* 100 */
4973 "sys_ioperm",
4974 "sys_socketcall",
4975 "sys_syslog",
4976 "sys_setitimer",
4977 "sys_getitimer", /* 105 */
4978 "sys_newstat",
4979 "sys_newlstat",
4980 "sys_newfstat",
4981 "sys_uname",
4982 "sys_iopl", /* 110 */
4983 "sys_vhangup",
4984 "sys_ni_syscall", /* old "idle" system call */
4985 "sys_vm86old",
4986 "sys_wait4",
4987 "sys_swapoff", /* 115 */
4988 "sys_sysinfo",
4989 "sys_ipc",
4990 "sys_fsync",
4991 "sys_sigreturn",
4992 "sys_clone", /* 120 */
4993 "sys_setdomainname",
4994 "sys_newuname",
4995 "sys_modify_ldt",
4996 "sys_adjtimex",
4997 "sys_mprotect", /* 125 */
4998 "sys_sigprocmask",
4999 "sys_ni_syscall", /* old "create_module" */
5000 "sys_init_module",
5001 "sys_delete_module",
5002 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
5003 "sys_quotactl",
5004 "sys_getpgid",
5005 "sys_fchdir",
5006 "sys_bdflush",
5007 "sys_sysfs", /* 135 */
5008 "sys_personality",
5009 "sys_ni_syscall", /* reserved for afs_syscall */
5010 "sys_setfsuid16",
5011 "sys_setfsgid16",
5012 "sys_llseek", /* 140 */
5013 "sys_getdents",
5014 "sys_select",
5015 "sys_flock",
5016 "sys_msync",
5017 "sys_readv", /* 145 */
5018 "sys_writev",
5019 "sys_getsid",
5020 "sys_fdatasync",
5021 "sys_sysctl",
5022 "sys_mlock", /* 150 */
5023 "sys_munlock",
5024 "sys_mlockall",
5025 "sys_munlockall",
5026 "sys_sched_setparam",
5027 "sys_sched_getparam", /* 155 */
5028 "sys_sched_setscheduler",
5029 "sys_sched_getscheduler",
5030 "sys_sched_yield",
5031 "sys_sched_get_priority_max",
5032 "sys_sched_get_priority_min", /* 160 */
5033 "sys_sched_rr_get_interval",
5034 "sys_nanosleep",
5035 "sys_mremap",
5036 "sys_setresuid16",
5037 "sys_getresuid16", /* 165 */
5038 "sys_vm86",
5039 "sys_ni_syscall", /* Old sys_query_module */
5040 "sys_poll",
5041 "sys_nfsservctl",
5042 "sys_setresgid16", /* 170 */
5043 "sys_getresgid16",
5044 "sys_prctl",
5045 "sys_rt_sigreturn",
5046 "sys_rt_sigaction",
5047 "sys_rt_sigprocmask", /* 175 */
5048 "sys_rt_sigpending",
5049 "sys_rt_sigtimedwait",
5050 "sys_rt_sigqueueinfo",
5051 "sys_rt_sigsuspend",
5052 "sys_pread64", /* 180 */
5053 "sys_pwrite64",
5054 "sys_chown16",
5055 "sys_getcwd",
5056 "sys_capget",
5057 "sys_capset", /* 185 */
5058 "sys_sigaltstack",
5059 "sys_sendfile",
5060 "sys_ni_syscall", /* reserved for streams1 */
5061 "sys_ni_syscall", /* reserved for streams2 */
5062 "sys_vfork", /* 190 */
5063 "sys_getrlimit",
5064 "sys_mmap2",
5065 "sys_truncate64",
5066 "sys_ftruncate64",
5067 "sys_stat64", /* 195 */
5068 "sys_lstat64",
5069 "sys_fstat64",
5070 "sys_lchown",
5071 "sys_getuid",
5072 "sys_getgid", /* 200 */
5073 "sys_geteuid",
5074 "sys_getegid",
5075 "sys_setreuid",
5076 "sys_setregid",
5077 "sys_getgroups", /* 205 */
5078 "sys_setgroups",
5079 "sys_fchown",
5080 "sys_setresuid",
5081 "sys_getresuid",
5082 "sys_setresgid", /* 210 */
5083 "sys_getresgid",
5084 "sys_chown",
5085 "sys_setuid",
5086 "sys_setgid",
5087 "sys_setfsuid", /* 215 */
5088 "sys_setfsgid",
5089 "sys_pivot_root",
5090 "sys_mincore",
5091 "sys_madvise",
5092 "sys_getdents64", /* 220 */
5093 "sys_fcntl64",
5094 "sys_ni_syscall", /* reserved for TUX */
5095 "sys_ni_syscall",
5096 "sys_gettid",
5097 "sys_readahead", /* 225 */
5098 "sys_setxattr",
5099 "sys_lsetxattr",
5100 "sys_fsetxattr",
5101 "sys_getxattr",
5102 "sys_lgetxattr", /* 230 */
5103 "sys_fgetxattr",
5104 "sys_listxattr",
5105 "sys_llistxattr",
5106 "sys_flistxattr",
5107 "sys_removexattr", /* 235 */
5108 "sys_lremovexattr",
5109 "sys_fremovexattr",
5110 "sys_tkill",
5111 "sys_sendfile64",
5112 "sys_futex", /* 240 */
5113 "sys_sched_setaffinity",
5114 "sys_sched_getaffinity",
5115 "sys_set_thread_area",
5116 "sys_get_thread_area",
5117 "sys_io_setup", /* 245 */
5118 "sys_io_destroy",
5119 "sys_io_getevents",
5120 "sys_io_submit",
5121 "sys_io_cancel",
5122 "sys_fadvise64", /* 250 */
5123 "sys_ni_syscall",
5124 "sys_exit_group",
5125 "sys_lookup_dcookie",
5126 "sys_epoll_create",
5127 "sys_epoll_ctl", /* 255 */
5128 "sys_epoll_wait",
5129 "sys_remap_file_pages",
5130 "sys_set_tid_address",
5131 "sys_timer_create",
5132 "sys_timer_settime", /* 260 */
5133 "sys_timer_gettime",
5134 "sys_timer_getoverrun",
5135 "sys_timer_delete",
5136 "sys_clock_settime",
5137 "sys_clock_gettime", /* 265 */
5138 "sys_clock_getres",
5139 "sys_clock_nanosleep",
5140 "sys_statfs64",
5141 "sys_fstatfs64",
5142 "sys_tgkill", /* 270 */
5143 "sys_utimes",
5144 "sys_fadvise64_64",
5145 "sys_ni_syscall" /* sys_vserver */
5146 };
5147
5148 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5149 switch (uEAX)
5150 {
5151 default:
5152 if (uEAX < RT_ELEMENTS(apsz))
5153 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5154 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5155 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5156 else
5157 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5158 break;
5159
5160 }
5161}
5162
5163
/**
 * Dumps an OpenBSD system call.
 * @param   pVCpu       VMCPU handle.
 */
void remR3DumpOBsdSyscall(PVMCPU pVCpu)
{
    static const char *apsz[] =
    {
        "SYS_syscall", //0
        "SYS_exit", //1
        "SYS_fork", //2
        "SYS_read", //3
        "SYS_write", //4
        "SYS_open", //5
        "SYS_close", //6
        "SYS_wait4", //7
        "SYS_8",
        "SYS_link", //9
        "SYS_unlink", //10
        "SYS_11",
        "SYS_chdir", //12
        "SYS_fchdir", //13
        "SYS_mknod", //14
        "SYS_chmod", //15
        "SYS_chown", //16
        "SYS_break", //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid", //20
        "SYS_mount", //21
        "SYS_unmount", //22
        "SYS_setuid", //23
        "SYS_getuid", //24
        "SYS_geteuid", //25
        "SYS_ptrace", //26
        "SYS_recvmsg", //27
        "SYS_sendmsg", //28
        "SYS_recvfrom", //29
        "SYS_accept", //30
        "SYS_getpeername", //31
        "SYS_getsockname", //32
        "SYS_access", //33
        "SYS_chflags", //34
        "SYS_fchflags", //35
        "SYS_sync", //36
        "SYS_kill", //37
        "SYS_38",
        "SYS_getppid", //39
        "SYS_40",
        "SYS_dup", //41
        "SYS_opipe", //42
        "SYS_getegid", //43
        "SYS_profil", //44
        "SYS_ktrace", //45
        "SYS_sigaction", //46
        "SYS_getgid", //47
        "SYS_sigprocmask", //48
        "SYS_getlogin", //49
        "SYS_setlogin", //50
        "SYS_acct", //51
        "SYS_sigpending", //52
        "SYS_osigaltstack", //53
        "SYS_ioctl", //54
        "SYS_reboot", //55
        "SYS_revoke", //56
        "SYS_symlink", //57
        "SYS_readlink", //58
        "SYS_execve", //59
        "SYS_umask", //60
        "SYS_chroot", //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork", //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk", //69
        "SYS_sstk", //70
        "SYS_71",
        "SYS_vadvise", //72
        "SYS_munmap", //73
        "SYS_mprotect", //74
        "SYS_madvise", //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore", //78
        "SYS_getgroups", //79
        "SYS_setgroups", //80
        "SYS_getpgrp", //81
        "SYS_setpgid", //82
        "SYS_setitimer", //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer", //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2", //90
        "SYS_91",
        "SYS_fcntl", //92
        "SYS_select", //93
        "SYS_94",
        "SYS_fsync", //95
        "SYS_setpriority", //96
        "SYS_socket", //97
        "SYS_connect", //98
        "SYS_99",
        "SYS_getpriority", //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn", //103
        "SYS_bind", //104
        "SYS_setsockopt", //105
        "SYS_listen", //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend", //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday", //116
        "SYS_getrusage", //117
        "SYS_getsockopt", //118
        "SYS_119",
        "SYS_readv", //120
        "SYS_writev", //121
        "SYS_settimeofday", //122
        "SYS_fchown", //123
        "SYS_fchmod", //124
        "SYS_125",
        "SYS_setreuid", //126
        "SYS_setregid", //127
        "SYS_rename", //128
        "SYS_129",
        "SYS_130",
        "SYS_flock", //131
        "SYS_mkfifo", //132
        "SYS_sendto", //133
        "SYS_shutdown", //134
        "SYS_socketpair", //135
        "SYS_mkdir", //136
        "SYS_rmdir", //137
        "SYS_utimes", //138
        "SYS_139",
        "SYS_adjtime", //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid", //147
        "SYS_quotactl", //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc", //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh", //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch", //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread", //173
        "SYS_pwrite", //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid", //181
        "SYS_setegid", //182
        "SYS_seteuid", //183
        "SYS_lfs_bmapv", //184
        "SYS_lfs_markv", //185
        "SYS_lfs_segclean", //186
        "SYS_lfs_segwait", //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf", //191
        "SYS_fpathconf", //192
        "SYS_swapctl", //193
        "SYS_getrlimit", //194
        "SYS_setrlimit", //195
        "SYS_getdirentries", //196
        "SYS_mmap", //197
        "SYS___syscall", //198
        "SYS_lseek", //199
        "SYS_truncate", //200
        "SYS_ftruncate", //201
        "SYS___sysctl", //202
        "SYS_mlock", //203
        "SYS_munlock", //204
        "SYS_205",
        "SYS_futimes", //206
        "SYS_getpgid", //207
        "SYS_xfspioctl", //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget", //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget", //225
        "SYS_msgsnd", //226
        "SYS_msgrcv", //227
        "SYS_shmat", //228
        "SYS_229",
        "SYS_shmdt", //230
        "SYS_231",
        "SYS_clock_gettime", //232
        "SYS_clock_settime", //233
        "SYS_clock_getres", //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep", //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit", //250
        "SYS_rfork", //251
        "SYS_poll", //252
        "SYS_issetugid", //253
        "SYS_lchown", //254
        "SYS_getsid", //255
        "SYS_msync", //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat", //260
        "SYS_statfs", //261
        "SYS_fstatfs", //262
        "SYS_pipe", //263
        "SYS_fhopen", //264
        "SYS_265",
        "SYS_fhstatfs", //266
        "SYS_preadv", //267
        "SYS_pwritev", //268
        "SYS_kqueue", //269
        "SYS_kevent", //270
        "SYS_mlockall", //271
        "SYS_munlockall", //272
        "SYS_getpeereid", //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid", //281
        "SYS_setresuid", //282
        "SYS_getresgid", //283
        "SYS_setresgid", //284
        "SYS_285",
        "SYS_mquery", //286
        "SYS_closefrom", //287
        "SYS_sigaltstack", //288
        "SYS_shmget", //289
        "SYS_semop", //290
        "SYS_stat", //291
        "SYS_fstat", //292
        "SYS_lstat", //293
        "SYS_fhstat", //294
        "SYS___semctl", //295
        "SYS_shmctl", //296
        "SYS_msgctl", //297
        "SYS_MAXSYSCALL", //298
        //299
        //300
    };
    uint32_t uEAX;
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
            break;
    }
}


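/*
 * Editor's note: a hedged sketch of how a caller might invoke the dumper
 * above when the guest raises the classic int 80h syscall gate on i386.
 * The helper name and the trap-number check are assumptions for
 * illustration only; the actual call sites are elsewhere in this file.
 */
#if 0
static void remR3IllustrateSyscallDump(PVMCPU pVCpu, uint8_t u8TrapNo)
{
    if (u8TrapNo == 0x80) /* assumed software-interrupt syscall gate */
        remR3DumpOBsdSyscall(pVCpu);
}
#endif

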
#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}

void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t        *pbDst = (uint8_t *)dst;
    const uint8_t  *pbSrc = (const uint8_t *)src; /* const-correct: src points to const data */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}

#endif

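/*
 * Editor's note: a minimal, hypothetical self-check for the no-CRT memcpy
 * replacement above.  It is illustrative only (never compiled); the buffer
 * and function names are made up.
 */
#if 0
static void remR3IllustrateNoCrtMemcpy(void)
{
    uint8_t abSrc[4] = { 1, 2, 3, 4 };
    uint8_t abDst[4] = { 0, 0, 0, 0 };

    memcpy(abDst, abSrc, sizeof(abSrc)); /* byte-wise forward copy */
    Assert(abDst[0] == 1 && abDst[3] == 4);
}
#endif
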
/**
 * QEMU callback invoked when the guest's SMM state changes.
 *
 * The recompiler takes no action here; the empty body is intentional.
 */
void cpu_smm_update(CPUX86State *env)
{
}