VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@60914

Last change on this file was 60914, checked in by vboxsync, 9 years ago

EMAll.cpp: Fix unused static function warnings.

1/* $Id: EMAll.cpp 60914 2016-05-10 06:47:16Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_WITH_IEM
23#define LOG_GROUP LOG_GROUP_EM
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/patm.h>
28#include <VBox/vmm/csam.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_IEM
31# include <VBox/vmm/iem.h>
32#endif
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/stam.h>
35#include "EMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/log.h>
46#include "internal/pgm.h"
47#include <iprt/assert.h>
48#include <iprt/asm.h>
49#include <iprt/string.h>
50
51#ifdef VBOX_WITH_IEM
52//# define VBOX_COMPARE_IEM_AND_EM /* debugging... */
53//# define VBOX_SAME_AS_EM
54//# define VBOX_COMPARE_IEM_LAST
55#endif
56
57#ifdef VBOX_WITH_RAW_RING1
58# define EM_EMULATE_SMSW
59#endif
60
61
62/*********************************************************************************************************************************
63* Defined Constants And Macros *
64*********************************************************************************************************************************/
65/** @def EM_ASSERT_FAULT_RETURN
66 * Safety check.
67 *
68 * Could in theory misfire on a cross page boundary access...
69 *
70 * Currently disabled because the CSAM (+ PATM) patch monitoring occasionally
71 * turns up an alias page instead of the original faulting one, annoying the
72 * heck out of anyone running a debug build. See @bugref{2609} and @bugref{1931}.
73 */
74#if 0
75# define EM_ASSERT_FAULT_RETURN(expr, rc) AssertReturn(expr, rc)
76#else
77# define EM_ASSERT_FAULT_RETURN(expr, rc) do { } while (0)
78#endif
79
80
81/*********************************************************************************************************************************
82* Internal Functions *
83*********************************************************************************************************************************/
84#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
85DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
86 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize);
87#endif
88
89
90/*********************************************************************************************************************************
91* Global Variables *
92*********************************************************************************************************************************/
93#ifdef VBOX_COMPARE_IEM_AND_EM
94static const uint32_t g_fInterestingFFs = VMCPU_FF_TO_R3
95 | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_INHIBIT_INTERRUPTS
96 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
97 | VMCPU_FF_TLB_FLUSH | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL;
98static uint32_t g_fIncomingFFs;
99static CPUMCTX g_IncomingCtx;
100static bool g_fIgnoreRaxRdx = false;
101
102static uint32_t g_fEmFFs;
103static CPUMCTX g_EmCtx;
104static uint8_t g_abEmWrote[256];
105static size_t g_cbEmWrote;
106
107static uint32_t g_fIemFFs;
108static CPUMCTX g_IemCtx;
109extern uint8_t g_abIemWrote[256];
110#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
111extern size_t g_cbIemWrote;
112#else
113static size_t g_cbIemWrote;
114#endif
115#endif
116
117
118/**
119 * Get the current execution manager status.
120 *
121 * @returns Current status.
122 * @param pVCpu The cross context virtual CPU structure.
123 */
124VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
125{
126 return pVCpu->em.s.enmState;
127}
128
129
130/**
131 * Sets the current execution manager status. (use only when you know what you're doing!)
132 *
133 * @param pVCpu The cross context virtual CPU structure.
134 * @param enmNewState The new state; only the EMSTATE_WAIT_SIPI -> EMSTATE_HALTED transition is allowed.
135 */
136VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
137{
138 /* Only allowed combination: */
139 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
140 pVCpu->em.s.enmState = enmNewState;
141}
142
143
144/**
145 * Sets the PC for which interrupts should be inhibited.
146 *
147 * @param pVCpu The cross context virtual CPU structure.
148 * @param PC The PC.
149 */
150VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
151{
152 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
153 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
154}
155
156
157/**
158 * Gets the PC for which interrupts should be inhibited.
159 *
160 * There are a few instructions which inhibit or delay interrupts
161 * for the instruction following them. These instructions are:
162 * - STI
163 * - MOV SS, r/m16
164 * - POP SS
165 *
166 * @returns The PC for which interrupts should be inhibited.
167 * @param pVCpu The cross context virtual CPU structure.
168 *
169 */
170VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
171{
172 return pVCpu->em.s.GCPtrInhibitInterrupts;
173}
174
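/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * interrupt dispatcher might pair EMSetInhibitInterruptsPC/EMGetInhibitInterruptsPC
 * with the VMCPU_FF_INHIBIT_INTERRUPTS force flag.  The function name is made up,
 * and VMCPU_FF_CLEAR is assumed to be the usual counterpart of VMCPU_FF_SET.
 */
#if 0
static bool emSampleCanDispatchInterrupt(PVMCPU pVCpu, RTGCUINTPTR uRip)
{
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        return true;                                        /* nothing is inhibited */
    if (EMGetInhibitInterruptsPC(pVCpu) == uRip)
        return false;                                       /* still shadowing the next instruction */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);     /* execution moved on, drop the inhibit */
    return true;
}
#endif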
175
176/**
177 * Prepare an MWAIT - essentials of the MONITOR instruction.
178 *
179 * @returns VINF_SUCCESS
180 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
181 * @param rax The content of RAX.
182 * @param rcx The content of RCX.
183 * @param rdx The content of RDX.
184 * @param GCPhys The physical address corresponding to rax.
185 */
186VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
187{
188 pVCpu->em.s.MWait.uMonitorRAX = rax;
189 pVCpu->em.s.MWait.uMonitorRCX = rcx;
190 pVCpu->em.s.MWait.uMonitorRDX = rdx;
191 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
192 /** @todo Make use of GCPhys. */
193 NOREF(GCPhys);
194 /** @todo Complete MONITOR implementation. */
195 return VINF_SUCCESS;
196}
197
198
199/**
200 * Performs an MWAIT.
201 *
202 * @returns VINF_EM_HALT
203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
204 * @param rax The content of RAX.
205 * @param rcx The content of RCX.
206 */
207VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
208{
209 pVCpu->em.s.MWait.uMWaitRAX = rax;
210 pVCpu->em.s.MWait.uMWaitRCX = rcx;
211 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
212 if (rcx)
213 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
214 else
215 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
216 /** @todo not completely correct?? */
217 return VINF_EM_HALT;
218}
219
220
221
222/**
223 * Determine if we should continue execution in HM after encountering an mwait
224 * instruction.
225 *
226 * Clears MWAIT flags if returning @c true.
227 *
228 * @returns true if we should continue, false if we should halt.
229 * @param pVCpu The cross context virtual CPU structure.
230 * @param pCtx Current CPU context.
231 */
232VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
233{
234 if ( pCtx->eflags.Bits.u1IF
235 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
236 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
237 {
238 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
239 {
240 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
241 return true;
242 }
243 }
244
245 return false;
246}
247
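/*
 * Illustrative sketch (not part of the original file): roughly how an HM exit
 * handler could chain the MONITOR/MWAIT helpers above.  The handler name and
 * the way the context pointers are obtained are assumptions for the example.
 */
#if 0
static VBOXSTRICTRC emSampleHandleMWaitExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    VBOXSTRICTRC rcStrict = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    if (rcStrict == VINF_EM_HALT)
    {
        /* If a wake-up condition is already pending, keep executing instead of
           halting the EMT. */
        if (EMMonitorWaitShouldContinue(pVCpu, pCtx))
            rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
#endif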
248
249/**
250 * Determine if we should continue execution in HM after encountering a hlt
251 * instruction.
252 *
253 * @returns true if we should continue, false if we should halt.
254 * @param pVCpu The cross context virtual CPU structure.
255 * @param pCtx Current CPU context.
256 */
257VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
258{
259 if (pCtx->eflags.Bits.u1IF)
260 return !!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
261 return false;
262}
263
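/*
 * Illustrative sketch (not part of the original file): minimal use of the HLT
 * helper above from a hypothetical exit handler.
 */
#if 0
static int emSampleHandleHltExit(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (EMShouldContinueAfterHalt(pVCpu, pCtx))
        return VINF_SUCCESS;        /* IF=1 and an interrupt is pending: keep running */
    return VINF_EM_HALT;            /* really halt this EMT */
}
#endif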
264
265/**
266 * Locks REM execution to a single VCPU.
267 *
268 * @param pVM The cross context VM structure.
269 */
270VMMDECL(void) EMRemLock(PVM pVM)
271{
272#ifdef VBOX_WITH_REM
273 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
274 return; /* early init */
275
276 Assert(!PGMIsLockOwner(pVM));
277 Assert(!IOMIsLockWriteOwner(pVM));
278 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
279 AssertRCSuccess(rc);
280#endif
281}
282
283
284/**
285 * Unlocks REM execution
286 *
287 * @param pVM The cross context VM structure.
288 */
289VMMDECL(void) EMRemUnlock(PVM pVM)
290{
291#ifdef VBOX_WITH_REM
292 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
293 return; /* early init */
294
295 PDMCritSectLeave(&pVM->em.s.CritSectREM);
296#endif
297}
298
299
300/**
301 * Check if this VCPU currently owns the REM lock.
302 *
303 * @returns true if this VCPU owns the lock, false if not.
304 * @param pVM The cross context VM structure.
305 */
306VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
307{
308#ifdef VBOX_WITH_REM
309 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
310 return true; /* early init */
311
312 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
313#else
314 return true;
315#endif
316}
317
318
319/**
320 * Try to acquire the REM lock.
321 *
322 * @returns VBox status code
323 * @param pVM The cross context VM structure.
324 */
325VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
326{
327#ifdef VBOX_WITH_REM
328 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
329 return VINF_SUCCESS; /* early init */
330
331 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
332#else
333 return VINF_SUCCESS;
334#endif
335}
336
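/*
 * Illustrative sketch (not part of the original file): the usual bracket pattern
 * for the REM lock helpers above.  The work inside the critical section is a
 * placeholder.
 */
#if 0
static void emSampleTouchRemState(PVM pVM)
{
    EMRemLock(pVM);                 /* no-op before init; asserts PGM/IOM locks aren't held */
    Assert(EMRemIsLockOwner(pVM));
    /* ... access recompiler state here ... */
    EMRemUnlock(pVM);
}
#endif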
337
338/**
339 * @callback_method_impl{FNDISREADBYTES}
340 */
341static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
342{
343 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
344#if defined(IN_RC) || defined(IN_RING3)
345 PVM pVM = pVCpu->CTX_SUFF(pVM);
346#endif
347 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
348 int rc;
349
350 /*
351 * Figure how much we can or must read.
352 */
353 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
354 if (cbToRead > cbMaxRead)
355 cbToRead = cbMaxRead;
356 else if (cbToRead < cbMinRead)
357 cbToRead = cbMinRead;
358
359#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
360 /*
361 * We might be called upon to interpret an instruction in a patch.
362 */
363 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), uSrcAddr))
364 {
365# ifdef IN_RC
366 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
367# else
368 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVCpu->CTX_SUFF(pVM), uSrcAddr), cbToRead);
369# endif
370 rc = VINF_SUCCESS;
371 }
372 else
373#endif
374 {
375# ifdef IN_RC
376 /*
377 * Try to access it through the shadow page tables first. Fall back on the
378 * slower PGM method if it fails because the TLB or page table was
379 * modified recently.
380 */
381 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
382 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
383 {
384 cbToRead = cbMinRead;
385 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
386 }
387 if (rc == VERR_ACCESS_DENIED)
388#endif
389 {
390 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
391 if (RT_FAILURE(rc))
392 {
393 if (cbToRead > cbMinRead)
394 {
395 cbToRead = cbMinRead;
396 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
397 }
398 if (RT_FAILURE(rc))
399 {
400#ifndef IN_RC
401 /*
402 * If we fail to find the page via the guest's page tables
403 * we invalidate the page in the host TLB (pertaining to
404 * the guest in the NestedPaging case). See @bugref{6043}.
405 */
406 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
407 {
408 HMInvalidatePage(pVCpu, uSrcAddr);
409 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
410 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
411 }
412#endif
413 }
414 }
415 }
416 }
417
418 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
419 return rc;
420}
421
422
423DECLINLINE(int) emDisCoreOne(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
424{
425 NOREF(pVM);
426 return DISInstrWithReader(InstrGC, (DISCPUMODE)pDis->uCpuMode, emReadBytes, pVCpu, pDis, pOpsize);
427}
428
429
430/**
431 * Disassembles the current instruction.
432 *
433 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
434 * details.
435 *
436 * @param pVM The cross context VM structure.
437 * @param pVCpu The cross context virtual CPU structure.
438 * @param pDis Where to return the parsed instruction info.
439 * @param pcbInstr Where to return the instruction size. (optional)
440 */
441VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
442{
443 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
444 RTGCPTR GCPtrInstr;
445#if 0
446 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
447#else
448/** @todo Get the CPU mode as well while we're at it! */
449 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
450 pCtxCore->rip, &GCPtrInstr);
451#endif
452 if (RT_FAILURE(rc))
453 {
454 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
455 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
456 return rc;
457 }
458 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
459}
460
461
462/**
463 * Disassembles one instruction.
464 *
465 * This is used internally by the interpreter and by trap/access handlers.
466 *
467 * @returns VBox status code.
468 *
469 * @param pVM The cross context VM structure.
470 * @param pVCpu The cross context virtual CPU structure.
471 * @param GCPtrInstr The flat address of the instruction.
472 * @param pCtxCore The context core (used to determine the cpu mode).
473 * @param pDis Where to return the parsed instruction info.
474 * @param pcbInstr Where to return the instruction size. (optional)
475 */
476VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
477 PDISCPUSTATE pDis, unsigned *pcbInstr)
478{
479 NOREF(pVM);
480 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
481 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
482 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
483 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
484 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
485 if (RT_SUCCESS(rc))
486 return VINF_SUCCESS;
487 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
488 return rc;
489}
490
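/*
 * Illustrative sketch (not part of the original file): how a trap handler might
 * use EMInterpretDisasCurrent to inspect the instruction at the current RIP.
 * The function name and the logging are assumptions for the example.
 */
#if 0
static int emSampleDisasAtRip(PVM pVM, PVMCPU pVCpu)
{
    DISCPUSTATE Dis;
    unsigned    cbInstr = 0;
    int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
    if (RT_SUCCESS(rc))
        Log(("Current guest instruction is %u bytes long\n", cbInstr));
    return rc;
}
#endif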
491
492#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
493static void emCompareWithIem(PVMCPU pVCpu, PCCPUMCTX pEmCtx, PCCPUMCTX pIemCtx,
494 VBOXSTRICTRC rcEm, VBOXSTRICTRC rcIem,
495 uint32_t cbEm, uint32_t cbIem)
496{
497 /* Quick compare. */
498 if ( rcEm == rcIem
499 && cbEm == cbIem
500 && g_cbEmWrote == g_cbIemWrote
501 && memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote) == 0
502 && memcmp(pIemCtx, pEmCtx, sizeof(*pIemCtx)) == 0
503 && (g_fEmFFs & g_fInterestingFFs) == (g_fIemFFs & g_fInterestingFFs)
504 )
505 return;
506
507 /* Report exact differences. */
508 RTLogPrintf("! EM and IEM differs at %04x:%08RGv !\n", g_IncomingCtx.cs.Sel, g_IncomingCtx.rip);
509 if (rcEm != rcIem)
510 RTLogPrintf(" * rcIem=%Rrc rcEm=%Rrc\n", VBOXSTRICTRC_VAL(rcIem), VBOXSTRICTRC_VAL(rcEm));
511 else if (cbEm != cbIem)
512 RTLogPrintf(" * cbIem=%#x cbEm=%#x\n", cbIem, cbEm);
513
514 if (RT_SUCCESS(rcEm) && RT_SUCCESS(rcIem))
515 {
516 if (g_cbIemWrote != g_cbEmWrote)
517 RTLogPrintf("!! g_cbIemWrote=%#x g_cbEmWrote=%#x\n", g_cbIemWrote, g_cbEmWrote);
518 else if (memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote))
519 {
520 RTLogPrintf("!! IemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
521 RTLogPrintf("!! EemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
522 }
523
524 if ((g_fEmFFs & g_fInterestingFFs) != (g_fIemFFs & g_fInterestingFFs))
525 RTLogPrintf("!! g_fIemFFs=%#x g_fEmFFs=%#x (diff=%#x)\n", g_fIemFFs & g_fInterestingFFs,
526 g_fEmFFs & g_fInterestingFFs, (g_fIemFFs ^ g_fEmFFs) & g_fInterestingFFs);
527
528# define CHECK_FIELD(a_Field) \
529 do \
530 { \
531 if (pEmCtx->a_Field != pIemCtx->a_Field) \
532 { \
533 switch (sizeof(pEmCtx->a_Field)) \
534 { \
535 case 1: RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
536 case 2: RTLogPrintf("!! %8s differs - iem=%04x - em=%04x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
537 case 4: RTLogPrintf("!! %8s differs - iem=%08x - em=%08x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
538 case 8: RTLogPrintf("!! %8s differs - iem=%016llx - em=%016llx\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
539 default: RTLogPrintf("!! %8s differs\n", #a_Field); break; \
540 } \
541 cDiffs++; \
542 } \
543 } while (0)
544
545# define CHECK_BIT_FIELD(a_Field) \
546 do \
547 { \
548 if (pEmCtx->a_Field != pIemCtx->a_Field) \
549 { \
550 RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); \
551 cDiffs++; \
552 } \
553 } while (0)
554
555# define CHECK_SEL(a_Sel) \
556 do \
557 { \
558 CHECK_FIELD(a_Sel.Sel); \
559 CHECK_FIELD(a_Sel.Attr.u); \
560 CHECK_FIELD(a_Sel.u64Base); \
561 CHECK_FIELD(a_Sel.u32Limit); \
562 CHECK_FIELD(a_Sel.fFlags); \
563 } while (0)
564
565 unsigned cDiffs = 0;
566 if (memcmp(&pEmCtx->fpu, &pIemCtx->fpu, sizeof(pIemCtx->fpu)))
567 {
568 RTLogPrintf(" the FPU state differs\n");
569 cDiffs++;
570 CHECK_FIELD(fpu.FCW);
571 CHECK_FIELD(fpu.FSW);
572 CHECK_FIELD(fpu.FTW);
573 CHECK_FIELD(fpu.FOP);
574 CHECK_FIELD(fpu.FPUIP);
575 CHECK_FIELD(fpu.CS);
576 CHECK_FIELD(fpu.Rsrvd1);
577 CHECK_FIELD(fpu.FPUDP);
578 CHECK_FIELD(fpu.DS);
579 CHECK_FIELD(fpu.Rsrvd2);
580 CHECK_FIELD(fpu.MXCSR);
581 CHECK_FIELD(fpu.MXCSR_MASK);
582 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
583 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
584 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
585 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
586 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
587 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
588 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
589 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
590 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
591 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
592 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
593 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
594 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
595 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
596 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
597 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
598 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
599 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
600 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
601 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
602 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
603 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
604 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
605 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
606 for (unsigned i = 0; i < RT_ELEMENTS(pEmCtx->fpu.au32RsrvdRest); i++)
607 CHECK_FIELD(fpu.au32RsrvdRest[i]);
608 }
609 CHECK_FIELD(rip);
610 if (pEmCtx->rflags.u != pIemCtx->rflags.u)
611 {
612 RTLogPrintf("!! rflags differs - iem=%08llx em=%08llx\n", pIemCtx->rflags.u, pEmCtx->rflags.u);
613 CHECK_BIT_FIELD(rflags.Bits.u1CF);
614 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
615 CHECK_BIT_FIELD(rflags.Bits.u1PF);
616 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
617 CHECK_BIT_FIELD(rflags.Bits.u1AF);
618 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
619 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
620 CHECK_BIT_FIELD(rflags.Bits.u1SF);
621 CHECK_BIT_FIELD(rflags.Bits.u1TF);
622 CHECK_BIT_FIELD(rflags.Bits.u1IF);
623 CHECK_BIT_FIELD(rflags.Bits.u1DF);
624 CHECK_BIT_FIELD(rflags.Bits.u1OF);
625 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
626 CHECK_BIT_FIELD(rflags.Bits.u1NT);
627 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
628 CHECK_BIT_FIELD(rflags.Bits.u1RF);
629 CHECK_BIT_FIELD(rflags.Bits.u1VM);
630 CHECK_BIT_FIELD(rflags.Bits.u1AC);
631 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
632 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
633 CHECK_BIT_FIELD(rflags.Bits.u1ID);
634 }
635
636 if (!g_fIgnoreRaxRdx)
637 CHECK_FIELD(rax);
638 CHECK_FIELD(rcx);
639 if (!g_fIgnoreRaxRdx)
640 CHECK_FIELD(rdx);
641 CHECK_FIELD(rbx);
642 CHECK_FIELD(rsp);
643 CHECK_FIELD(rbp);
644 CHECK_FIELD(rsi);
645 CHECK_FIELD(rdi);
646 CHECK_FIELD(r8);
647 CHECK_FIELD(r9);
648 CHECK_FIELD(r10);
649 CHECK_FIELD(r11);
650 CHECK_FIELD(r12);
651 CHECK_FIELD(r13);
652 CHECK_SEL(cs);
653 CHECK_SEL(ss);
654 CHECK_SEL(ds);
655 CHECK_SEL(es);
656 CHECK_SEL(fs);
657 CHECK_SEL(gs);
658 CHECK_FIELD(cr0);
659 CHECK_FIELD(cr2);
660 CHECK_FIELD(cr3);
661 CHECK_FIELD(cr4);
662 CHECK_FIELD(dr[0]);
663 CHECK_FIELD(dr[1]);
664 CHECK_FIELD(dr[2]);
665 CHECK_FIELD(dr[3]);
666 CHECK_FIELD(dr[6]);
667 CHECK_FIELD(dr[7]);
668 CHECK_FIELD(gdtr.cbGdt);
669 CHECK_FIELD(gdtr.pGdt);
670 CHECK_FIELD(idtr.cbIdt);
671 CHECK_FIELD(idtr.pIdt);
672 CHECK_SEL(ldtr);
673 CHECK_SEL(tr);
674 CHECK_FIELD(SysEnter.cs);
675 CHECK_FIELD(SysEnter.eip);
676 CHECK_FIELD(SysEnter.esp);
677 CHECK_FIELD(msrEFER);
678 CHECK_FIELD(msrSTAR);
679 CHECK_FIELD(msrPAT);
680 CHECK_FIELD(msrLSTAR);
681 CHECK_FIELD(msrCSTAR);
682 CHECK_FIELD(msrSFMASK);
683 CHECK_FIELD(msrKERNELGSBASE);
684
685# undef CHECK_FIELD
686# undef CHECK_BIT_FIELD
687 }
688}
689#endif /* VBOX_COMPARE_IEM_FIRST || VBOX_COMPARE_IEM_LAST */
690
691
692/**
693 * Interprets the current instruction.
694 *
695 * @returns VBox status code.
696 * @retval VINF_* Scheduling instructions.
697 * @retval VERR_EM_INTERPRETER Something we can't cope with.
698 * @retval VERR_* Fatal errors.
699 *
700 * @param pVCpu The cross context virtual CPU structure.
701 * @param pRegFrame The register frame.
702 * Updates the EIP if an instruction was executed successfully.
703 * @param pvFault The fault address (CR2).
704 *
705 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
706 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
707 * to worry about e.g. invalid modrm combinations (!)
708 */
709VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
710{
711 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
712 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
713#ifdef VBOX_WITH_IEM
714 NOREF(pvFault);
715
716# ifdef VBOX_COMPARE_IEM_AND_EM
717 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
718 g_IncomingCtx = *pCtx;
719 g_fIncomingFFs = pVCpu->fLocalForcedActions;
720 g_cbEmWrote = g_cbIemWrote = 0;
721
722# ifdef VBOX_COMPARE_IEM_FIRST
723 /* IEM */
724 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
725 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
726 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
727 rcIem = VERR_EM_INTERPRETER;
728 g_IemCtx = *pCtx;
729 g_fIemFFs = pVCpu->fLocalForcedActions;
730 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
731 *pCtx = g_IncomingCtx;
732# endif
733
734 /* EM */
735 RTGCPTR pbCode;
736 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
737 if (RT_SUCCESS(rcEm))
738 {
739 uint32_t cbOp;
740 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
741 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
742 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
743 if (RT_SUCCESS(rcEm))
744 {
745 Assert(cbOp == pDis->cbInstr);
746 uint32_t cbIgnored;
747 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
748 if (RT_SUCCESS(rcEm))
749 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
750
751 }
752 else rcEm = VERR_EM_INTERPRETER; /* only fail when EM could not handle the instruction (matches EMInterpretInstructionEx) */
753 }
754 else
755 rcEm = VERR_EM_INTERPRETER;
756# ifdef VBOX_SAME_AS_EM
757 if (rcEm == VERR_EM_INTERPRETER)
758 {
759 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
760 return rcEm;
761 }
762# endif
763 g_EmCtx = *pCtx;
764 g_fEmFFs = pVCpu->fLocalForcedActions;
765 VBOXSTRICTRC rc = rcEm;
766
767# ifdef VBOX_COMPARE_IEM_LAST
768 /* IEM */
769 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
770 *pCtx = g_IncomingCtx;
771 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
772 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
773 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
774 rcIem = VERR_EM_INTERPRETER;
775 g_IemCtx = *pCtx;
776 g_fIemFFs = pVCpu->fLocalForcedActions;
777 rc = rcIem;
778# endif
779
780# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
781 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
782# endif
783
784# else
785 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
786 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
787 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
788 rc = VERR_EM_INTERPRETER;
789# endif
790 if (rc != VINF_SUCCESS)
791 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
792
793 return rc;
794#else
795 RTGCPTR pbCode;
796 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
797 if (RT_SUCCESS(rc))
798 {
799 uint32_t cbOp;
800 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
801 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
802 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
803 if (RT_SUCCESS(rc))
804 {
805 Assert(cbOp == pDis->cbInstr);
806 uint32_t cbIgnored;
807 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
808 if (RT_SUCCESS(rc))
809 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
810
811 return rc;
812 }
813 }
814 return VERR_EM_INTERPRETER;
815#endif
816}
817
818
819/**
820 * Interprets the current instruction.
821 *
822 * @returns VBox status code.
823 * @retval VINF_* Scheduling instructions.
824 * @retval VERR_EM_INTERPRETER Something we can't cope with.
825 * @retval VERR_* Fatal errors.
826 *
827 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
828 * @param pRegFrame The register frame.
829 * Updates the EIP if an instruction was executed successfully.
830 * @param pvFault The fault address (CR2).
831 * @param pcbWritten Size of the write (if applicable).
832 *
833 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
834 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
835 * to worry about e.g. invalid modrm combinations (!)
836 */
837VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
838{
839 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
840 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
841#ifdef VBOX_WITH_IEM
842 NOREF(pvFault);
843
844# ifdef VBOX_COMPARE_IEM_AND_EM
845 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
846 g_IncomingCtx = *pCtx;
847 g_fIncomingFFs = pVCpu->fLocalForcedActions;
848 g_cbEmWrote = g_cbIemWrote = 0;
849
850# ifdef VBOX_COMPARE_IEM_FIRST
851 /* IEM */
852 uint32_t cbIemWritten = 0;
853 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
854 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
855 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
856 rcIem = VERR_EM_INTERPRETER;
857 g_IemCtx = *pCtx;
858 g_fIemFFs = pVCpu->fLocalForcedActions;
859 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
860 *pCtx = g_IncomingCtx;
861# endif
862
863 /* EM */
864 uint32_t cbEmWritten = 0;
865 RTGCPTR pbCode;
866 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
867 if (RT_SUCCESS(rcEm))
868 {
869 uint32_t cbOp;
870 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
871 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
872 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
873 if (RT_SUCCESS(rcEm))
874 {
875 Assert(cbOp == pDis->cbInstr);
876 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbEmWritten);
877 if (RT_SUCCESS(rcEm))
878 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
879
880 }
881 else
882 rcEm = VERR_EM_INTERPRETER;
883 }
884 else
885 rcEm = VERR_EM_INTERPRETER;
886# ifdef VBOX_SAME_AS_EM
887 if (rcEm == VERR_EM_INTERPRETER)
888 {
889 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
890 return rcEm;
891 }
892# endif
893 g_EmCtx = *pCtx;
894 g_fEmFFs = pVCpu->fLocalForcedActions;
895 *pcbWritten = cbEmWritten;
896 VBOXSTRICTRC rc = rcEm;
897
898# ifdef VBOX_COMPARE_IEM_LAST
899 /* IEM */
900 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
901 *pCtx = g_IncomingCtx;
902 uint32_t cbIemWritten = 0;
903 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
904 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
905 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
906 rcIem = VERR_EM_INTERPRETER;
907 g_IemCtx = *pCtx;
908 g_fIemFFs = pVCpu->fLocalForcedActions;
909 *pcbWritten = cbIemWritten;
910 rc = rcIem;
911# endif
912
913# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
914 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, cbEmWritten, cbIemWritten);
915# endif
916
917# else
918 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
919 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
920 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
921 rc = VERR_EM_INTERPRETER;
922# endif
923 if (rc != VINF_SUCCESS)
924 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
925
926 return rc;
927#else
928 RTGCPTR pbCode;
929 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
930 if (RT_SUCCESS(rc))
931 {
932 uint32_t cbOp;
933 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
934 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
935 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
936 if (RT_SUCCESS(rc))
937 {
938 Assert(cbOp == pDis->cbInstr);
939 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, pcbWritten);
940 if (RT_SUCCESS(rc))
941 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
942
943 return rc;
944 }
945 }
946 return VERR_EM_INTERPRETER;
947#endif
948}
949
950
951/**
952 * Interprets the current instruction using the supplied DISCPUSTATE structure.
953 *
954 * IP/EIP/RIP *IS* updated!
955 *
956 * @returns VBox strict status code.
957 * @retval VINF_* Scheduling instructions. When these are returned, it
958 * starts to get a bit tricky to know whether code was
959 * executed or not... We'll address this when it becomes a problem.
960 * @retval VERR_EM_INTERPRETER Something we can't cope with.
961 * @retval VERR_* Fatal errors.
962 *
963 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
964 * @param pDis The disassembler cpu state for the instruction to be
965 * interpreted.
966 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
967 * @param pvFault The fault address (CR2).
968 * @param enmCodeType Code type (user/supervisor)
969 *
970 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
971 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
972 * to worry about e.g. invalid modrm combinations (!)
973 *
974 * @todo At this time we do NOT check if the instruction overwrites vital information.
975 * Make sure this can't happen!! (will add some assertions/checks later)
976 */
977VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
978 RTGCPTR pvFault, EMCODETYPE enmCodeType)
979{
980 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
981 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
982#ifdef VBOX_WITH_IEM
983 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
984
985# ifdef VBOX_COMPARE_IEM_AND_EM
986 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
987 g_IncomingCtx = *pCtx;
988 g_fIncomingFFs = pVCpu->fLocalForcedActions;
989 g_cbEmWrote = g_cbIemWrote = 0;
990
991# ifdef VBOX_COMPARE_IEM_FIRST
992 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
993 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
994 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
995 rcIem = VERR_EM_INTERPRETER;
996 g_IemCtx = *pCtx;
997 g_fIemFFs = pVCpu->fLocalForcedActions;
998 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
999 *pCtx = g_IncomingCtx;
1000# endif
1001
1002 /* EM */
1003 uint32_t cbIgnored;
1004 VBOXSTRICTRC rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1005 if (RT_SUCCESS(rcEm))
1006 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1007# ifdef VBOX_SAME_AS_EM
1008 if (rcEm == VERR_EM_INTERPRETER)
1009 {
1010 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1011 return rcEm;
1012 }
1013# endif
1014 g_EmCtx = *pCtx;
1015 g_fEmFFs = pVCpu->fLocalForcedActions;
1016 VBOXSTRICTRC rc = rcEm;
1017
1018# ifdef VBOX_COMPARE_IEM_LAST
1019 /* IEM */
1020 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1021 *pCtx = g_IncomingCtx;
1022 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1023 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1024 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1025 rcIem = VERR_EM_INTERPRETER;
1026 g_IemCtx = *pCtx;
1027 g_fIemFFs = pVCpu->fLocalForcedActions;
1028 rc = rcIem;
1029# endif
1030
1031# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1032 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1033# endif
1034
1035# else
1036 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1037 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1038 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1039 rc = VERR_EM_INTERPRETER;
1040# endif
1041
1042 if (rc != VINF_SUCCESS)
1043 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1044
1045 return rc;
1046#else
1047 uint32_t cbIgnored;
1048 VBOXSTRICTRC rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1049 if (RT_SUCCESS(rc))
1050 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1051 return rc;
1052#endif
1053}
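
/*
 * Illustrative sketch (not part of the original file): combining the disassembly
 * and interpretation entry points above.  Real callers typically reuse the
 * DISCPUSTATE cached in the VCPU rather than a stack copy.
 */
#if 0
static VBOXSTRICTRC emSampleInterpretCurrent(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR GCPtrFault)
{
    DISCPUSTATE Dis;
    unsigned    cbInstr = 0;
    int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
    if (RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;
    return EMInterpretInstructionDisasState(pVCpu, &Dis, pRegFrame, GCPtrFault, EMCODETYPE_SUPERVISOR);
}
#endif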
1054
1055#ifdef IN_RC
1056
1057DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1058{
1059 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1060 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1061 return rc;
1062 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1063}
1064
1065
1066/**
1067 * Interpret IRET (currently only to V86 code) - PATM only.
1068 *
1069 * @returns VBox status code.
1070 * @param pVM The cross context VM structure.
1071 * @param pVCpu The cross context virtual CPU structure.
1072 * @param pRegFrame The register frame.
1073 *
1074 */
1075VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1076{
1077 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1078 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1079 int rc;
1080
1081 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1082 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1083 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1084 * this function. Fear that it may guru on us, thus not converted to
1085 * IEM. */
1086
1087 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1088 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1089 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1090 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1091 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1092
1093 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1094 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1095 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1096 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1097 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1098 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1099 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1100
1101 pRegFrame->eip = eip & 0xffff;
1102 pRegFrame->cs.Sel = cs;
1103
1104 /* Mask away all reserved bits */
1105 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1106 eflags &= uMask;
1107
1108 CPUMRawSetEFlags(pVCpu, eflags);
1109 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1110
1111 pRegFrame->esp = esp;
1112 pRegFrame->ss.Sel = ss;
1113 pRegFrame->ds.Sel = ds;
1114 pRegFrame->es.Sel = es;
1115 pRegFrame->fs.Sel = fs;
1116 pRegFrame->gs.Sel = gs;
1117
1118 return VINF_SUCCESS;
1119}
1120
1121/**
1122 * IRET Emulation.
1123 */
1124static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1125{
1126#ifdef VBOX_WITH_RAW_RING1
1127 NOREF(pvFault); NOREF(pcbSize);
1128 if (EMIsRawRing1Enabled(pVM))
1129 {
1130 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1131 RTGCUINTPTR eip, cs, esp, ss, eflags, uMask;
1132 int rc;
1133 uint32_t cpl, rpl;
1134
1135 /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */
1136 /** @todo We don't verify all the edge cases that generate \#GP faults. */
1137
1138 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1139 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1140 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1141 * this function. Fear that it may guru on us, thus not converted to
1142 * IEM. */
1143
1144 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1145 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1146 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1147 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1148 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1149
1150 /* Deal with V86 above. */
1151 if (eflags & X86_EFL_VM)
1152 return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
1153
1154 cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
1155 rpl = cs & X86_SEL_RPL;
1156
1157 Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags));
1158 if (rpl != cpl)
1159 {
1160 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1161 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1162 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1163 Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl));
1164 Log(("emInterpretIret: SS:ESP=%04x:%08x\n", ss, esp));
1165 pRegFrame->ss.Sel = ss;
1166 pRegFrame->esp = esp;
1167 }
1168 pRegFrame->cs.Sel = cs;
1169 pRegFrame->eip = eip;
1170
1171 /* Adjust CS & SS as required. */
1172 CPUMRCRecheckRawState(pVCpu, pRegFrame);
1173
1174 /* Mask away all reserved bits */
1175 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1176 eflags &= uMask;
1177
1178 CPUMRawSetEFlags(pVCpu, eflags);
1179 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1180 return VINF_SUCCESS;
1181 }
1182#else
1183 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
1184#endif
1185 return VERR_EM_INTERPRETER;
1186}
1187
1188#endif /* IN_RC */
1189
1190
1191
1192/*
1193 *
1194 * Old interpreter primitives used by HM, move/eliminate later.
1195 * Old interpreter primitives used by HM, move/eliminate later.
1196 * Old interpreter primitives used by HM, move/eliminate later.
1197 * Old interpreter primitives used by HM, move/eliminate later.
1198 * Old interpreter primitives used by HM, move/eliminate later.
1199 *
1200 */
1201
1202
1203/**
1204 * Interpret CPUID given the parameters in the CPU context.
1205 *
1206 * @returns VBox status code.
1207 * @param pVM The cross context VM structure.
1208 * @param pVCpu The cross context virtual CPU structure.
1209 * @param pRegFrame The register frame.
1210 *
1211 */
1212VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1213{
1214 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1215 uint32_t iLeaf = pRegFrame->eax;
1216 uint32_t iSubLeaf = pRegFrame->ecx;
1217 NOREF(pVM);
1218
1219 /* cpuid clears the high dwords of the affected 64 bits registers. */
1220 pRegFrame->rax = 0;
1221 pRegFrame->rbx = 0;
1222 pRegFrame->rcx = 0;
1223 pRegFrame->rdx = 0;
1224
1225 /* Note: operates the same in 64 and non-64 bits mode. */
1226 CPUMGetGuestCpuId(pVCpu, iLeaf, iSubLeaf, &pRegFrame->eax, &pRegFrame->ebx, &pRegFrame->ecx, &pRegFrame->edx);
1227 Log(("Emulate: CPUID %x -> %08x %08x %08x %08x\n", iLeaf, pRegFrame->eax, pRegFrame->ebx, pRegFrame->ecx, pRegFrame->edx));
1228 return VINF_SUCCESS;
1229}
1230
1231
1232/**
1233 * Interpret RDTSC.
1234 *
1235 * @returns VBox status code.
1236 * @param pVM The cross context VM structure.
1237 * @param pVCpu The cross context virtual CPU structure.
1238 * @param pRegFrame The register frame.
1239 *
1240 */
1241VMM_INT_DECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1242{
1243 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1244 unsigned uCR4 = CPUMGetGuestCR4(pVCpu);
1245
1246 if (uCR4 & X86_CR4_TSD)
1247 return VERR_EM_INTERPRETER; /* genuine #GP */
1248
1249 uint64_t uTicks = TMCpuTickGet(pVCpu);
1250
1251 /* Same behaviour in 32 & 64 bits mode */
1252 pRegFrame->rax = (uint32_t)uTicks;
1253 pRegFrame->rdx = (uTicks >> 32ULL);
1254#ifdef VBOX_COMPARE_IEM_AND_EM
1255 g_fIgnoreRaxRdx = true;
1256#endif
1257
1258 NOREF(pVM);
1259 return VINF_SUCCESS;
1260}
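
/*
 * Illustrative sketch (not part of the original file): a minimal way an exit
 * handler might use EMInterpretRdtsc.  Advancing RIP past the two byte opcode
 * (0F 31) is an assumption about what the surrounding code would do; real
 * handlers rely on the reported exit instruction length instead.
 */
#if 0
static int emSampleEmulateRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    int rc = EMInterpretRdtsc(pVM, pVCpu, pCtxCore);
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += 2;         /* skip the emulated RDTSC */
    return rc;                      /* VERR_EM_INTERPRETER -> raise #GP elsewhere */
}
#endif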
1261
1262/**
1263 * Interpret RDTSCP.
1264 *
1265 * @returns VBox status code.
1266 * @param pVM The cross context VM structure.
1267 * @param pVCpu The cross context virtual CPU structure.
1268 * @param pCtx The CPU context.
1269 *
1270 */
1271VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1272{
1273 Assert(pCtx == CPUMQueryGuestCtxPtr(pVCpu));
1274 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1275
1276 if (!CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1277 {
1278 AssertFailed();
1279 return VERR_EM_INTERPRETER; /* genuine #UD */
1280 }
1281
1282 if (uCR4 & X86_CR4_TSD)
1283 return VERR_EM_INTERPRETER; /* genuine #GP */
1284
1285 uint64_t uTicks = TMCpuTickGet(pVCpu);
1286
1287 /* Same behaviour in 32 & 64 bits mode */
1288 pCtx->rax = (uint32_t)uTicks;
1289 pCtx->rdx = (uTicks >> 32ULL);
1290#ifdef VBOX_COMPARE_IEM_AND_EM
1291 g_fIgnoreRaxRdx = true;
1292#endif
1293 /* Low dword of the TSC_AUX msr only. */
1294 VBOXSTRICTRC rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx); Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1295 pCtx->rcx &= UINT32_C(0xffffffff);
1296
1297 return VINF_SUCCESS;
1298}
1299
1300/**
1301 * Interpret RDPMC.
1302 *
1303 * @returns VBox status code.
1304 * @param pVM The cross context VM structure.
1305 * @param pVCpu The cross context virtual CPU structure.
1306 * @param pRegFrame The register frame.
1307 *
1308 */
1309VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1310{
1311 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1312 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1313
1314 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1315 if ( !(uCR4 & X86_CR4_PCE)
1316 && CPUMGetGuestCPL(pVCpu) != 0)
1317 {
1318 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1319 return VERR_EM_INTERPRETER; /* genuine #GP */
1320 }
1321
1322 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1323 pRegFrame->rax = 0;
1324 pRegFrame->rdx = 0;
1325 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1326 * ecx but see @bugref{3472}! */
1327
1328 NOREF(pVM);
1329 return VINF_SUCCESS;
1330}
1331
1332
1333/**
1334 * MWAIT Emulation.
1335 */
1336VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1337{
1338 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1339 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1340 NOREF(pVM);
1341
1342 /* Get the current privilege level. */
1343 cpl = CPUMGetGuestCPL(pVCpu);
1344 if (cpl != 0)
1345 return VERR_EM_INTERPRETER; /* supervisor only */
1346
1347 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1348 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1349 return VERR_EM_INTERPRETER; /* not supported */
1350
1351 /*
1352 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1353 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1354 */
1355 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1356 if (pRegFrame->ecx > 1)
1357 {
1358 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1359 return VERR_EM_INTERPRETER; /* illegal value. */
1360 }
1361
1362 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1363 {
1364 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1365 return VERR_EM_INTERPRETER; /* illegal value. */
1366 }
1367
1368 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1369}
1370
1371
1372/**
1373 * MONITOR Emulation.
1374 */
1375VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1376{
1377 uint32_t u32Dummy, u32ExtFeatures, cpl;
1378 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1379 NOREF(pVM);
1380
1381 if (pRegFrame->ecx != 0)
1382 {
1383 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1384 return VERR_EM_INTERPRETER; /* illegal value. */
1385 }
1386
1387 /* Get the current privilege level. */
1388 cpl = CPUMGetGuestCPL(pVCpu);
1389 if (cpl != 0)
1390 return VERR_EM_INTERPRETER; /* supervisor only */
1391
1392 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1393 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1394 return VERR_EM_INTERPRETER; /* not supported */
1395
1396 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1397 return VINF_SUCCESS;
1398}
1399
1400
1401/* VT-x only: */
1402
1403/**
1404 * Interpret INVLPG.
1405 *
1406 * @returns VBox status code.
1407 * @param pVM The cross context VM structure.
1408 * @param pVCpu The cross context virtual CPU structure.
1409 * @param pRegFrame The register frame.
1410 * @param pAddrGC Operand address.
1411 *
1412 */
1413VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC)
1414{
1415 /** @todo is addr always a flat linear address or ds based
1416 * (in absence of segment override prefixes)????
1417 */
1418 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1419 NOREF(pVM); NOREF(pRegFrame);
1420#ifdef IN_RC
1421 LogFlow(("RC: EMULATE: invlpg %RGv\n", pAddrGC));
1422#endif
1423 VBOXSTRICTRC rc = PGMInvalidatePage(pVCpu, pAddrGC);
1424 if ( rc == VINF_SUCCESS
1425 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
1426 return VINF_SUCCESS;
1427 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
1428 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), pAddrGC),
1429 VERR_EM_INTERPRETER);
1430 return rc;
1431}
1432
1433
1434/**
1435 * Update CRx.
1436 *
1437 * @returns VBox status code.
1438 * @param pVM The cross context VM structure.
1439 * @param pVCpu The cross context virtual CPU structure.
1440 * @param pRegFrame The register frame.
1441 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
1442 * @param val New CRx value
1443 *
1444 */
1445static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
1446{
1447 uint64_t oldval;
1448 uint64_t msrEFER;
1449 uint32_t fValid;
1450 int rc, rc2;
1451 NOREF(pVM);
1452
1453 /** @todo Clean up this mess. */
1454 LogFlow(("emInterpretCRxWrite at %RGv CR%d <- %RX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
1455 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1456 switch (DestRegCrx)
1457 {
1458 case DISCREG_CR0:
1459 oldval = CPUMGetGuestCR0(pVCpu);
1460#ifdef IN_RC
1461 /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
1462 if ( (val & (X86_CR0_WP | X86_CR0_AM))
1463 != (oldval & (X86_CR0_WP | X86_CR0_AM)))
1464 return VERR_EM_INTERPRETER;
1465#endif
1466 rc = VINF_SUCCESS;
1467#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
1468 CPUMSetGuestCR0(pVCpu, val);
1469#else
1470 CPUMQueryGuestCtxPtr(pVCpu)->cr0 = val | X86_CR0_ET;
1471#endif
1472 val = CPUMGetGuestCR0(pVCpu);
1473 if ( (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
1474 != (val & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
1475 {
1476 /* global flush */
1477 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
1478 AssertRCReturn(rc, rc);
1479 }
1480
1481 /* Deal with long mode enabling/disabling. */
1482 msrEFER = CPUMGetGuestEFER(pVCpu);
1483 if (msrEFER & MSR_K6_EFER_LME)
1484 {
1485 if ( !(oldval & X86_CR0_PG)
1486 && (val & X86_CR0_PG))
1487 {
1488 /* Illegal to have an active 64 bits CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1489 if (pRegFrame->cs.Attr.n.u1Long)
1490 {
1491 AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
1492 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
1493 }
1494
1495 /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1496 if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE))
1497 {
1498 AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
1499 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
1500 }
1501 msrEFER |= MSR_K6_EFER_LMA;
1502 }
1503 else
1504 if ( (oldval & X86_CR0_PG)
1505 && !(val & X86_CR0_PG))
1506 {
1507 msrEFER &= ~MSR_K6_EFER_LMA;
1508 /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
1509 }
1510 CPUMSetGuestEFER(pVCpu, msrEFER);
1511 }
1512 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
1513 return rc2 == VINF_SUCCESS ? rc : rc2;
1514
1515 case DISCREG_CR2:
1516 rc = CPUMSetGuestCR2(pVCpu, val); AssertRC(rc);
1517 return VINF_SUCCESS;
1518
1519 case DISCREG_CR3:
1520 /* Reloading the current CR3 means the guest just wants to flush the TLBs */
1521 rc = CPUMSetGuestCR3(pVCpu, val); AssertRC(rc);
1522 if (CPUMGetGuestCR0(pVCpu) & X86_CR0_PG)
1523 {
1524 /* flush */
1525 rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
1526 AssertRC(rc);
1527 }
1528 return rc;
1529
1530 case DISCREG_CR4:
1531 oldval = CPUMGetGuestCR4(pVCpu);
1532 rc = CPUMSetGuestCR4(pVCpu, val); AssertRC(rc);
1533 val = CPUMGetGuestCR4(pVCpu);
1534
1535 /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
1536 msrEFER = CPUMGetGuestEFER(pVCpu);
1537 if ( (msrEFER & MSR_K6_EFER_LMA)
1538 && (oldval & X86_CR4_PAE)
1539 && !(val & X86_CR4_PAE))
1540 {
1541 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
1542 }
1543
1544 /* From IEM iemCImpl_load_CrX. */
1545 /** @todo Check guest CPUID bits for determining corresponding valid bits. */
1546 fValid = X86_CR4_VME | X86_CR4_PVI
1547 | X86_CR4_TSD | X86_CR4_DE
1548 | X86_CR4_PSE | X86_CR4_PAE
1549 | X86_CR4_MCE | X86_CR4_PGE
1550 | X86_CR4_PCE | X86_CR4_OSFXSR
1551 | X86_CR4_OSXMMEEXCPT;
1552 //if (xxx)
1553 // fValid |= X86_CR4_VMXE;
1554 //if (xxx)
1555 // fValid |= X86_CR4_OSXSAVE;
1556 if (val & ~(uint64_t)fValid)
1557 {
1558 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", val, val & ~(uint64_t)fValid));
1559 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
1560 }
1561
1562 rc = VINF_SUCCESS;
1563 if ( (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
1564 != (val & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
1565 {
1566 /* global flush */
1567 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
1568 AssertRCReturn(rc, rc);
1569 }
1570
1571 /* Feeling extremely lazy. */
1572# ifdef IN_RC
1573 if ( (oldval & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
1574 != (val & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
1575 {
1576 Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
1577 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1578 }
1579# endif
1580# ifdef VBOX_WITH_RAW_MODE
1581 if (((val ^ oldval) & X86_CR4_VME) && !HMIsEnabled(pVM))
1582 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1583# endif
1584
1585 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
1586 return rc2 == VINF_SUCCESS ? rc : rc2;
1587
1588 case DISCREG_CR8:
1589 return PDMApicSetTPR(pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
1590
1591 default:
1592 AssertFailed();
1593 case DISCREG_CR1: /* illegal op */
1594 break;
1595 }
1596 return VERR_EM_INTERPRETER;
1597}
1598
1599
1600#ifdef LOG_ENABLED
1601static const char *emMSRtoString(uint32_t uMsr)
1602{
1603 switch (uMsr)
1604 {
1605 case MSR_IA32_APICBASE: return "MSR_IA32_APICBASE";
1606 case MSR_IA32_CR_PAT: return "MSR_IA32_CR_PAT";
1607 case MSR_IA32_SYSENTER_CS: return "MSR_IA32_SYSENTER_CS";
1608 case MSR_IA32_SYSENTER_EIP: return "MSR_IA32_SYSENTER_EIP";
1609 case MSR_IA32_SYSENTER_ESP: return "MSR_IA32_SYSENTER_ESP";
1610 case MSR_K6_EFER: return "MSR_K6_EFER";
1611 case MSR_K8_SF_MASK: return "MSR_K8_SF_MASK";
1612 case MSR_K6_STAR: return "MSR_K6_STAR";
1613 case MSR_K8_LSTAR: return "MSR_K8_LSTAR";
1614 case MSR_K8_CSTAR: return "MSR_K8_CSTAR";
1615 case MSR_K8_FS_BASE: return "MSR_K8_FS_BASE";
1616 case MSR_K8_GS_BASE: return "MSR_K8_GS_BASE";
1617 case MSR_K8_KERNEL_GS_BASE: return "MSR_K8_KERNEL_GS_BASE";
1618 case MSR_K8_TSC_AUX: return "MSR_K8_TSC_AUX";
1619 case MSR_IA32_BIOS_SIGN_ID: return "Unsupported MSR_IA32_BIOS_SIGN_ID";
1620 case MSR_IA32_PLATFORM_ID: return "Unsupported MSR_IA32_PLATFORM_ID";
1621 case MSR_IA32_BIOS_UPDT_TRIG: return "Unsupported MSR_IA32_BIOS_UPDT_TRIG";
1622 case MSR_IA32_TSC: return "MSR_IA32_TSC";
1623 case MSR_IA32_MISC_ENABLE: return "MSR_IA32_MISC_ENABLE";
1624 case MSR_IA32_MTRR_CAP: return "MSR_IA32_MTRR_CAP";
1625 case MSR_IA32_MCG_CAP: return "Unsupported MSR_IA32_MCG_CAP";
1626 case MSR_IA32_MCG_STATUS: return "Unsupported MSR_IA32_MCG_STATUS";
1627 case MSR_IA32_MCG_CTRL: return "Unsupported MSR_IA32_MCG_CTRL";
1628 case MSR_IA32_MTRR_DEF_TYPE: return "MSR_IA32_MTRR_DEF_TYPE";
1629 case MSR_K7_EVNTSEL0: return "Unsupported MSR_K7_EVNTSEL0";
1630 case MSR_K7_EVNTSEL1: return "Unsupported MSR_K7_EVNTSEL1";
1631 case MSR_K7_EVNTSEL2: return "Unsupported MSR_K7_EVNTSEL2";
1632 case MSR_K7_EVNTSEL3: return "Unsupported MSR_K7_EVNTSEL3";
1633 case MSR_IA32_MC0_CTL: return "Unsupported MSR_IA32_MC0_CTL";
1634 case MSR_IA32_MC0_STATUS: return "Unsupported MSR_IA32_MC0_STATUS";
1635 case MSR_IA32_PERFEVTSEL0: return "Unsupported MSR_IA32_PERFEVTSEL0";
1636 case MSR_IA32_PERFEVTSEL1: return "Unsupported MSR_IA32_PERFEVTSEL1";
1637 case MSR_IA32_PERF_STATUS: return "MSR_IA32_PERF_STATUS";
1638 case MSR_IA32_PLATFORM_INFO: return "MSR_IA32_PLATFORM_INFO";
1639 case MSR_IA32_PERF_CTL: return "Unsupported MSR_IA32_PERF_CTL";
1640 case MSR_K7_PERFCTR0: return "Unsupported MSR_K7_PERFCTR0";
1641 case MSR_K7_PERFCTR1: return "Unsupported MSR_K7_PERFCTR1";
1642 case MSR_K7_PERFCTR2: return "Unsupported MSR_K7_PERFCTR2";
1643 case MSR_K7_PERFCTR3: return "Unsupported MSR_K7_PERFCTR3";
1644 case MSR_IA32_PMC0: return "Unsupported MSR_IA32_PMC0";
1645 case MSR_IA32_PMC1: return "Unsupported MSR_IA32_PMC1";
1646 case MSR_IA32_PMC2: return "Unsupported MSR_IA32_PMC2";
1647 case MSR_IA32_PMC3: return "Unsupported MSR_IA32_PMC3";
1648 }
1649 return "Unknown MSR";
1650}
1651#endif /* LOG_ENABLED */
1652
1653
1654/**
1655 * Interpret RDMSR
1656 *
1657 * @returns VBox status code.
1658 * @param pVM The cross context VM structure.
1659 * @param pVCpu The cross context virtual CPU structure.
1660 * @param pRegFrame The register frame.
1661 */
1662VMM_INT_DECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1663{
1664 NOREF(pVM);
1665
1666 /* Get the current privilege level. */
1667 if (CPUMGetGuestCPL(pVCpu) != 0)
1668 {
1669 Log4(("EM: Refuse RDMSR: CPL != 0\n"));
1670 return VERR_EM_INTERPRETER; /* supervisor only */
1671 }
1672
1673 uint64_t uValue;
1674 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
1675 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1676 {
1677 Log4(("EM: Refuse RDMSR: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1678 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_READ);
1679 return VERR_EM_INTERPRETER;
1680 }
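     /* RDMSR returns the result in EDX:EAX; the 32-bit assignments below zero-extend into RAX/RDX. */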
1681 pRegFrame->rax = (uint32_t) uValue;
1682 pRegFrame->rdx = (uint32_t)(uValue >> 32);
1683 LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
1684 return VINF_SUCCESS;
1685}
1686
1687
1688/**
1689 * Interpret WRMSR
1690 *
1691 * @returns VBox status code.
1692 * @param pVM The cross context VM structure.
1693 * @param pVCpu The cross context virtual CPU structure.
1694 * @param pRegFrame The register frame.
1695 */
1696VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1697{
1698 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1699
1700 /* Check the current privilege level, this instruction is supervisor only. */
1701 if (CPUMGetGuestCPL(pVCpu) != 0)
1702 {
1703 Log4(("EM: Refuse WRMSR: CPL != 0\n"));
1704 return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
1705 }
1706
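     /* WRMSR takes the MSR index in ECX and the new value in EDX:EAX. */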
1707 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
1708 if (rcStrict != VINF_SUCCESS)
1709 {
1710 Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1711 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
1712 return VERR_EM_INTERPRETER;
1713 }
1714 LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
1715 RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
1716 NOREF(pVM);
1717 return VINF_SUCCESS;
1718}
1719
1720
1721/**
1722 * Interpret DRx write.
1723 *
1724 * @returns VBox status code.
1725 * @param pVM The cross context VM structure.
1726 * @param pVCpu The cross context virtual CPU structure.
1727 * @param pRegFrame The register frame.
1728 * @param DestRegDrx DRx register index (USE_REG_DR*)
1729 * @param SrcRegGen General purpose register index (USE_REG_E**)
1730 *
1731 */
1732VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1733{
1734 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1735 uint64_t uNewDrX;
1736 int rc;
1737 NOREF(pVM);
1738
1739 if (CPUMIsGuestIn64BitCode(pVCpu))
1740 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1741 else
1742 {
1743 uint32_t val32;
1744 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1745 uNewDrX = val32;
1746 }
1747
1748 if (RT_SUCCESS(rc))
1749 {
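         /* DR6 and DR7 have architecturally fixed bits: force the must-be-one bits and clear the bits that read as zero. */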
1750 if (DestRegDrx == 6)
1751 {
1752 uNewDrX |= X86_DR6_RA1_MASK;
1753 uNewDrX &= ~X86_DR6_RAZ_MASK;
1754 }
1755 else if (DestRegDrx == 7)
1756 {
1757 uNewDrX |= X86_DR7_RA1_MASK;
1758 uNewDrX &= ~X86_DR7_RAZ_MASK;
1759 }
1760
1761 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1762 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1763 if (RT_SUCCESS(rc))
1764 return rc;
1765 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1766 }
1767 return VERR_EM_INTERPRETER;
1768}
1769
1770
1771/**
1772 * Interpret DRx read.
1773 *
1774 * @returns VBox status code.
1775 * @param pVM The cross context VM structure.
1776 * @param pVCpu The cross context virtual CPU structure.
1777 * @param pRegFrame The register frame.
1778 * @param DestRegGen General purpose register index (USE_REG_E**)
1779 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1780 */
1781VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1782{
1783 uint64_t val64;
1784 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1785 NOREF(pVM);
1786
1787 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1788 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1789 if (CPUMIsGuestIn64BitCode(pVCpu))
1790 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1791 else
1792 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1793
1794 if (RT_SUCCESS(rc))
1795 return VINF_SUCCESS;
1796
1797 return VERR_EM_INTERPRETER;
1798}
1799
1800
1801#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
1802
1803
1804
1805
1806
1807
1808/*
1809 *
1810 * The old interpreter.
1811 * The old interpreter.
1812 * The old interpreter.
1813 * The old interpreter.
1814 * The old interpreter.
1815 *
1816 */
1817
1818DECLINLINE(int) emRamRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1819{
1820#ifdef IN_RC
1821 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1822 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1823 return rc;
1824 /*
1825 * The page pool cache may end up here in some cases because it
1826 * flushed one of the shadow mappings used by the trapping
1827 * instruction and it either flushed the TLB or the CPU reused it.
1828 */
1829#else
1830 NOREF(pVM);
1831#endif
1832 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1833}
1834
1835
1836DECLINLINE(int) emRamWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, uint32_t cb)
1837{
1838 /* Don't use MMGCRamWrite here as it does not respect zero pages, shared
1839 pages or write monitored pages. */
1840 NOREF(pVM);
1841#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
1842 int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, /*fMayTrap*/ false);
1843#else
1844 int rc = VINF_SUCCESS;
1845#endif
1846#ifdef VBOX_COMPARE_IEM_AND_EM
1847 Log(("EM Wrote: %RGv %.*Rhxs rc=%Rrc\n", GCPtrDst, RT_MAX(RT_MIN(cb, 64), 1), pvSrc, rc));
1848 g_cbEmWrote = cb;
1849 memcpy(g_abEmWrote, pvSrc, RT_MIN(cb, sizeof(g_abEmWrote)));
1850#endif
1851 return rc;
1852}
1853
1854
1855/** Convert sel:addr to a flat GC address. */
1856DECLINLINE(RTGCPTR) emConvertToFlatAddr(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, PDISOPPARAM pParam, RTGCPTR pvAddr)
1857{
1858 DISSELREG enmPrefixSeg = DISDetectSegReg(pDis, pParam);
1859 return SELMToFlat(pVM, enmPrefixSeg, pRegFrame, pvAddr);
1860}
1861
1862
1863#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1864/**
1865 * Get the mnemonic for the disassembled instruction.
1866 *
1867 * GC/R0 doesn't include the strings in the DIS tables because
1868 * of limited space.
1869 */
1870static const char *emGetMnemonic(PDISCPUSTATE pDis)
1871{
1872 switch (pDis->pCurInstr->uOpcode)
1873 {
1874 case OP_XCHG: return "Xchg";
1875 case OP_DEC: return "Dec";
1876 case OP_INC: return "Inc";
1877 case OP_POP: return "Pop";
1878 case OP_OR: return "Or";
1879 case OP_AND: return "And";
1880 case OP_MOV: return "Mov";
1881 case OP_INVLPG: return "InvlPg";
1882 case OP_CPUID: return "CpuId";
1883 case OP_MOV_CR: return "MovCRx";
1884 case OP_MOV_DR: return "MovDRx";
1885 case OP_LLDT: return "LLdt";
1886 case OP_LGDT: return "LGdt";
1887 case OP_LIDT: return "LIdt";
1888 case OP_CLTS: return "Clts";
1889 case OP_MONITOR: return "Monitor";
1890 case OP_MWAIT: return "MWait";
1891 case OP_RDMSR: return "Rdmsr";
1892 case OP_WRMSR: return "Wrmsr";
1893 case OP_ADD: return "Add";
1894 case OP_ADC: return "Adc";
1895 case OP_SUB: return "Sub";
1896 case OP_SBB: return "Sbb";
1897 case OP_RDTSC: return "Rdtsc";
1898 case OP_STI: return "Sti";
1899 case OP_CLI: return "Cli";
1900 case OP_XADD: return "XAdd";
1901 case OP_HLT: return "Hlt";
1902 case OP_IRET: return "Iret";
1903 case OP_MOVNTPS: return "MovNTPS";
1904 case OP_STOSWD: return "StosWD";
1905 case OP_WBINVD: return "WbInvd";
1906 case OP_XOR: return "Xor";
1907 case OP_BTR: return "Btr";
1908 case OP_BTS: return "Bts";
1909 case OP_BTC: return "Btc";
1910 case OP_LMSW: return "Lmsw";
1911 case OP_SMSW: return "Smsw";
1912 case OP_CMPXCHG: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg" : "CmpXchg";
1913 case OP_CMPXCHG8B: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg8b" : "CmpXchg8b";
1914
1915 default:
1916 Log(("Unknown opcode %d\n", pDis->pCurInstr->uOpcode));
1917 return "???";
1918 }
1919}
1920#endif /* VBOX_STRICT || LOG_ENABLED */
1921
1922
1923/**
1924 * XCHG instruction emulation.
1925 */
1926static int emInterpretXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1927{
1928 DISQPVPARAMVAL param1, param2;
1929 NOREF(pvFault);
1930
1931 /* Source to make DISQueryParamVal read the register value - ugly hack */
1932 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
1933 if(RT_FAILURE(rc))
1934 return VERR_EM_INTERPRETER;
1935
1936 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
1937 if(RT_FAILURE(rc))
1938 return VERR_EM_INTERPRETER;
1939
1940#ifdef IN_RC
1941 if (TRPMHasTrap(pVCpu))
1942 {
1943 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
1944 {
1945#endif
1946 RTGCPTR pParam1 = 0, pParam2 = 0;
1947 uint64_t valpar1, valpar2;
1948
1949 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
1950 switch(param1.type)
1951 {
1952 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
1953 valpar1 = param1.val.val64;
1954 break;
1955
1956 case DISQPV_TYPE_ADDRESS:
1957 pParam1 = (RTGCPTR)param1.val.val64;
1958 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
1959 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
1960 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
1961 if (RT_FAILURE(rc))
1962 {
1963 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1964 return VERR_EM_INTERPRETER;
1965 }
1966 break;
1967
1968 default:
1969 AssertFailed();
1970 return VERR_EM_INTERPRETER;
1971 }
1972
1973 switch(param2.type)
1974 {
1975 case DISQPV_TYPE_ADDRESS:
1976 pParam2 = (RTGCPTR)param2.val.val64;
1977 pParam2 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pParam2);
1978 EM_ASSERT_FAULT_RETURN(pParam2 == pvFault, VERR_EM_INTERPRETER);
1979 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar2, pParam2, param2.size);
1980 if (RT_FAILURE(rc))
1981 {
1982 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc));
     return VERR_EM_INTERPRETER; /* don't fall through with valpar2 uninitialized */
1983 }
1984 break;
1985
1986 case DISQPV_TYPE_IMMEDIATE:
1987 valpar2 = param2.val.val64;
1988 break;
1989
1990 default:
1991 AssertFailed();
1992 return VERR_EM_INTERPRETER;
1993 }
1994
1995 /* Write value of parameter 2 to parameter 1 (reg or memory address) */
1996 if (pParam1 == 0)
1997 {
1998 Assert(param1.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
1999 switch(param1.size)
2000 {
2001 case 1: //special case for AH etc
2002 rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t )valpar2); break;
2003 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)valpar2); break;
2004 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)valpar2); break;
2005 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, valpar2); break;
2006 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2007 }
2008 if (RT_FAILURE(rc))
2009 return VERR_EM_INTERPRETER;
2010 }
2011 else
2012 {
2013 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar2, param1.size);
2014 if (RT_FAILURE(rc))
2015 {
2016 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2017 return VERR_EM_INTERPRETER;
2018 }
2019 }
2020
2021 /* Write value of parameter 1 to parameter 2 (reg or memory address) */
2022 if (pParam2 == 0)
2023 {
2024 Assert(param2.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
2025 switch(param2.size)
2026 {
2027 case 1: //special case for AH etc
2028 rc = DISWriteReg8(pRegFrame, pDis->Param2.Base.idxGenReg, (uint8_t )valpar1); break;
2029 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param2.Base.idxGenReg, (uint16_t)valpar1); break;
2030 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param2.Base.idxGenReg, (uint32_t)valpar1); break;
2031 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param2.Base.idxGenReg, valpar1); break;
2032 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2033 }
2034 if (RT_FAILURE(rc))
2035 return VERR_EM_INTERPRETER;
2036 }
2037 else
2038 {
2039 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam2, &valpar1, param2.size);
2040 if (RT_FAILURE(rc))
2041 {
2042 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc));
2043 return VERR_EM_INTERPRETER;
2044 }
2045 }
2046
2047 *pcbSize = param2.size;
2048 return VINF_SUCCESS;
2049#ifdef IN_RC
2050 }
2051 }
2052 return VERR_EM_INTERPRETER;
2053#endif
2054}
2055
2056
2057/**
2058 * INC and DEC emulation.
2059 */
2060static int emInterpretIncDec(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2061 PFNEMULATEPARAM2 pfnEmulate)
2062{
2063 DISQPVPARAMVAL param1;
2064 NOREF(pvFault);
2065
2066 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2067 if(RT_FAILURE(rc))
2068 return VERR_EM_INTERPRETER;
2069
2070#ifdef IN_RC
2071 if (TRPMHasTrap(pVCpu))
2072 {
2073 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2074 {
2075#endif
2076 RTGCPTR pParam1 = 0;
2077 uint64_t valpar1;
2078
2079 if (param1.type == DISQPV_TYPE_ADDRESS)
2080 {
2081 pParam1 = (RTGCPTR)param1.val.val64;
2082 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2083#ifdef IN_RC
2084 /* Safety check (in theory it could cross a page boundary and fault there though) */
2085 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2086#endif
2087 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2088 if (RT_FAILURE(rc))
2089 {
2090 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2091 return VERR_EM_INTERPRETER;
2092 }
2093 }
2094 else
2095 {
2096 AssertFailed();
2097 return VERR_EM_INTERPRETER;
2098 }
2099
2100 uint32_t eflags;
2101
2102 eflags = pfnEmulate(&valpar1, param1.size);
2103
2104 /* Write result back */
2105 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2106 if (RT_FAILURE(rc))
2107 {
2108 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2109 return VERR_EM_INTERPRETER;
2110 }
2111
2112 /* Update guest's eflags and finish. */
2113 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2114 | (eflags & (X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2115
2116 /* All done! */
2117 *pcbSize = param1.size;
2118 return VINF_SUCCESS;
2119#ifdef IN_RC
2120 }
2121 }
2122 return VERR_EM_INTERPRETER;
2123#endif
2124}
2125
2126
2127/**
2128 * POP Emulation.
2129 */
2130static int emInterpretPop(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2131{
2132 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
2133 DISQPVPARAMVAL param1;
2134 NOREF(pvFault);
2135
2136 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2137 if(RT_FAILURE(rc))
2138 return VERR_EM_INTERPRETER;
2139
2140#ifdef IN_RC
2141 if (TRPMHasTrap(pVCpu))
2142 {
2143 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2144 {
2145#endif
2146 RTGCPTR pParam1 = 0;
2147 uint32_t valpar1;
2148 RTGCPTR pStackVal;
2149
2150 /* Read stack value first */
2151 if (CPUMGetGuestCodeBits(pVCpu) == 16)
2152 return VERR_EM_INTERPRETER; /* No legacy 16 bits stuff here, please. */
2153
2154 /* Convert address; don't bother checking limits etc, as we only read here */
2155 pStackVal = SELMToFlat(pVM, DISSELREG_SS, pRegFrame, (RTGCPTR)pRegFrame->esp);
2156 if (pStackVal == 0)
2157 return VERR_EM_INTERPRETER;
2158
2159 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pStackVal, param1.size);
2160 if (RT_FAILURE(rc))
2161 {
2162 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pStackVal, param1.size, rc));
2163 return VERR_EM_INTERPRETER;
2164 }
2165
2166 if (param1.type == DISQPV_TYPE_ADDRESS)
2167 {
2168 pParam1 = (RTGCPTR)param1.val.val64;
2169
2170 /* pop [esp+xx] uses esp after the actual pop! */
2171 AssertCompile(DISGREG_ESP == DISGREG_SP);
2172 if ( (pDis->Param1.fUse & DISUSE_BASE)
2173 && (pDis->Param1.fUse & (DISUSE_REG_GEN16|DISUSE_REG_GEN32))
2174 && pDis->Param1.Base.idxGenReg == DISGREG_ESP
2175 )
2176 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + param1.size);
2177
2178 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2179 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault || (RTGCPTR)pRegFrame->esp == pvFault, VERR_EM_INTERPRETER);
2180 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2181 if (RT_FAILURE(rc))
2182 {
2183 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2184 return VERR_EM_INTERPRETER;
2185 }
2186
2187 /* Update ESP as the last step */
2188 pRegFrame->esp += param1.size;
2189 }
2190 else
2191 {
2192#ifndef DEBUG_bird // annoying assertion.
2193 AssertFailed();
2194#endif
2195 return VERR_EM_INTERPRETER;
2196 }
2197
2198 /* All done! */
2199 *pcbSize = param1.size;
2200 return VINF_SUCCESS;
2201#ifdef IN_RC
2202 }
2203 }
2204 return VERR_EM_INTERPRETER;
2205#endif
2206}
2207
2208
2209/**
2210 * XOR/OR/AND Emulation.
2211 */
2212static int emInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2213 PFNEMULATEPARAM3 pfnEmulate)
2214{
2215 DISQPVPARAMVAL param1, param2;
2216 NOREF(pvFault);
2217
2218 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2219 if(RT_FAILURE(rc))
2220 return VERR_EM_INTERPRETER;
2221
2222 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2223 if(RT_FAILURE(rc))
2224 return VERR_EM_INTERPRETER;
2225
2226#ifdef IN_RC
2227 if (TRPMHasTrap(pVCpu))
2228 {
2229 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2230 {
2231#endif
2232 RTGCPTR pParam1;
2233 uint64_t valpar1, valpar2;
2234
2235 if (pDis->Param1.cb != pDis->Param2.cb)
2236 {
2237 if (pDis->Param1.cb < pDis->Param2.cb)
2238 {
2239 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2240 return VERR_EM_INTERPRETER;
2241 }
2242 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2243 pDis->Param2.cb = pDis->Param1.cb;
2244 param2.size = param1.size;
2245 }
2246
2247 /* The destination is always a virtual address */
2248 if (param1.type == DISQPV_TYPE_ADDRESS)
2249 {
2250 pParam1 = (RTGCPTR)param1.val.val64;
2251 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2252 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2253 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2254 if (RT_FAILURE(rc))
2255 {
2256 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2257 return VERR_EM_INTERPRETER;
2258 }
2259 }
2260 else
2261 {
2262 AssertFailed();
2263 return VERR_EM_INTERPRETER;
2264 }
2265
2266 /* Register or immediate data */
2267 switch(param2.type)
2268 {
2269 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2270 valpar2 = param2.val.val64;
2271 break;
2272
2273 default:
2274 AssertFailed();
2275 return VERR_EM_INTERPRETER;
2276 }
2277
2278 LogFlow(("emInterpretOrXorAnd %s %RGv %RX64 - %RX64 size %d (%d)\n", emGetMnemonic(pDis), pParam1, valpar1, valpar2, param2.size, param1.size));
2279
2280 /* Data read, emulate instruction. */
2281 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2282
2283 LogFlow(("emInterpretOrXorAnd %s result %RX64\n", emGetMnemonic(pDis), valpar1));
2284
2285 /* Update guest's eflags and finish. */
2286 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2287 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2288
2289 /* And write it back */
2290 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2291 if (RT_SUCCESS(rc))
2292 {
2293 /* All done! */
2294 *pcbSize = param2.size;
2295 return VINF_SUCCESS;
2296 }
2297#ifdef IN_RC
2298 }
2299 }
2300#endif
2301 return VERR_EM_INTERPRETER;
2302}
2303
2304
2305#ifndef VBOX_COMPARE_IEM_AND_EM
2306/**
2307 * LOCK XOR/OR/AND Emulation.
2308 */
2309static int emInterpretLockOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2310 uint32_t *pcbSize, PFNEMULATELOCKPARAM3 pfnEmulate)
2311{
2312 void *pvParam1;
2313 DISQPVPARAMVAL param1, param2;
2314 NOREF(pvFault);
2315
2316#if HC_ARCH_BITS == 32
2317 Assert(pDis->Param1.cb <= 4);
2318#endif
2319
2320 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2321 if(RT_FAILURE(rc))
2322 return VERR_EM_INTERPRETER;
2323
2324 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2325 if(RT_FAILURE(rc))
2326 return VERR_EM_INTERPRETER;
2327
2328 if (pDis->Param1.cb != pDis->Param2.cb)
2329 {
2330 AssertMsgReturn(pDis->Param1.cb >= pDis->Param2.cb, /* should never happen! */
2331 ("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb),
2332 VERR_EM_INTERPRETER);
2333
2334 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2335 pDis->Param2.cb = pDis->Param1.cb;
2336 param2.size = param1.size;
2337 }
2338
2339#ifdef IN_RC
2340 /* Safety check (in theory it could cross a page boundary and fault there though) */
2341 Assert( TRPMHasTrap(pVCpu)
2342 && (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW));
2343 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
2344#endif
2345
2346 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2347 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2348 RTGCUINTREG ValPar2 = param2.val.val64;
2349
2350 /* The destination is always a virtual address */
2351 AssertReturn(param1.type == DISQPV_TYPE_ADDRESS, VERR_EM_INTERPRETER);
2352
2353 RTGCPTR GCPtrPar1 = param1.val.val64;
2354 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2355 PGMPAGEMAPLOCK Lock;
2356 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2357 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2358
2359 /* Try to emulate it with a one-shot #PF handler in place. (RC) */
2360 Log2(("%s %RGv imm%d=%RX64\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2361
2362 RTGCUINTREG32 eflags = 0;
2363 rc = pfnEmulate(pvParam1, ValPar2, pDis->Param2.cb, &eflags);
2364 PGMPhysReleasePageMappingLock(pVM, &Lock);
2365 if (RT_FAILURE(rc))
2366 {
2367 Log(("%s %RGv imm%d=%RX64 -> emulation failed due to page fault!\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2368 return VERR_EM_INTERPRETER;
2369 }
2370
2371 /* Update guest's eflags and finish. */
2372 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2373 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2374
2375 *pcbSize = param2.size;
2376 return VINF_SUCCESS;
2377}
2378#endif /* !VBOX_COMPARE_IEM_AND_EM */
2379
2380
2381/**
2382 * ADD, ADC & SUB Emulation.
2383 */
2384static int emInterpretAddSub(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2385 PFNEMULATEPARAM3 pfnEmulate)
2386{
2387 NOREF(pvFault);
2388 DISQPVPARAMVAL param1, param2;
2389 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2390 if(RT_FAILURE(rc))
2391 return VERR_EM_INTERPRETER;
2392
2393 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2394 if(RT_FAILURE(rc))
2395 return VERR_EM_INTERPRETER;
2396
2397#ifdef IN_RC
2398 if (TRPMHasTrap(pVCpu))
2399 {
2400 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2401 {
2402#endif
2403 RTGCPTR pParam1;
2404 uint64_t valpar1, valpar2;
2405
2406 if (pDis->Param1.cb != pDis->Param2.cb)
2407 {
2408 if (pDis->Param1.cb < pDis->Param2.cb)
2409 {
2410 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2411 return VERR_EM_INTERPRETER;
2412 }
2413 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2414 pDis->Param2.cb = pDis->Param1.cb;
2415 param2.size = param1.size;
2416 }
2417
2418 /* The destination is always a virtual address */
2419 if (param1.type == DISQPV_TYPE_ADDRESS)
2420 {
2421 pParam1 = (RTGCPTR)param1.val.val64;
2422 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2423 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2424 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2425 if (RT_FAILURE(rc))
2426 {
2427 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2428 return VERR_EM_INTERPRETER;
2429 }
2430 }
2431 else
2432 {
2433#ifndef DEBUG_bird
2434 AssertFailed();
2435#endif
2436 return VERR_EM_INTERPRETER;
2437 }
2438
2439 /* Register or immediate data */
2440 switch(param2.type)
2441 {
2442 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2443 valpar2 = param2.val.val64;
2444 break;
2445
2446 default:
2447 AssertFailed();
2448 return VERR_EM_INTERPRETER;
2449 }
2450
2451 /* Data read, emulate instruction. */
2452 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2453
2454 /* Update guest's eflags and finish. */
2455 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2456 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2457
2458 /* And write it back */
2459 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2460 if (RT_SUCCESS(rc))
2461 {
2462 /* All done! */
2463 *pcbSize = param2.size;
2464 return VINF_SUCCESS;
2465 }
2466#ifdef IN_RC
2467 }
2468 }
2469#endif
2470 return VERR_EM_INTERPRETER;
2471}
2472
2473
2474/**
2475 * ADC Emulation.
2476 */
2477static int emInterpretAdc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2478{
2479 if (pRegFrame->eflags.Bits.u1CF)
2480 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdcWithCarrySet);
2481 else
2482 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdd);
2483}
2484
2485
2486/**
2487 * BTR/C/S Emulation.
2488 */
2489static int emInterpretBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2490 PFNEMULATEPARAM2UINT32 pfnEmulate)
2491{
2492 DISQPVPARAMVAL param1, param2;
2493 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2494 if(RT_FAILURE(rc))
2495 return VERR_EM_INTERPRETER;
2496
2497 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2498 if(RT_FAILURE(rc))
2499 return VERR_EM_INTERPRETER;
2500
2501#ifdef IN_RC
2502 if (TRPMHasTrap(pVCpu))
2503 {
2504 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2505 {
2506#endif
2507 RTGCPTR pParam1;
2508 uint64_t valpar1 = 0, valpar2;
2509 uint32_t eflags;
2510
2511 /* The destination is always a virtual address */
2512 if (param1.type != DISQPV_TYPE_ADDRESS)
2513 return VERR_EM_INTERPRETER;
2514
2515 pParam1 = (RTGCPTR)param1.val.val64;
2516 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2517
2518 /* Register or immediate data */
2519 switch(param2.type)
2520 {
2521 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2522 valpar2 = param2.val.val64;
2523 break;
2524
2525 default:
2526 AssertFailed();
2527 return VERR_EM_INTERPRETER;
2528 }
2529
2530 Log2(("emInterpret%s: pvFault=%RGv pParam1=%RGv val2=%x\n", emGetMnemonic(pDis), pvFault, pParam1, valpar2));
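         /* This emulation addresses the byte that contains the bit: advance the pointer by (bit offset / 8)
            and keep only the bit index within that byte (masked with 7 below). */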
2531 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + valpar2/8);
2532 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)pParam1 & ~3) == pvFault, VERR_EM_INTERPRETER); NOREF(pvFault);
2533 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, 1);
2534 if (RT_FAILURE(rc))
2535 {
2536 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2537 return VERR_EM_INTERPRETER;
2538 }
2539
2540 Log2(("emInterpretBtx: val=%x\n", valpar1));
2541 /* Data read, emulate bit test instruction. */
2542 eflags = pfnEmulate(&valpar1, valpar2 & 0x7);
2543
2544 Log2(("emInterpretBtx: val=%x CF=%d\n", valpar1, !!(eflags & X86_EFL_CF)));
2545
2546 /* Update guest's eflags and finish. */
2547 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2548 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2549
2550 /* And write it back */
2551 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, 1);
2552 if (RT_SUCCESS(rc))
2553 {
2554 /* All done! */
2555 *pcbSize = 1;
2556 return VINF_SUCCESS;
2557 }
2558#ifdef IN_RC
2559 }
2560 }
2561#endif
2562 return VERR_EM_INTERPRETER;
2563}
2564
2565
2566#ifndef VBOX_COMPARE_IEM_AND_EM
2567/**
2568 * LOCK BTR/C/S Emulation.
2569 */
2570static int emInterpretLockBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2571 uint32_t *pcbSize, PFNEMULATELOCKPARAM2 pfnEmulate)
2572{
2573 void *pvParam1;
2574
2575 DISQPVPARAMVAL param1, param2;
2576 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2577 if(RT_FAILURE(rc))
2578 return VERR_EM_INTERPRETER;
2579
2580 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2581 if(RT_FAILURE(rc))
2582 return VERR_EM_INTERPRETER;
2583
2584 /* The destination is always a virtual address */
2585 if (param1.type != DISQPV_TYPE_ADDRESS)
2586 return VERR_EM_INTERPRETER;
2587
2588 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2589 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2590 uint64_t ValPar2 = param2.val.val64;
2591
2592 /* Adjust the parameters so what we're dealing with is a bit within the byte pointed to. */
2593 RTGCPTR GCPtrPar1 = param1.val.val64;
2594 GCPtrPar1 = (GCPtrPar1 + ValPar2 / 8);
2595 ValPar2 &= 7;
2596
2597 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2598#ifdef IN_RC
2599 Assert(TRPMHasTrap(pVCpu));
2600 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)GCPtrPar1 & ~(RTGCUINTPTR)3) == pvFault, VERR_EM_INTERPRETER);
2601#endif
2602
2603 PGMPAGEMAPLOCK Lock;
2604 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2605 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2606
2607 Log2(("emInterpretLockBitTest %s: pvFault=%RGv GCPtrPar1=%RGv imm=%RX64\n", emGetMnemonic(pDis), pvFault, GCPtrPar1, ValPar2));
2608 NOREF(pvFault);
2609
2610 /* Try to emulate it with a one-shot #PF handler in place. (RC) */
2611 RTGCUINTREG32 eflags = 0;
2612 rc = pfnEmulate(pvParam1, ValPar2, &eflags);
2613 PGMPhysReleasePageMappingLock(pVM, &Lock);
2614 if (RT_FAILURE(rc))
2615 {
2616 Log(("emInterpretLockBitTest %s: %RGv imm%d=%RX64 -> emulation failed due to page fault!\n",
2617 emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2618 return VERR_EM_INTERPRETER;
2619 }
2620
2621 Log2(("emInterpretLockBitTest %s: GCPtrPar1=%RGv imm=%RX64 CF=%d\n", emGetMnemonic(pDis), GCPtrPar1, ValPar2, !!(eflags & X86_EFL_CF)));
2622
2623 /* Update guest's eflags and finish. */
2624 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2625 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2626
2627 *pcbSize = 1;
2628 return VINF_SUCCESS;
2629}
2630#endif /* !VBOX_COMPARE_IEM_AND_EM */
2631
2632
2633/**
2634 * MOV emulation.
2635 */
2636static int emInterpretMov(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2637{
2638 NOREF(pvFault);
2639 DISQPVPARAMVAL param1, param2;
2640 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2641 if(RT_FAILURE(rc))
2642 return VERR_EM_INTERPRETER;
2643
2644 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2645 if(RT_FAILURE(rc))
2646 return VERR_EM_INTERPRETER;
2647
2648 /* If destination is a segment register, punt. We can't handle it here.
2649 * NB: Source can be a register and still trigger a #PF!
2650 */
2651 if (RT_UNLIKELY(pDis->Param1.fUse == DISUSE_REG_SEG))
2652 return VERR_EM_INTERPRETER;
2653
2654 if (param1.type == DISQPV_TYPE_ADDRESS)
2655 {
2656 RTGCPTR pDest;
2657 uint64_t val64;
2658
2659 switch(param1.type)
2660 {
2661 case DISQPV_TYPE_IMMEDIATE:
2662 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2663 return VERR_EM_INTERPRETER;
2664 /* fallthru */
2665
2666 case DISQPV_TYPE_ADDRESS:
2667 pDest = (RTGCPTR)param1.val.val64;
2668 pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
2669 break;
2670
2671 default:
2672 AssertFailed();
2673 return VERR_EM_INTERPRETER;
2674 }
2675
2676 switch(param2.type)
2677 {
2678 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
2679 val64 = param2.val.val64;
2680 break;
2681
2682 default:
2683 Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
2684 return VERR_EM_INTERPRETER;
2685 }
2686#ifdef LOG_ENABLED
2687 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2688 LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
2689 else
2690 LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
2691#endif
2692
2693 Assert(param2.size <= 8 && param2.size > 0);
2694 EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
2695 rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
2696 if (RT_FAILURE(rc))
2697 return VERR_EM_INTERPRETER;
2698
2699 *pcbSize = param2.size;
2700 }
2701#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1)
2702 /* mov xx, cs instruction is dangerous in raw mode and replaced by an 'int3' by csam/patm. */
2703 else if ( param1.type == DISQPV_TYPE_REGISTER
2704 && param2.type == DISQPV_TYPE_REGISTER)
2705 {
2706 AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER);
2707 AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER);
2708 AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER);
2709
2710 uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
2711 uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl;
2712
2713 Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS));
2714 switch (param1.size)
2715 {
2716 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) uValCS); break;
2717 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break;
2718 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break;
2719 default:
2720 AssertFailed();
2721 return VERR_EM_INTERPRETER;
2722 }
2723 AssertRCReturn(rc, rc);
2724 }
2725#endif
2726 else
2727 { /* read fault */
2728 RTGCPTR pSrc;
2729 uint64_t val64;
2730
2731 /* Source */
2732 switch(param2.type)
2733 {
2734 case DISQPV_TYPE_IMMEDIATE:
2735 if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2736 return VERR_EM_INTERPRETER;
2737 /* fallthru */
2738
2739 case DISQPV_TYPE_ADDRESS:
2740 pSrc = (RTGCPTR)param2.val.val64;
2741 pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
2742 break;
2743
2744 default:
2745 return VERR_EM_INTERPRETER;
2746 }
2747
2748 Assert(param1.size <= 8 && param1.size > 0);
2749 EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
2750 rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
2751 if (RT_FAILURE(rc))
2752 return VERR_EM_INTERPRETER;
2753
2754 /* Destination */
2755 switch(param1.type)
2756 {
2757 case DISQPV_TYPE_REGISTER:
2758 switch(param1.size)
2759 {
2760 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break;
2761 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
2762 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
2763 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
2764 default:
2765 return VERR_EM_INTERPRETER;
2766 }
2767 if (RT_FAILURE(rc))
2768 return rc;
2769 break;
2770
2771 default:
2772 return VERR_EM_INTERPRETER;
2773 }
2774#ifdef LOG_ENABLED
2775 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2776 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
2777 else
2778 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
2779#endif
2780 }
2781 return VINF_SUCCESS;
2782}
2783
2784
2785#ifndef IN_RC
2786/**
2787 * [REP] STOSWD emulation
2788 */
2789static int emInterpretStosWD(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2790{
2791 int rc;
2792 RTGCPTR GCDest, GCOffset;
2793 uint32_t cbSize;
2794 uint64_t cTransfers;
2795 int offIncrement;
2796 NOREF(pvFault);
2797
2798 /* Don't support any prefixes other than address size, operand size, REP and REX. */
2799 if ((pDis->fPrefix & ~(DISPREFIX_ADDRSIZE|DISPREFIX_OPSIZE|DISPREFIX_REP|DISPREFIX_REX)))
2800 return VERR_EM_INTERPRETER;
2801
2802 switch (pDis->uAddrMode)
2803 {
2804 case DISCPUMODE_16BIT:
2805 GCOffset = pRegFrame->di;
2806 cTransfers = pRegFrame->cx;
2807 break;
2808 case DISCPUMODE_32BIT:
2809 GCOffset = pRegFrame->edi;
2810 cTransfers = pRegFrame->ecx;
2811 break;
2812 case DISCPUMODE_64BIT:
2813 GCOffset = pRegFrame->rdi;
2814 cTransfers = pRegFrame->rcx;
2815 break;
2816 default:
2817 AssertFailed();
2818 return VERR_EM_INTERPRETER;
2819 }
2820
2821 GCDest = SELMToFlat(pVM, DISSELREG_ES, pRegFrame, GCOffset);
2822 switch (pDis->uOpMode)
2823 {
2824 case DISCPUMODE_16BIT:
2825 cbSize = 2;
2826 break;
2827 case DISCPUMODE_32BIT:
2828 cbSize = 4;
2829 break;
2830 case DISCPUMODE_64BIT:
2831 cbSize = 8;
2832 break;
2833 default:
2834 AssertFailed();
2835 return VERR_EM_INTERPRETER;
2836 }
2837
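     /* EFLAGS.DF selects the store direction: decrement (r/e)di when set, increment when clear. */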
2838 offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cbSize : (signed)cbSize;
2839
2840 if (!(pDis->fPrefix & DISPREFIX_REP))
2841 {
2842 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize));
2843
2844 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2845 if (RT_FAILURE(rc))
2846 return VERR_EM_INTERPRETER;
2847 Assert(rc == VINF_SUCCESS);
2848
2849 /* Update (e/r)di. */
2850 switch (pDis->uAddrMode)
2851 {
2852 case DISCPUMODE_16BIT:
2853 pRegFrame->di += offIncrement;
2854 break;
2855 case DISCPUMODE_32BIT:
2856 pRegFrame->edi += offIncrement;
2857 break;
2858 case DISCPUMODE_64BIT:
2859 pRegFrame->rdi += offIncrement;
2860 break;
2861 default:
2862 AssertFailed();
2863 return VERR_EM_INTERPRETER;
2864 }
2865
2866 }
2867 else
2868 {
2869 if (!cTransfers)
2870 return VINF_SUCCESS;
2871
2872 /*
2873 * Do *not* try to emulate cross-page stuff here because we don't know what might
2874 * be waiting for us on the subsequent pages. The caller has only asked us to
2875 * ignore access handlers for the current page.
2876 * This also fends off big stores which would quickly kill PGMR0DynMap.
2877 */
2878 if ( cbSize > PAGE_SIZE
2879 || cTransfers > PAGE_SIZE
2880 || (GCDest >> PAGE_SHIFT) != ((GCDest + offIncrement * cTransfers) >> PAGE_SHIFT))
2881 {
2882 Log(("STOSWD is crosses pages, chicken out to the recompiler; GCDest=%RGv cbSize=%#x offIncrement=%d cTransfers=%#x\n",
2883 GCDest, cbSize, offIncrement, cTransfers));
2884 return VERR_EM_INTERPRETER;
2885 }
2886
2887 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d cTransfers=%x DF=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize, cTransfers, pRegFrame->eflags.Bits.u1DF));
2888 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2889 rc = PGMVerifyAccess(pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
2890 cTransfers * cbSize,
2891 X86_PTE_RW | (CPUMGetGuestCPL(pVCpu) == 3 ? X86_PTE_US : 0));
2892 if (rc != VINF_SUCCESS)
2893 {
2894 Log(("STOSWD will generate a trap -> recompiler, rc=%d\n", rc));
2895 return VERR_EM_INTERPRETER;
2896 }
2897
2898 /* REP case */
2899 while (cTransfers)
2900 {
2901 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2902 if (RT_FAILURE(rc))
2903 {
2904 rc = VERR_EM_INTERPRETER;
2905 break;
2906 }
2907
2908 Assert(rc == VINF_SUCCESS);
2909 GCOffset += offIncrement;
2910 GCDest += offIncrement;
2911 cTransfers--;
2912 }
2913
2914 /* Update the registers. */
2915 switch (pDis->uAddrMode)
2916 {
2917 case DISCPUMODE_16BIT:
2918 pRegFrame->di = GCOffset;
2919 pRegFrame->cx = cTransfers;
2920 break;
2921 case DISCPUMODE_32BIT:
2922 pRegFrame->edi = GCOffset;
2923 pRegFrame->ecx = cTransfers;
2924 break;
2925 case DISCPUMODE_64BIT:
2926 pRegFrame->rdi = GCOffset;
2927 pRegFrame->rcx = cTransfers;
2928 break;
2929 default:
2930 AssertFailed();
2931 return VERR_EM_INTERPRETER;
2932 }
2933 }
2934
2935 *pcbSize = cbSize;
2936 return rc;
2937}
2938#endif /* !IN_RC */
2939
2940
2941/**
2942 * [LOCK] CMPXCHG emulation.
2943 */
2944static int emInterpretCmpXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2945{
2946 DISQPVPARAMVAL param1, param2;
2947 NOREF(pvFault);
2948
2949#if HC_ARCH_BITS == 32
2950 Assert(pDis->Param1.cb <= 4);
2951#endif
2952
2953 /* Source to make DISQueryParamVal read the register value - ugly hack */
2954 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2955 if(RT_FAILURE(rc))
2956 return VERR_EM_INTERPRETER;
2957
2958 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2959 if(RT_FAILURE(rc))
2960 return VERR_EM_INTERPRETER;
2961
2962 uint64_t valpar;
2963 switch(param2.type)
2964 {
2965 case DISQPV_TYPE_IMMEDIATE: /* register actually */
2966 valpar = param2.val.val64;
2967 break;
2968
2969 default:
2970 return VERR_EM_INTERPRETER;
2971 }
2972
2973 PGMPAGEMAPLOCK Lock;
2974 RTGCPTR GCPtrPar1;
2975 void *pvParam1;
2976 uint64_t eflags;
2977
2978 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
2979 switch(param1.type)
2980 {
2981 case DISQPV_TYPE_ADDRESS:
2982 GCPtrPar1 = param1.val.val64;
2983 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2984
2985 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2986 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2987 break;
2988
2989 default:
2990 return VERR_EM_INTERPRETER;
2991 }
2992
2993 LogFlow(("%s %RGv rax=%RX64 %RX64\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar));
2994
2995#ifndef VBOX_COMPARE_IEM_AND_EM
2996 if (pDis->fPrefix & DISPREFIX_LOCK)
2997 eflags = EMEmulateLockCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
2998 else
2999 eflags = EMEmulateCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
3000#else /* VBOX_COMPARE_IEM_AND_EM */
3001 uint64_t u64;
3002 switch (pDis->Param2.cb)
3003 {
3004 case 1: u64 = *(uint8_t *)pvParam1; break;
3005 case 2: u64 = *(uint16_t *)pvParam1; break;
3006 case 4: u64 = *(uint32_t *)pvParam1; break;
3007 default:
3008 case 8: u64 = *(uint64_t *)pvParam1; break;
3009 }
3010 eflags = EMEmulateCmpXchg(&u64, &pRegFrame->rax, valpar, pDis->Param2.cb);
3011 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3012#endif /* VBOX_COMPARE_IEM_AND_EM */
3013
3014 LogFlow(("%s %RGv rax=%RX64 %RX64 ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar, !!(eflags & X86_EFL_ZF)));
3015
3016 /* Update guest's eflags and finish. */
3017 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3018 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3019
3020 *pcbSize = param2.size;
3021 PGMPhysReleasePageMappingLock(pVM, &Lock);
3022 return VINF_SUCCESS;
3023}
3024
3025
3026/**
3027 * [LOCK] CMPXCHG8B emulation.
3028 */
3029static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3030{
3031 DISQPVPARAMVAL param1;
3032 NOREF(pvFault);
3033
3034 /* Source to make DISQueryParamVal read the register value - ugly hack */
3035 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3036 if(RT_FAILURE(rc))
3037 return VERR_EM_INTERPRETER;
3038
3039 RTGCPTR GCPtrPar1;
3040 void *pvParam1;
3041 uint64_t eflags;
3042 PGMPAGEMAPLOCK Lock;
3043
3044 AssertReturn(pDis->Param1.cb == 8, VERR_EM_INTERPRETER);
3045 switch(param1.type)
3046 {
3047 case DISQPV_TYPE_ADDRESS:
3048 GCPtrPar1 = param1.val.val64;
3049 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
3050
3051 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3052 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3053 break;
3054
3055 default:
3056 return VERR_EM_INTERPRETER;
3057 }
3058
3059 LogFlow(("%s %RGv=%p eax=%08x\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax));
3060
3061#ifndef VBOX_COMPARE_IEM_AND_EM
3062 if (pDis->fPrefix & DISPREFIX_LOCK)
3063 eflags = EMEmulateLockCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3064 else
3065 eflags = EMEmulateCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3066#else /* VBOX_COMPARE_IEM_AND_EM */
3067 uint64_t u64 = *(uint64_t *)pvParam1;
3068 eflags = EMEmulateCmpXchg8b(&u64, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3069 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, sizeof(u64)); AssertRCSuccess(rc2);
3070#endif /* VBOX_COMPARE_IEM_AND_EM */
3071
3072 LogFlow(("%s %RGv=%p eax=%08x ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax, !!(eflags & X86_EFL_ZF)));
3073
3074 /* Update guest's eflags and finish; note that *only* ZF is affected. */
3075 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_ZF))
3076 | (eflags & (X86_EFL_ZF));
3077
3078 *pcbSize = 8;
3079 PGMPhysReleasePageMappingLock(pVM, &Lock);
3080 return VINF_SUCCESS;
3081}
3082
3083
3084#ifdef IN_RC /** @todo test+enable for HM as well. */
3085/**
3086 * [LOCK] XADD emulation.
3087 */
3088static int emInterpretXAdd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3089{
3090 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
3091 DISQPVPARAMVAL param1;
3092 void *pvParamReg2;
3093 size_t cbParamReg2;
3094 NOREF(pvFault);
3095
3096 /* Source to make DISQueryParamVal read the register value - ugly hack */
3097 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3098 if(RT_FAILURE(rc))
3099 return VERR_EM_INTERPRETER;
3100
3101 rc = DISQueryParamRegPtr(pRegFrame, pDis, &pDis->Param2, &pvParamReg2, &cbParamReg2);
3102 Assert(cbParamReg2 <= 4);
3103 if(RT_FAILURE(rc))
3104 return VERR_EM_INTERPRETER;
3105
3106#ifdef IN_RC
3107 if (TRPMHasTrap(pVCpu))
3108 {
3109 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
3110 {
3111#endif
3112 RTGCPTR GCPtrPar1;
3113 void *pvParam1;
3114 uint32_t eflags;
3115 PGMPAGEMAPLOCK Lock;
3116
3117 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
3118 switch(param1.type)
3119 {
3120 case DISQPV_TYPE_ADDRESS:
3121 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, (RTRCUINTPTR)param1.val.val64);
3122#ifdef IN_RC
3123 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
3124#endif
3125
3126 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3127 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3128 break;
3129
3130 default:
3131 return VERR_EM_INTERPRETER;
3132 }
3133
3134 LogFlow(("XAdd %RGv=%p reg=%08llx\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2));
3135
3136#ifndef VBOX_COMPARE_IEM_AND_EM
3137 if (pDis->fPrefix & DISPREFIX_LOCK)
3138 eflags = EMEmulateLockXAdd(pvParam1, pvParamReg2, cbParamReg2);
3139 else
3140 eflags = EMEmulateXAdd(pvParam1, pvParamReg2, cbParamReg2);
3141#else /* VBOX_COMPARE_IEM_AND_EM */
3142 uint64_t u64;
3143 switch (cbParamReg2)
3144 {
3145 case 1: u64 = *(uint8_t *)pvParam1; break;
3146 case 2: u64 = *(uint16_t *)pvParam1; break;
3147 case 4: u64 = *(uint32_t *)pvParam1; break;
3148 default:
3149 case 8: u64 = *(uint64_t *)pvParam1; break;
3150 }
3151 eflags = EMEmulateXAdd(&u64, pvParamReg2, cbParamReg2);
3152 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3153#endif /* VBOX_COMPARE_IEM_AND_EM */
3154
3155 LogFlow(("XAdd %RGv=%p reg=%08llx ZF=%d\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2, !!(eflags & X86_EFL_ZF) ));
3156
3157 /* Update guest's eflags and finish. */
3158 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3159 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3160
3161 *pcbSize = cbParamReg2;
3162 PGMPhysReleasePageMappingLock(pVM, &Lock);
3163 return VINF_SUCCESS;
3164#ifdef IN_RC
3165 }
3166 }
3167
3168 return VERR_EM_INTERPRETER;
3169#endif
3170}
3171#endif /* IN_RC */
3172
3173
3174/**
3175 * WBINVD Emulation.
3176 */
3177static int emInterpretWbInvd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3178{
3179 /* Nothing to do. */
3180 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3181 return VINF_SUCCESS;
3182}
3183
3184
3185/**
3186 * INVLPG Emulation.
3187 */
3188static VBOXSTRICTRC emInterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3189{
3190 DISQPVPARAMVAL param1;
3191 RTGCPTR addr;
3192 NOREF(pvFault); NOREF(pVM); NOREF(pcbSize);
3193
3194 VBOXSTRICTRC rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3195 if(RT_FAILURE(rc))
3196 return VERR_EM_INTERPRETER;
3197
3198 switch(param1.type)
3199 {
3200 case DISQPV_TYPE_IMMEDIATE:
3201 case DISQPV_TYPE_ADDRESS:
3202 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
3203 return VERR_EM_INTERPRETER;
3204 addr = (RTGCPTR)param1.val.val64;
3205 break;
3206
3207 default:
3208 return VERR_EM_INTERPRETER;
3209 }
3210
3211 /** @todo Is addr always a flat linear address, or is it DS-based
3212 * (in the absence of segment override prefixes)?
3213 */
3214#ifdef IN_RC
3215 LogFlow(("RC: EMULATE: invlpg %RGv\n", addr));
3216#endif
3217 rc = PGMInvalidatePage(pVCpu, addr);
3218 if ( rc == VINF_SUCCESS
3219 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
3220 return VINF_SUCCESS;
3221 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
3222 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), addr),
3223 VERR_EM_INTERPRETER);
3224 return rc;
3225}
3226
3227/** @todo change all these EMInterpretXXX methods to VBOXSTRICTRC. */
3228
3229/**
3230 * CPUID Emulation.
3231 */
3232static int emInterpretCpuId(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3233{
3234 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3235 int rc = EMInterpretCpuId(pVM, pVCpu, pRegFrame);
3236 return rc;
3237}
3238
3239
3240/**
3241 * CLTS Emulation.
3242 */
3243static int emInterpretClts(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3244{
3245 NOREF(pVM); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3246
3247 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3248 if (!(cr0 & X86_CR0_TS))
3249 return VINF_SUCCESS;
3250 return CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_TS);
3251}
3252
3253
3254/**
3255 * LMSW Emulation.
3256 */
3257static int emInterpretLmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3258{
3259 DISQPVPARAMVAL param1;
3260 uint32_t val;
3261 NOREF(pvFault); NOREF(pcbSize);
3262 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3263
3264 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3265 if(RT_FAILURE(rc))
3266 return VERR_EM_INTERPRETER;
3267
3268 switch(param1.type)
3269 {
3270 case DISQPV_TYPE_IMMEDIATE:
3271 case DISQPV_TYPE_ADDRESS:
3272 if(!(param1.flags & DISQPV_FLAG_16))
3273 return VERR_EM_INTERPRETER;
3274 val = param1.val.val32;
3275 break;
3276
3277 default:
3278 return VERR_EM_INTERPRETER;
3279 }
3280
3281 LogFlow(("emInterpretLmsw %x\n", val));
3282 uint64_t OldCr0 = CPUMGetGuestCR0(pVCpu);
3283
3284 /* Only PE, MP, EM and TS can be changed; note that PE can't be cleared by this instruction. */
3285 uint64_t NewCr0 = ( OldCr0 & ~( X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
3286 | (val & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
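    /* Illustrative example: with OldCr0 = 0x8000003B (PG|NE|ET|TS|MP|PE) and val = 0x0001,
       NewCr0 = (0x8000003B & ~0x0E) | (0x0001 & 0x0F) = 0x80000031, i.e. MP and TS are
       cleared, PE stays set, and the remaining CR0 bits (ET, NE, PG here) are untouched. */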
3287
3288 return emUpdateCRx(pVM, pVCpu, pRegFrame, DISCREG_CR0, NewCr0);
3289
3290}
3291
3292#ifdef EM_EMULATE_SMSW
3293/**
3294 * SMSW Emulation.
3295 */
3296static int emInterpretSmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3297{
3298 NOREF(pvFault); NOREF(pcbSize);
3299 DISQPVPARAMVAL param1;
3300 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3301
3302 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3303 if(RT_FAILURE(rc))
3304 return VERR_EM_INTERPRETER;
3305
3306 switch(param1.type)
3307 {
3308 case DISQPV_TYPE_IMMEDIATE:
3309 if(param1.size != sizeof(uint16_t))
3310 return VERR_EM_INTERPRETER;
3311 LogFlow(("emInterpretSmsw %d <- cr0 (%x)\n", pDis->Param1.Base.idxGenReg, cr0));
3312 rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, cr0);
3313 break;
3314
3315 case DISQPV_TYPE_ADDRESS:
3316 {
3317 RTGCPTR pParam1;
3318
3319 /* Actually forced to 16 bits regardless of the operand size. */
3320 if(param1.size != sizeof(uint16_t))
3321 return VERR_EM_INTERPRETER;
3322
3323 pParam1 = (RTGCPTR)param1.val.val64;
3324 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
3325 LogFlow(("emInterpretSmsw %RGv <- cr0 (%x)\n", pParam1, cr0));
3326
3327 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &cr0, sizeof(uint16_t));
3328 if (RT_FAILURE(rc))
3329 {
3330 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
3331 return VERR_EM_INTERPRETER;
3332 }
3333 break;
3334 }
3335
3336 default:
3337 return VERR_EM_INTERPRETER;
3338 }
3339
3340 LogFlow(("emInterpretSmsw %x\n", cr0));
3341 return rc;
3342}
3343#endif
3344
3345
3346/**
3347 * Interpret CRx read.
3348 *
3349 * @returns VBox status code.
3350 * @param pVM The cross context VM structure.
3351 * @param pVCpu The cross context virtual CPU structure.
3352 * @param pRegFrame The register frame.
3353 * @param DestRegGen General purpose register index (USE_REG_E**)
3354 * @param SrcRegCrx CRx register index (DISUSE_REG_CR*)
3355 *
3356 */
3357static int emInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx)
3358{
3359 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3360 uint64_t val64;
3361 int rc = CPUMGetGuestCRx(pVCpu, SrcRegCrx, &val64);
3362 AssertMsgRCReturn(rc, ("CPUMGetGuestCRx %d failed\n", SrcRegCrx), VERR_EM_INTERPRETER);
3363 NOREF(pVM);
3364
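    /* In 64-bit guest code the whole CR value is stored; otherwise only the low 32 bits are written back. */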
3365 if (CPUMIsGuestIn64BitCode(pVCpu))
3366 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
3367 else
3368 rc = DISWriteReg32(pRegFrame, DestRegGen, val64);
3369
3370 if (RT_SUCCESS(rc))
3371 {
3372 LogFlow(("MOV_CR: gen32=%d CR=%d val=%RX64\n", DestRegGen, SrcRegCrx, val64));
3373 return VINF_SUCCESS;
3374 }
3375 return VERR_EM_INTERPRETER;
3376}
3377
3378
3379/**
3380 * Interpret CRx write.
3381 *
3382 * @returns VBox status code.
3383 * @param pVM The cross context VM structure.
3384 * @param pVCpu The cross context virtual CPU structure.
3385 * @param pRegFrame The register frame.
3386 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3387 * @param SrcRegGen General purpose register index (USE_REG_E**)
3388 *
3389 */
3390static int emInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
3391{
3392 uint64_t val;
3393 int rc;
3394 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3395
3396 if (CPUMIsGuestIn64BitCode(pVCpu))
3397 rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
3398 else
3399 {
3400 uint32_t val32;
3401 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
3402 val = val32;
3403 }
3404
3405 if (RT_SUCCESS(rc))
3406 return emUpdateCRx(pVM, pVCpu, pRegFrame, DestRegCrx, val);
3407
3408 return VERR_EM_INTERPRETER;
3409}
3410
3411
3412/**
3413 * MOV CRx
3414 */
3415static int emInterpretMovCRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3416{
3417 NOREF(pvFault); NOREF(pcbSize);
3418 if ((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_CR)
3419 return emInterpretCRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxCtrlReg);
3420
3421 if (pDis->Param1.fUse == DISUSE_REG_CR && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3422 return emInterpretCRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxCtrlReg, pDis->Param2.Base.idxGenReg);
3423
3424 AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
3425}
3426
3427
3428/**
3429 * MOV DRx
3430 */
3431static int emInterpretMovDRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3432{
3433 int rc = VERR_EM_INTERPRETER;
3434 NOREF(pvFault); NOREF(pcbSize);
3435
3436 if((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_DBG)
3437 {
3438 rc = EMInterpretDRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxDbgReg);
3439 }
3440 else
3441 if(pDis->Param1.fUse == DISUSE_REG_DBG && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3442 {
3443 rc = EMInterpretDRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxDbgReg, pDis->Param2.Base.idxGenReg);
3444 }
3445 else
3446 AssertMsgFailed(("Unexpected debug register move\n"));
3447
3448 return rc;
3449}
3450
3451
3452/**
3453 * LLDT Emulation.
3454 */
3455static int emInterpretLLdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3456{
3457 DISQPVPARAMVAL param1;
3458 RTSEL sel;
3459 NOREF(pVM); NOREF(pvFault); NOREF(pcbSize);
3460
3461 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3462 if(RT_FAILURE(rc))
3463 return VERR_EM_INTERPRETER;
3464
3465 switch(param1.type)
3466 {
3467 case DISQPV_TYPE_ADDRESS:
3468 return VERR_EM_INTERPRETER; // Memory operands are not handled here.
3469
3470 case DISQPV_TYPE_IMMEDIATE:
3471 if(!(param1.flags & DISQPV_FLAG_16))
3472 return VERR_EM_INTERPRETER;
3473 sel = (RTSEL)param1.val.val16;
3474 break;
3475
3476 default:
3477 return VERR_EM_INTERPRETER;
3478 }
3479
3480#ifdef IN_RING0
3481 /* Only for the VT-x real-mode emulation case. */
3482 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3483 CPUMSetGuestLDTR(pVCpu, sel);
3484 return VINF_SUCCESS;
3485#else
3486 if (sel == 0)
3487 {
3488 if (CPUMGetHyperLDTR(pVCpu) == 0)
3489 {
3490 // This simple case is the most frequent one in Windows 2000 (~31k hits during boot & shutdown).
3491 return VINF_SUCCESS;
3492 }
3493 }
3494 // The remaining cases are not handled here.
3495 return VERR_EM_INTERPRETER;
3496#endif
3497}
3498
3499#ifdef IN_RING0
3500/**
3501 * LIDT/LGDT Emulation.
3502 */
3503static int emInterpretLIGdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3504{
3505 DISQPVPARAMVAL param1;
3506 RTGCPTR pParam1;
3507 X86XDTR32 dtr32;
3508 NOREF(pvFault); NOREF(pcbSize);
3509
3510 Log(("Emulate %s at %RGv\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip));
3511
3512 /* Only for the VT-x real-mode emulation case. */
3513 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3514
3515 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3516 if(RT_FAILURE(rc))
3517 return VERR_EM_INTERPRETER;
3518
3519 switch(param1.type)
3520 {
3521 case DISQPV_TYPE_ADDRESS:
3522 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, param1.val.val16);
3523 break;
3524
3525 default:
3526 return VERR_EM_INTERPRETER;
3527 }
3528
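    /* Fetch the pseudo-descriptor (limit and base) from guest memory. */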
3529 rc = emRamRead(pVM, pVCpu, pRegFrame, &dtr32, pParam1, sizeof(dtr32));
3530 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3531
3532 if (!(pDis->fPrefix & DISPREFIX_OPSIZE))
3533 dtr32.uAddr &= 0xffffff; /* 16-bit operand size: only the low 24 bits of the base are loaded. */
3534
3535 if (pDis->pCurInstr->uOpcode == OP_LIDT)
3536 CPUMSetGuestIDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3537 else
3538 CPUMSetGuestGDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3539
3540 return VINF_SUCCESS;
3541}
3542#endif
3543
3544
3545#ifdef IN_RC
3546/**
3547 * STI Emulation.
3548 *
3549 * @remark The instruction following STI is guaranteed to be executed before any interrupts are dispatched.
3550 */
3551static int emInterpretSti(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3552{
3553 NOREF(pcbSize);
3554 PPATMGCSTATE pGCState = PATMGetGCState(pVM);
3555
3556 if(!pGCState)
3557 {
3558 Assert(pGCState);
3559 return VERR_EM_INTERPRETER;
3560 }
3561 pGCState->uVMFlags |= X86_EFL_IF;
3562
3563 Assert(pRegFrame->eflags.u32 & X86_EFL_IF);
3564 Assert(pvFault == SELMToFlat(pVM, DISSELREG_CS, pRegFrame, (RTGCPTR)pRegFrame->rip));
3565
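    /* Record the address of the instruction following STI; interrupts stay inhibited
       until it has been executed (the STI interrupt shadow). */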
3566 pVCpu->em.s.GCPtrInhibitInterrupts = pRegFrame->eip + pDis->cbInstr;
3567 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3568
3569 return VINF_SUCCESS;
3570}
3571#endif /* IN_RC */
3572
3573
3574/**
3575 * HLT Emulation.
3576 */
3577static VBOXSTRICTRC
3578emInterpretHlt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3579{
3580 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3581 return VINF_EM_HALT;
3582}
3583
3584
3585/**
3586 * RDTSC Emulation.
3587 */
3588static int emInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3589{
3590 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3591 return EMInterpretRdtsc(pVM, pVCpu, pRegFrame);
3592}
3593
3594/**
3595 * RDPMC Emulation.
3596 */
3597static int emInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3598{
3599 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3600 return EMInterpretRdpmc(pVM, pVCpu, pRegFrame);
3601}
3602
3603
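/**
 * MONITOR Emulation.
 */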
3604static int emInterpretMonitor(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3605{
3606 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3607 return EMInterpretMonitor(pVM, pVCpu, pRegFrame);
3608}
3609
3610
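/**
 * MWAIT Emulation.
 */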
3611static VBOXSTRICTRC emInterpretMWait(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3612{
3613 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3614 return EMInterpretMWait(pVM, pVCpu, pRegFrame);
3615}
3616
3617
3618/**
3619 * RDMSR Emulation.
3620 */
3621static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3622{
3623 /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
3624 different, so we play safe by completely disassembling the instruction. */
3625 Assert(!(pDis->fPrefix & DISPREFIX_REX));
3626 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3627 return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
3628}
3629
3630
3631/**
3632 * WRMSR Emulation.
3633 */
3634static int emInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3635{
3636 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3637 return EMInterpretWrmsr(pVM, pVCpu, pRegFrame);
3638}
3639
3640
3641/**
3642 * Internal worker.
3643 * @copydoc emInterpretInstructionCPUOuter
3644 * @param pVM The cross context VM structure.
3645 */
3646DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
3647 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
3648{
3649 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3650 Assert(enmCodeType == EMCODETYPE_SUPERVISOR || enmCodeType == EMCODETYPE_ALL);
3651 Assert(pcbSize);
3652 *pcbSize = 0;
3653
3654 if (enmCodeType == EMCODETYPE_SUPERVISOR)
3655 {
3656 /*
3657 * Only supervisor guest code is interpreted here,
3658 * and no complicated prefixes are accepted.
3659 */
3660 /* Get the current privilege level. */
3661 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3662#ifdef VBOX_WITH_RAW_RING1
3663 if ( !EMIsRawRing1Enabled(pVM)
3664 || cpl > 1
3665 || pRegFrame->eflags.Bits.u2IOPL > cpl
3666 )
3667#endif
3668 {
3669 if ( cpl != 0
3670 && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */
3671 {
3672 Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
3673 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
3674 return VERR_EM_INTERPRETER;
3675 }
3676 }
3677 }
3678 else
3679 Log2(("emInterpretInstructionCPU allowed to interpret user-level code!!\n"));
3680
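    /*
     * Refuse repeat prefixes we cannot handle in this context, and LOCK prefixes
     * on instructions for which no atomic emulation helper exists here.
     */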
3681#ifdef IN_RC
3682 if ( (pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP))
3683 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3684 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3685 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3686 && pDis->pCurInstr->uOpcode != OP_XADD
3687 && pDis->pCurInstr->uOpcode != OP_OR
3688 && pDis->pCurInstr->uOpcode != OP_AND
3689 && pDis->pCurInstr->uOpcode != OP_XOR
3690 && pDis->pCurInstr->uOpcode != OP_BTR
3691 )
3692 )
3693#else
3694 if ( (pDis->fPrefix & DISPREFIX_REPNE)
3695 || ( (pDis->fPrefix & DISPREFIX_REP)
3696 && pDis->pCurInstr->uOpcode != OP_STOSWD
3697 )
3698 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3699 && pDis->pCurInstr->uOpcode != OP_OR
3700 && pDis->pCurInstr->uOpcode != OP_AND
3701 && pDis->pCurInstr->uOpcode != OP_XOR
3702 && pDis->pCurInstr->uOpcode != OP_BTR
3703 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3704 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3705 )
3706 )
3707#endif
3708 {
3709 //Log(("EMInterpretInstruction: wrong prefix!!\n"));
3710 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedPrefix));
3711 Log4(("EM: Refuse %u on REP/REPNE/LOCK prefix grounds\n", pDis->pCurInstr->uOpcode));
3712 return VERR_EM_INTERPRETER;
3713 }
3714
3715#if HC_ARCH_BITS == 32
3716 /*
3717 * Most accesses wider than 4 bytes cannot be emulated on a 32-bit host.
3718 * Whitelisted instructions are safe.
3719 */
3720 if ( pDis->Param1.cb > 4
3721 && CPUMIsGuestIn64BitCode(pVCpu))
3722 {
3723 uint32_t uOpCode = pDis->pCurInstr->uOpcode;
3724 if ( uOpCode != OP_STOSWD
3725 && uOpCode != OP_MOV
3726 && uOpCode != OP_CMPXCHG8B
3727 && uOpCode != OP_XCHG
3728 && uOpCode != OP_BTS
3729 && uOpCode != OP_BTR
3730 && uOpCode != OP_BTC
3731 )
3732 {
3733# ifdef VBOX_WITH_STATISTICS
3734 switch (pDis->pCurInstr->uOpcode)
3735 {
3736# define INTERPRET_FAILED_CASE(opcode, Instr) \
3737 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); break;
3738 INTERPRET_FAILED_CASE(OP_XCHG,Xchg);
3739 INTERPRET_FAILED_CASE(OP_DEC,Dec);
3740 INTERPRET_FAILED_CASE(OP_INC,Inc);
3741 INTERPRET_FAILED_CASE(OP_POP,Pop);
3742 INTERPRET_FAILED_CASE(OP_OR, Or);
3743 INTERPRET_FAILED_CASE(OP_XOR,Xor);
3744 INTERPRET_FAILED_CASE(OP_AND,And);
3745 INTERPRET_FAILED_CASE(OP_MOV,Mov);
3746 INTERPRET_FAILED_CASE(OP_STOSWD,StosWD);
3747 INTERPRET_FAILED_CASE(OP_INVLPG,InvlPg);
3748 INTERPRET_FAILED_CASE(OP_CPUID,CpuId);
3749 INTERPRET_FAILED_CASE(OP_MOV_CR,MovCRx);
3750 INTERPRET_FAILED_CASE(OP_MOV_DR,MovDRx);
3751 INTERPRET_FAILED_CASE(OP_LLDT,LLdt);
3752 INTERPRET_FAILED_CASE(OP_LIDT,LIdt);
3753 INTERPRET_FAILED_CASE(OP_LGDT,LGdt);
3754 INTERPRET_FAILED_CASE(OP_LMSW,Lmsw);
3755 INTERPRET_FAILED_CASE(OP_CLTS,Clts);
3756 INTERPRET_FAILED_CASE(OP_MONITOR,Monitor);
3757 INTERPRET_FAILED_CASE(OP_MWAIT,MWait);
3758 INTERPRET_FAILED_CASE(OP_RDMSR,Rdmsr);
3759 INTERPRET_FAILED_CASE(OP_WRMSR,Wrmsr);
3760 INTERPRET_FAILED_CASE(OP_ADD,Add);
3761 INTERPRET_FAILED_CASE(OP_SUB,Sub);
3762 INTERPRET_FAILED_CASE(OP_ADC,Adc);
3763 INTERPRET_FAILED_CASE(OP_BTR,Btr);
3764 INTERPRET_FAILED_CASE(OP_BTS,Bts);
3765 INTERPRET_FAILED_CASE(OP_BTC,Btc);
3766 INTERPRET_FAILED_CASE(OP_RDTSC,Rdtsc);
3767 INTERPRET_FAILED_CASE(OP_CMPXCHG, CmpXchg);
3768 INTERPRET_FAILED_CASE(OP_STI, Sti);
3769 INTERPRET_FAILED_CASE(OP_XADD,XAdd);
3770 INTERPRET_FAILED_CASE(OP_CMPXCHG8B,CmpXchg8b);
3771 INTERPRET_FAILED_CASE(OP_HLT, Hlt);
3772 INTERPRET_FAILED_CASE(OP_IRET,Iret);
3773 INTERPRET_FAILED_CASE(OP_WBINVD,WbInvd);
3774 INTERPRET_FAILED_CASE(OP_MOVNTPS,MovNTPS);
3775# undef INTERPRET_FAILED_CASE
3776 default:
3777 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
3778 break;
3779 }
3780# endif /* VBOX_WITH_STATISTICS */
3781 Log4(("EM: Refuse %u on grounds of accessing %u bytes\n", pDis->pCurInstr->uOpcode, pDis->Param1.cb));
3782 return VERR_EM_INTERPRETER;
3783 }
3784 }
3785#endif
3786
3787 VBOXSTRICTRC rc;
3788#if (defined(VBOX_STRICT) || defined(LOG_ENABLED))
3789 LogFlow(("emInterpretInstructionCPU %s\n", emGetMnemonic(pDis)));
3790#endif
3791 switch (pDis->pCurInstr->uOpcode)
3792 {
3793 /*
3794 * Macros for generating the right case statements.
3795 */
3796# ifndef VBOX_COMPARE_IEM_AND_EM
3797# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3798 case opcode:\
3799 if (pDis->fPrefix & DISPREFIX_LOCK) \
3800 rc = emInterpretLock##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulateLock); \
3801 else \
3802 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3803 if (RT_SUCCESS(rc)) \
3804 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3805 else \
3806 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3807 return rc
3808# else /* VBOX_COMPARE_IEM_AND_EM */
3809# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3810 case opcode:\
3811 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3812 if (RT_SUCCESS(rc)) \
3813 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3814 else \
3815 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3816 return rc
3817# endif /* VBOX_COMPARE_IEM_AND_EM */
3818
3819#define INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate) \
3820 case opcode:\
3821 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3822 if (RT_SUCCESS(rc)) \
3823 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3824 else \
3825 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3826 return rc
3827
3828#define INTERPRET_CASE_EX_PARAM2(opcode, Instr, InstrFn, pfnEmulate) \
3829 INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate)
3830#define INTERPRET_CASE_EX_LOCK_PARAM2(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3831 INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock)
3832
3833#define INTERPRET_CASE(opcode, Instr) \
3834 case opcode:\
3835 rc = emInterpret##Instr(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
3836 if (RT_SUCCESS(rc)) \
3837 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3838 else \
3839 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3840 return rc
3841
3842#define INTERPRET_CASE_EX_DUAL_PARAM2(opcode, Instr, InstrFn) \
3843 case opcode:\
3844 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
3845 if (RT_SUCCESS(rc)) \
3846 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3847 else \
3848 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3849 return rc
3850
3851#define INTERPRET_STAT_CASE(opcode, Instr) \
3852 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); return VERR_EM_INTERPRETER;
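    /* As an illustration, INTERPRET_CASE(OP_HLT, Hlt) expands to a 'case OP_HLT:' that calls
       emInterpretHlt(), bumps the context-specific StatHlt or StatFailedHlt counter depending
       on the outcome, and returns rc. */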
3853
3854 /*
3855 * The actual case statements.
3856 */
3857 INTERPRET_CASE(OP_XCHG,Xchg);
3858 INTERPRET_CASE_EX_PARAM2(OP_DEC,Dec, IncDec, EMEmulateDec);
3859 INTERPRET_CASE_EX_PARAM2(OP_INC,Inc, IncDec, EMEmulateInc);
3860 INTERPRET_CASE(OP_POP,Pop);
3861 INTERPRET_CASE_EX_LOCK_PARAM3(OP_OR, Or, OrXorAnd, EMEmulateOr, EMEmulateLockOr);
3862 INTERPRET_CASE_EX_LOCK_PARAM3(OP_XOR,Xor, OrXorAnd, EMEmulateXor, EMEmulateLockXor);
3863 INTERPRET_CASE_EX_LOCK_PARAM3(OP_AND,And, OrXorAnd, EMEmulateAnd, EMEmulateLockAnd);
3864 INTERPRET_CASE(OP_MOV,Mov);
3865#ifndef IN_RC
3866 INTERPRET_CASE(OP_STOSWD,StosWD);
3867#endif
3868 INTERPRET_CASE(OP_INVLPG,InvlPg);
3869 INTERPRET_CASE(OP_CPUID,CpuId);
3870 INTERPRET_CASE(OP_MOV_CR,MovCRx);
3871 INTERPRET_CASE(OP_MOV_DR,MovDRx);
3872#ifdef IN_RING0
3873 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LIDT, LIdt, LIGdt);
3874 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LGDT, LGdt, LIGdt);
3875#endif
3876 INTERPRET_CASE(OP_LLDT,LLdt);
3877 INTERPRET_CASE(OP_LMSW,Lmsw);
3878#ifdef EM_EMULATE_SMSW
3879 INTERPRET_CASE(OP_SMSW,Smsw);
3880#endif
3881 INTERPRET_CASE(OP_CLTS,Clts);
3882 INTERPRET_CASE(OP_MONITOR, Monitor);
3883 INTERPRET_CASE(OP_MWAIT, MWait);
3884 INTERPRET_CASE(OP_RDMSR, Rdmsr);
3885 INTERPRET_CASE(OP_WRMSR, Wrmsr);
3886 INTERPRET_CASE_EX_PARAM3(OP_ADD,Add, AddSub, EMEmulateAdd);
3887 INTERPRET_CASE_EX_PARAM3(OP_SUB,Sub, AddSub, EMEmulateSub);
3888 INTERPRET_CASE(OP_ADC,Adc);
3889 INTERPRET_CASE_EX_LOCK_PARAM2(OP_BTR,Btr, BitTest, EMEmulateBtr, EMEmulateLockBtr);
3890 INTERPRET_CASE_EX_PARAM2(OP_BTS,Bts, BitTest, EMEmulateBts);
3891 INTERPRET_CASE_EX_PARAM2(OP_BTC,Btc, BitTest, EMEmulateBtc);
3892 INTERPRET_CASE(OP_RDPMC,Rdpmc);
3893 INTERPRET_CASE(OP_RDTSC,Rdtsc);
3894 INTERPRET_CASE(OP_CMPXCHG, CmpXchg);
3895#ifdef IN_RC
3896 INTERPRET_CASE(OP_STI,Sti);
3897 INTERPRET_CASE(OP_XADD, XAdd);
3898 INTERPRET_CASE(OP_IRET,Iret);
3899#endif
3900 INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b);
3901 INTERPRET_CASE(OP_HLT,Hlt);
3902 INTERPRET_CASE(OP_WBINVD,WbInvd);
3903#ifdef VBOX_WITH_STATISTICS
3904# ifndef IN_RC
3905 INTERPRET_STAT_CASE(OP_XADD, XAdd);
3906# endif
3907 INTERPRET_STAT_CASE(OP_MOVNTPS,MovNTPS);
3908#endif
3909
3910 default:
3911 Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->uOpcode));
3912 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
3913 return VERR_EM_INTERPRETER;
3914
3915#undef INTERPRET_CASE_EX_PARAM2
3916#undef INTERPRET_STAT_CASE
3917#undef INTERPRET_CASE_EX
3918#undef INTERPRET_CASE
3919 } /* switch (opcode) */
3920 /* not reached */
3921}
3922
3923/**
3924 * Interprets the current instruction using the supplied DISCPUSTATE structure.
3925 *
3926 * EIP is *NOT* updated!
3927 *
3928 * @returns VBox strict status code.
3929 * @retval VINF_* Scheduling instructions. When these are returned, it is
3930 * not always clear whether the code was actually executed;
3931 * this will be addressed when it becomes a problem.
3932 * @retval VERR_EM_INTERPRETER Something we can't cope with.
3933 * @retval VERR_* Fatal errors.
3934 *
3935 * @param pVCpu The cross context virtual CPU structure.
3936 * @param pDis The disassembler cpu state for the instruction to be
3937 * interpreted.
3938 * @param pRegFrame The register frame. EIP is *NOT* changed!
3939 * @param pvFault The fault address (CR2).
3940 * @param pcbSize Size of the write (if applicable).
3941 * @param enmCodeType Code type (user/supervisor)
3942 *
3943 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
3944 * Architecture Software Developer's Manual, Vol 3, 5.5) so we don't need
3945 * to worry about e.g. invalid modrm combinations (!)
3946 *
3947 * @todo At this time we do NOT check if the instruction overwrites vital information.
3948 * Make sure this can't happen!! (will add some assertions/checks later)
3949 */
3950DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
3951 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
3952{
3953 STAM_PROFILE_START(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
3954 VBOXSTRICTRC rc = emInterpretInstructionCPU(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, pRegFrame, pvFault, enmCodeType, pcbSize);
3955 STAM_PROFILE_STOP(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
3956 if (RT_SUCCESS(rc))
3957 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretSucceeded));
3958 else
3959 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretFailed));
3960 return rc;
3961}
3962
3963
3964#endif /* !VBOX_WITH_IEM */