VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@ 71152

Last change on this file since 71152 was 70948, checked in by vboxsync on 2018-02-10

VMM: Added a bMainExecutionEngine member to the VM structure for use instead of fHMEnabled and fNEMEnabled. Changed a lot of HMIsEnabled invocations to use the new macros VM_IS_RAW_MODE_ENABLED and VM_IS_HM_OR_NEM_ENABLED. Eliminated fHMEnabledFixed. Fixed inverted test for raw-mode debug register sanity checking. Some other minor cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 144.3 KB
1/* $Id: EMAll.cpp 70948 2018-02-10 15:38:12Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_WITH_IEM
23#define LOG_GROUP LOG_GROUP_EM
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/patm.h>
28#include <VBox/vmm/csam.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_IEM
31# include <VBox/vmm/iem.h>
32#endif
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/stam.h>
35#include "EMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/log.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50#ifdef VBOX_WITH_IEM
51//# define VBOX_COMPARE_IEM_AND_EM /* debugging... */
52//# define VBOX_SAME_AS_EM
53//# define VBOX_COMPARE_IEM_LAST
54#endif
55
56#ifdef VBOX_WITH_RAW_RING1
57# define EM_EMULATE_SMSW
58#endif
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64/** @def EM_ASSERT_FAULT_RETURN
65 * Safety check.
66 *
67 * Could in theory misfire on a cross page boundary access...
68 *
69 * Currently disabled because the CSAM (+ PATM) patch monitoring occasionally
70 * turns up an alias page instead of the original faulting one, annoying the
71 * heck out of anyone running a debug build. See @bugref{2609} and @bugref{1931}.
72 */
73#if 0
74# define EM_ASSERT_FAULT_RETURN(expr, rc) AssertReturn(expr, rc)
75#else
76# define EM_ASSERT_FAULT_RETURN(expr, rc) do { } while (0)
77#endif
78
79
80/*********************************************************************************************************************************
81* Internal Functions *
82*********************************************************************************************************************************/
83#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
84DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
85 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize);
86#endif
87
88
89/*********************************************************************************************************************************
90* Global Variables *
91*********************************************************************************************************************************/
92#ifdef VBOX_COMPARE_IEM_AND_EM
93static const uint32_t g_fInterestingFFs = VMCPU_FF_TO_R3
94 | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_INHIBIT_INTERRUPTS
95 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
96 | VMCPU_FF_TLB_FLUSH | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL;
97static uint32_t g_fIncomingFFs;
98static CPUMCTX g_IncomingCtx;
99static bool g_fIgnoreRaxRdx = false;
100
101static uint32_t g_fEmFFs;
102static CPUMCTX g_EmCtx;
103static uint8_t g_abEmWrote[256];
104static size_t g_cbEmWrote;
105
106static uint32_t g_fIemFFs;
107static CPUMCTX g_IemCtx;
108extern uint8_t g_abIemWrote[256];
109#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
110extern size_t g_cbIemWrote;
111#else
112static size_t g_cbIemWrote;
113#endif
114#endif
115
116
117/**
118 * Get the current execution manager status.
119 *
120 * @returns Current status.
121 * @param pVCpu The cross context virtual CPU structure.
122 */
123VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
124{
125 return pVCpu->em.s.enmState;
126}
127
128
129/**
130 * Sets the current execution manager status. (use only when you know what you're doing!)
131 *
132 * @param pVCpu The cross context virtual CPU structure.
133 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
134 */
135VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
136{
137 /* Only allowed combination: */
138 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
139 pVCpu->em.s.enmState = enmNewState;
140}
141
142
143/**
144 * Sets the PC for which interrupts should be inhibited.
145 *
146 * @param pVCpu The cross context virtual CPU structure.
147 * @param PC The PC.
148 */
149VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
150{
151 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
152 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
153}
154
155
156/**
157 * Gets the PC for which interrupts should be inhibited.
158 *
159 * There are a few instructions which inhibit or delay interrupts
160 * for the instruction following them. These instructions are:
161 * - STI
162 * - MOV SS, r/m16
163 * - POP SS
164 *
165 * @returns The PC for which interrupts should be inhibited.
166 * @param pVCpu The cross context virtual CPU structure.
167 *
168 */
169VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
170{
171 return pVCpu->em.s.GCPtrInhibitInterrupts;
172}
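/*
 * Minimal usage sketch, assuming a hypothetical dispatch loop (the local
 * fIntrEnabled and the surrounding context are illustrative only): callers
 * typically compare the saved PC against the current RIP and clear the
 * force-action flag once the shadowed instruction has been left behind.
 *
 *     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
 *     {
 *         if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
 *             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); // shadow covers one instruction only
 *         else
 *             fIntrEnabled = false; // still in the interrupt shadow, hold delivery
 *     }
 */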
173
174
175/**
176 * Prepare an MWAIT - essentials of the MONITOR instruction.
177 *
178 * @returns VINF_SUCCESS
179 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
180 * @param rax The content of RAX.
181 * @param rcx The content of RCX.
182 * @param rdx The content of RDX.
183 * @param GCPhys The physical address corresponding to rax.
184 */
185VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
186{
187 pVCpu->em.s.MWait.uMonitorRAX = rax;
188 pVCpu->em.s.MWait.uMonitorRCX = rcx;
189 pVCpu->em.s.MWait.uMonitorRDX = rdx;
190 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
191 /** @todo Make use of GCPhys. */
192 NOREF(GCPhys);
193 /** @todo Complete MONITOR implementation. */
194 return VINF_SUCCESS;
195}
196
197
198/**
199 * Checks if the monitor hardware is armed / active.
200 *
201 * @returns true if armed, false otherwise.
202 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
203 */
204VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
205{
206 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
207}
208
209
210/**
211 * Performs an MWAIT.
212 *
213 * @returns VINF_SUCCESS
214 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
215 * @param rax The content of RAX.
216 * @param rcx The content of RCX.
217 */
218VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
219{
220 pVCpu->em.s.MWait.uMWaitRAX = rax;
221 pVCpu->em.s.MWait.uMWaitRCX = rcx;
222 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
223 if (rcx)
224 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
225 else
226 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
227 /** @todo not completely correct?? */
228 return VINF_EM_HALT;
229}
230
231
232
233/**
234 * Determine if we should continue execution in HM after encountering an mwait
235 * instruction.
236 *
237 * Clears MWAIT flags if returning @c true.
238 *
239 * @returns true if we should continue, false if we should halt.
240 * @param pVCpu The cross context virtual CPU structure.
241 * @param pCtx Current CPU context.
242 */
243VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
244{
245 if ( pCtx->eflags.Bits.u1IF
246 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
247 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
248 {
249 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
250 {
251 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
252 return true;
253 }
254 }
255
256 return false;
257}
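/*
 * Minimal sketch of how the MONITOR/MWAIT helpers fit together, assuming a
 * hypothetical hardware-assisted exit handler (pCtx, rcStrict and the exit
 * dispatch are illustrative only): MONITOR arms the range, MWAIT halts, and
 * the should-continue check decides whether the halt can be skipped.
 *
 *     // On a MONITOR exit:
 *     rcStrict = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
 *
 *     // On an MWAIT exit:
 *     rcStrict = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
 *     if (rcStrict == VINF_EM_HALT && EMMonitorWaitShouldContinue(pVCpu, pCtx))
 *         rcStrict = VINF_SUCCESS; // a wake-up event is already pending, keep executing
 */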
258
259
260/**
261 * Determine if we should continue execution in HM after encountering a hlt
262 * instruction.
263 *
264 * @returns true if we should continue, false if we should halt.
265 * @param pVCpu The cross context virtual CPU structure.
266 * @param pCtx Current CPU context.
267 */
268VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
269{
270 if (pCtx->eflags.Bits.u1IF)
271 return !!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
272 return false;
273}
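/*
 * Analogous sketch for HLT, again with a hypothetical exit handler: only drop
 * into the halted state when nothing can wake the vCPU right away.
 *
 *     if (EMShouldContinueAfterHalt(pVCpu, pCtx))
 *         rcStrict = VINF_SUCCESS;    // pending interrupt with IF=1, resume execution
 *     else
 *         rcStrict = VINF_EM_HALT;    // really halt until something arrives
 */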
274
275
276/**
277 * Unhalts and wakes up the given CPU.
278 *
279 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
280 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up. If
281 * the CPU isn't currently in a halt, the next HLT instruction it executes will
282 * be affected.
283 *
284 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
285 * @param pVM The cross context VM structure.
286 * @param pVCpuDst The cross context virtual CPU structure of the
287 * CPU to unhalt and wake up. This is usually not the
288 * same as the caller.
289 * @thread EMT
290 */
291VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst)
292{
293 /*
294 * Flag the current(/next) HLT to unhalt immediately.
295 */
296 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);
297
298 /*
299 * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
300 * just do it here for now).
301 */
302#ifdef IN_RING0
303 /* We might be here with preemption disabled or enabled (i.e. depending on
304 thread-context hooks being used), so don't try to obtain the lock GVMMR0
305 uses here. See @bugref{7270#c148}. */
306 int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
307 AssertRC(rc);
308
309#elif defined(IN_RING3)
310 int rc = SUPR3CallVMMR0(pVM->pVMR0, pVCpuDst->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
311 AssertRC(rc);
312
313#else
314 /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
315 Assert(pVM->cCpus == 1); NOREF(pVM);
316 int rc = VINF_SUCCESS;
317#endif
318 return rc;
319}
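/*
 * Usage sketch, assuming a hypothetical KVM "kick CPU" hypercall handler; the
 * argument name uHyperArg1 and the bounds check are illustrative only.
 *
 *     VMCPUID idDstCpu = (VMCPUID)uHyperArg1;   // target vCPU from the hypercall
 *     if (idDstCpu < pVM->cCpus)
 *         rc = EMUnhaltAndWakeUp(pVM, &pVM->aCpus[idDstCpu]);
 *     else
 *         rc = VERR_INVALID_CPU_ID;
 */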
320
321
322/**
323 * Locks REM execution to a single VCPU.
324 *
325 * @param pVM The cross context VM structure.
326 */
327VMMDECL(void) EMRemLock(PVM pVM)
328{
329#ifdef VBOX_WITH_REM
330 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
331 return; /* early init */
332
333 Assert(!PGMIsLockOwner(pVM));
334 Assert(!IOMIsLockWriteOwner(pVM));
335 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
336 AssertRCSuccess(rc);
337#else
338 RT_NOREF(pVM);
339#endif
340}
341
342
343/**
344 * Unlocks REM execution.
345 *
346 * @param pVM The cross context VM structure.
347 */
348VMMDECL(void) EMRemUnlock(PVM pVM)
349{
350#ifdef VBOX_WITH_REM
351 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
352 return; /* early init */
353
354 PDMCritSectLeave(&pVM->em.s.CritSectREM);
355#else
356 RT_NOREF(pVM);
357#endif
358}
359
360
361/**
362 * Check if this VCPU currently owns the REM lock.
363 *
364 * @returns bool owner/not owner
365 * @param pVM The cross context VM structure.
366 */
367VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
368{
369#ifdef VBOX_WITH_REM
370 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
371 return true; /* early init */
372
373 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
374#else
375 RT_NOREF(pVM);
376 return true;
377#endif
378}
379
380
381/**
382 * Try to acquire the REM lock.
383 *
384 * @returns VBox status code
385 * @param pVM The cross context VM structure.
386 */
387VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
388{
389#ifdef VBOX_WITH_REM
390 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
391 return VINF_SUCCESS; /* early init */
392
393 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
394#else
395 RT_NOREF(pVM);
396 return VINF_SUCCESS;
397#endif
398}
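/*
 * Typical bracketing pattern for the REM lock helpers above (a sketch; the
 * work in the middle is a stand-in):
 *
 *     EMRemLock(pVM);
 *     Assert(EMRemIsLockOwner(pVM));
 *     // ... touch recompiler state ...
 *     EMRemUnlock(pVM);
 *
 * EMRemTryLock() is the non-blocking variant and typically yields
 * VERR_SEM_BUSY when another EMT already owns the section.
 */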
399
400
401/**
402 * @callback_method_impl{FNDISREADBYTES}
403 */
404static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
405{
406 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
407#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
408 PVM pVM = pVCpu->CTX_SUFF(pVM);
409#endif
410 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
411 int rc;
412
413 /*
414 * Figure out how much we can or must read.
415 */
416 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
417 if (cbToRead > cbMaxRead)
418 cbToRead = cbMaxRead;
419 else if (cbToRead < cbMinRead)
420 cbToRead = cbMinRead;
421
422#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
423 /*
424 * We might be called upon to interpret an instruction in a patch.
425 */
426 if (PATMIsPatchGCAddr(pVM, uSrcAddr))
427 {
428# ifdef IN_RC
429 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
430# else
431 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVM, uSrcAddr), cbToRead);
432# endif
433 rc = VINF_SUCCESS;
434 }
435 else
436#endif
437 {
438# ifdef IN_RC
439 /*
440 * Try to access it through the shadow page tables first. Fall back on the
441 * slower PGM method if it fails because the TLB or page table was
442 * modified recently.
443 */
444 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
445 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
446 {
447 cbToRead = cbMinRead;
448 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
449 }
450 if (rc == VERR_ACCESS_DENIED)
451#endif
452 {
453 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
454 if (RT_FAILURE(rc))
455 {
456 if (cbToRead > cbMinRead)
457 {
458 cbToRead = cbMinRead;
459 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
460 }
461 if (RT_FAILURE(rc))
462 {
463#ifndef IN_RC
464 /*
465 * If we fail to find the page via the guest's page tables
466 * we invalidate the page in the host TLB (pertaining to
467 * the guest in the NestedPaging case). See @bugref{6043}.
468 */
469 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
470 {
471 HMInvalidatePage(pVCpu, uSrcAddr);
472 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
473 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
474 }
475#endif
476 }
477 }
478 }
479 }
480
481 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
482 return rc;
483}
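/*
 * Worked example of the read clamping above, with assumed inputs: for
 * uSrcAddr = 0x80000ffa, cbMinRead = 1 and cbMaxRead = 16:
 *
 *     cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);  // 0x1000 - 0xffa = 6
 *     // 6 is neither above cbMaxRead nor below cbMinRead, so 6 bytes are read
 *     // and the access stops exactly at the page boundary.  Only when fewer
 *     // than cbMinRead bytes remain does the read deliberately cross into the
 *     // next page, which is why the failure path may invalidate two pages.
 */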
484
485
486#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
487DECLINLINE(int) emDisCoreOne(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
488{
489 NOREF(pVM);
490 return DISInstrWithReader(InstrGC, (DISCPUMODE)pDis->uCpuMode, emReadBytes, pVCpu, pDis, pOpsize);
491}
492#endif
493
494
495/**
496 * Disassembles the current instruction.
497 *
498 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
499 * details.
500 *
501 * @param pVM The cross context VM structure.
502 * @param pVCpu The cross context virtual CPU structure.
503 * @param pDis Where to return the parsed instruction info.
504 * @param pcbInstr Where to return the instruction size. (optional)
505 */
506VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
507{
508 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
509 RTGCPTR GCPtrInstr;
510#if 0
511 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
512#else
513/** @todo Get the CPU mode as well while we're at it! */
514 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
515 pCtxCore->rip, &GCPtrInstr);
516#endif
517 if (RT_FAILURE(rc))
518 {
519 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
520 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
521 return rc;
522 }
523 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
524}
525
526
527/**
528 * Disassembles one instruction.
529 *
530 * This is used internally by the interpreter and by trap/access handlers.
531 *
532 * @returns VBox status code.
533 *
534 * @param pVM The cross context VM structure.
535 * @param pVCpu The cross context virtual CPU structure.
536 * @param GCPtrInstr The flat address of the instruction.
537 * @param pCtxCore The context core (used to determine the cpu mode).
538 * @param pDis Where to return the parsed instruction info.
539 * @param pcbInstr Where to return the instruction size. (optional)
540 */
541VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
542 PDISCPUSTATE pDis, unsigned *pcbInstr)
543{
544 NOREF(pVM);
545 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
546 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
547 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
548 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
549 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
550 if (RT_SUCCESS(rc))
551 return VINF_SUCCESS;
552 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
553 return rc;
554}
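/*
 * Minimal caller sketch, e.g. a hypothetical trap handler that merely wants
 * to log the current instruction (pRegFrame is assumed to be in scope, error
 * handling trimmed):
 *
 *     DISCPUSTATE Dis;
 *     unsigned    cbInstr;
 *     int rc2 = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
 *     if (RT_SUCCESS(rc2))
 *         Log(("trap at %04x:%RGv: opcode %#x, %u bytes\n",
 *              pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, Dis.pCurInstr->uOpcode, cbInstr));
 */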
555
556
557#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
558static void emCompareWithIem(PVMCPU pVCpu, PCCPUMCTX pEmCtx, PCCPUMCTX pIemCtx,
559 VBOXSTRICTRC rcEm, VBOXSTRICTRC rcIem,
560 uint32_t cbEm, uint32_t cbIem)
561{
562 /* Quick compare. */
563 if ( rcEm == rcIem
564 && cbEm == cbIem
565 && g_cbEmWrote == g_cbIemWrote
566 && memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote) == 0
567 && memcmp(pIemCtx, pEmCtx, sizeof(*pIemCtx)) == 0
568 && (g_fEmFFs & g_fInterestingFFs) == (g_fIemFFs & g_fInterestingFFs)
569 )
570 return;
571
572 /* Report exact differences. */
573 RTLogPrintf("! EM and IEM differs at %04x:%08RGv !\n", g_IncomingCtx.cs.Sel, g_IncomingCtx.rip);
574 if (rcEm != rcIem)
575 RTLogPrintf(" * rcIem=%Rrc rcEm=%Rrc\n", VBOXSTRICTRC_VAL(rcIem), VBOXSTRICTRC_VAL(rcEm));
576 else if (cbEm != cbIem)
577 RTLogPrintf(" * cbIem=%#x cbEm=%#x\n", cbIem, cbEm);
578
579 if (RT_SUCCESS(rcEm) && RT_SUCCESS(rcIem))
580 {
581 if (g_cbIemWrote != g_cbEmWrote)
582 RTLogPrintf("!! g_cbIemWrote=%#x g_cbEmWrote=%#x\n", g_cbIemWrote, g_cbEmWrote);
583 else if (memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote))
584 {
585 RTLogPrintf("!! IemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
586 RTLogPrintf("!! EemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
587 }
588
589 if ((g_fEmFFs & g_fInterestingFFs) != (g_fIemFFs & g_fInterestingFFs))
590 RTLogPrintf("!! g_fIemFFs=%#x g_fEmFFs=%#x (diff=%#x)\n", g_fIemFFs & g_fInterestingFFs,
591 g_fEmFFs & g_fInterestingFFs, (g_fIemFFs ^ g_fEmFFs) & g_fInterestingFFs);
592
593# define CHECK_FIELD(a_Field) \
594 do \
595 { \
596 if (pEmCtx->a_Field != pIemCtx->a_Field) \
597 { \
598 switch (sizeof(pEmCtx->a_Field)) \
599 { \
600 case 1: RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
601 case 2: RTLogPrintf("!! %8s differs - iem=%04x - em=%04x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
602 case 4: RTLogPrintf("!! %8s differs - iem=%08x - em=%08x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
603 case 8: RTLogPrintf("!! %8s differs - iem=%016llx - em=%016llx\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
604 default: RTLogPrintf("!! %8s differs\n", #a_Field); break; \
605 } \
606 cDiffs++; \
607 } \
608 } while (0)
609
610# define CHECK_BIT_FIELD(a_Field) \
611 do \
612 { \
613 if (pEmCtx->a_Field != pIemCtx->a_Field) \
614 { \
615 RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); \
616 cDiffs++; \
617 } \
618 } while (0)
619
620# define CHECK_SEL(a_Sel) \
621 do \
622 { \
623 CHECK_FIELD(a_Sel.Sel); \
624 CHECK_FIELD(a_Sel.Attr.u); \
625 CHECK_FIELD(a_Sel.u64Base); \
626 CHECK_FIELD(a_Sel.u32Limit); \
627 CHECK_FIELD(a_Sel.fFlags); \
628 } while (0)
629
630 unsigned cDiffs = 0;
631 if (memcmp(&pEmCtx->fpu, &pIemCtx->fpu, sizeof(pIemCtx->fpu)))
632 {
633 RTLogPrintf(" the FPU state differs\n");
634 cDiffs++;
635 CHECK_FIELD(fpu.FCW);
636 CHECK_FIELD(fpu.FSW);
637 CHECK_FIELD(fpu.FTW);
638 CHECK_FIELD(fpu.FOP);
639 CHECK_FIELD(fpu.FPUIP);
640 CHECK_FIELD(fpu.CS);
641 CHECK_FIELD(fpu.Rsrvd1);
642 CHECK_FIELD(fpu.FPUDP);
643 CHECK_FIELD(fpu.DS);
644 CHECK_FIELD(fpu.Rsrvd2);
645 CHECK_FIELD(fpu.MXCSR);
646 CHECK_FIELD(fpu.MXCSR_MASK);
647 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
648 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
649 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
650 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
651 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
652 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
653 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
654 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
655 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
656 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
657 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
658 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
659 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
660 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
661 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
662 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
663 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
664 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
665 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
666 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
667 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
668 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
669 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
670 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
671 for (unsigned i = 0; i < RT_ELEMENTS(pEmCtx->fpu.au32RsrvdRest); i++)
672 CHECK_FIELD(fpu.au32RsrvdRest[i]);
673 }
674 CHECK_FIELD(rip);
675 if (pEmCtx->rflags.u != pIemCtx->rflags.u)
676 {
677 RTLogPrintf("!! rflags differs - iem=%08llx em=%08llx\n", pIemCtx->rflags.u, pEmCtx->rflags.u);
678 CHECK_BIT_FIELD(rflags.Bits.u1CF);
679 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
680 CHECK_BIT_FIELD(rflags.Bits.u1PF);
681 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
682 CHECK_BIT_FIELD(rflags.Bits.u1AF);
683 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
684 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
685 CHECK_BIT_FIELD(rflags.Bits.u1SF);
686 CHECK_BIT_FIELD(rflags.Bits.u1TF);
687 CHECK_BIT_FIELD(rflags.Bits.u1IF);
688 CHECK_BIT_FIELD(rflags.Bits.u1DF);
689 CHECK_BIT_FIELD(rflags.Bits.u1OF);
690 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
691 CHECK_BIT_FIELD(rflags.Bits.u1NT);
692 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
693 CHECK_BIT_FIELD(rflags.Bits.u1RF);
694 CHECK_BIT_FIELD(rflags.Bits.u1VM);
695 CHECK_BIT_FIELD(rflags.Bits.u1AC);
696 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
697 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
698 CHECK_BIT_FIELD(rflags.Bits.u1ID);
699 }
700
701 if (!g_fIgnoreRaxRdx)
702 CHECK_FIELD(rax);
703 CHECK_FIELD(rcx);
704 if (!g_fIgnoreRaxRdx)
705 CHECK_FIELD(rdx);
706 CHECK_FIELD(rbx);
707 CHECK_FIELD(rsp);
708 CHECK_FIELD(rbp);
709 CHECK_FIELD(rsi);
710 CHECK_FIELD(rdi);
711 CHECK_FIELD(r8);
712 CHECK_FIELD(r9);
713 CHECK_FIELD(r10);
714 CHECK_FIELD(r11);
715 CHECK_FIELD(r12);
716 CHECK_FIELD(r13);
717 CHECK_SEL(cs);
718 CHECK_SEL(ss);
719 CHECK_SEL(ds);
720 CHECK_SEL(es);
721 CHECK_SEL(fs);
722 CHECK_SEL(gs);
723 CHECK_FIELD(cr0);
724 CHECK_FIELD(cr2);
725 CHECK_FIELD(cr3);
726 CHECK_FIELD(cr4);
727 CHECK_FIELD(dr[0]);
728 CHECK_FIELD(dr[1]);
729 CHECK_FIELD(dr[2]);
730 CHECK_FIELD(dr[3]);
731 CHECK_FIELD(dr[6]);
732 CHECK_FIELD(dr[7]);
733 CHECK_FIELD(gdtr.cbGdt);
734 CHECK_FIELD(gdtr.pGdt);
735 CHECK_FIELD(idtr.cbIdt);
736 CHECK_FIELD(idtr.pIdt);
737 CHECK_SEL(ldtr);
738 CHECK_SEL(tr);
739 CHECK_FIELD(SysEnter.cs);
740 CHECK_FIELD(SysEnter.eip);
741 CHECK_FIELD(SysEnter.esp);
742 CHECK_FIELD(msrEFER);
743 CHECK_FIELD(msrSTAR);
744 CHECK_FIELD(msrPAT);
745 CHECK_FIELD(msrLSTAR);
746 CHECK_FIELD(msrCSTAR);
747 CHECK_FIELD(msrSFMASK);
748 CHECK_FIELD(msrKERNELGSBASE);
749
750# undef CHECK_FIELD
751# undef CHECK_BIT_FIELD
752 }
753}
754#endif /* VBOX_COMPARE_IEM_FIRST || VBOX_COMPARE_IEM_LAST */
755
756
757/**
758 * Interprets the current instruction.
759 *
760 * @returns VBox status code.
761 * @retval VINF_* Scheduling instructions.
762 * @retval VERR_EM_INTERPRETER Something we can't cope with.
763 * @retval VERR_* Fatal errors.
764 *
765 * @param pVCpu The cross context virtual CPU structure.
766 * @param pRegFrame The register frame.
767 * Updates the EIP if an instruction was executed successfully.
768 * @param pvFault The fault address (CR2).
769 *
770 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
771 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
772 * to worry about e.g. invalid modrm combinations (!)
773 */
774VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
775{
776 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
777 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
778#ifdef VBOX_WITH_IEM
779 NOREF(pvFault);
780
781# ifdef VBOX_COMPARE_IEM_AND_EM
782 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
783 g_IncomingCtx = *pCtx;
784 g_fIncomingFFs = pVCpu->fLocalForcedActions;
785 g_cbEmWrote = g_cbIemWrote = 0;
786
787# ifdef VBOX_COMPARE_IEM_FIRST
788 /* IEM */
789 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
790 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
791 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
792 rcIem = VERR_EM_INTERPRETER;
793 g_IemCtx = *pCtx;
794 g_fIemFFs = pVCpu->fLocalForcedActions;
795 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
796 *pCtx = g_IncomingCtx;
797# endif
798
799 /* EM */
800 RTGCPTR pbCode;
801 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
802 if (RT_SUCCESS(rcEm))
803 {
804 uint32_t cbOp;
805 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
806 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
807 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
808 if (RT_SUCCESS(rcEm))
809 {
810 Assert(cbOp == pDis->cbInstr);
811 uint32_t cbIgnored;
812 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
813 if (RT_SUCCESS(rcEm))
814 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
815
816 }
817 else rcEm = VERR_EM_INTERPRETER;
818 }
819 else
820 rcEm = VERR_EM_INTERPRETER;
821# ifdef VBOX_SAME_AS_EM
822 if (rcEm == VERR_EM_INTERPRETER)
823 {
824 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
825 return rcEm;
826 }
827# endif
828 g_EmCtx = *pCtx;
829 g_fEmFFs = pVCpu->fLocalForcedActions;
830 VBOXSTRICTRC rc = rcEm;
831
832# ifdef VBOX_COMPARE_IEM_LAST
833 /* IEM */
834 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
835 *pCtx = g_IncomingCtx;
836 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
837 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
838 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
839 rcIem = VERR_EM_INTERPRETER;
840 g_IemCtx = *pCtx;
841 g_fIemFFs = pVCpu->fLocalForcedActions;
842 rc = rcIem;
843# endif
844
845# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
846 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
847# endif
848
849# else
850 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
851 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
852 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
853 rc = VERR_EM_INTERPRETER;
854# endif
855 if (rc != VINF_SUCCESS)
856 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
857
858 return rc;
859#else
860 RTGCPTR pbCode;
861 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
862 if (RT_SUCCESS(rc))
863 {
864 uint32_t cbOp;
865 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
866 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
867 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
868 if (RT_SUCCESS(rc))
869 {
870 Assert(cbOp == pDis->cbInstr);
871 uint32_t cbIgnored;
872 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
873 if (RT_SUCCESS(rc))
874 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
875
876 return rc;
877 }
878 }
879 return VERR_EM_INTERPRETER;
880#endif
881}
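/*
 * Usage sketch, assuming a hypothetical ring-0 exit handler (pCtx and
 * GCPtrFault are illustrative): interpret the instruction that caused the
 * exit and fall back when the interpreter cannot cope.
 *
 *     VBOXSTRICTRC rcStrict = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), GCPtrFault);
 *     if (rcStrict == VERR_EM_INTERPRETER)
 *         rcStrict = VINF_EM_RAW_EMULATE_INSTR;   // let ring-3 / IEM have a go
 */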
882
883
884/**
885 * Interprets the current instruction.
886 *
887 * @returns VBox status code.
888 * @retval VINF_* Scheduling instructions.
889 * @retval VERR_EM_INTERPRETER Something we can't cope with.
890 * @retval VERR_* Fatal errors.
891 *
892 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
893 * @param pRegFrame The register frame.
894 * Updates the EIP if an instruction was executed successfully.
895 * @param pvFault The fault address (CR2).
896 * @param pcbWritten Size of the write (if applicable).
897 *
898 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
899 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
900 * to worry about e.g. invalid modrm combinations (!)
901 */
902VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
903{
904 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
905 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
906#ifdef VBOX_WITH_IEM
907 NOREF(pvFault);
908
909# ifdef VBOX_COMPARE_IEM_AND_EM
910 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
911 g_IncomingCtx = *pCtx;
912 g_fIncomingFFs = pVCpu->fLocalForcedActions;
913 g_cbEmWrote = g_cbIemWrote = 0;
914
915# ifdef VBOX_COMPARE_IEM_FIRST
916 /* IEM */
917 uint32_t cbIemWritten = 0;
918 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
919 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
920 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
921 rcIem = VERR_EM_INTERPRETER;
922 g_IemCtx = *pCtx;
923 g_fIemFFs = pVCpu->fLocalForcedActions;
924 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
925 *pCtx = g_IncomingCtx;
926# endif
927
928 /* EM */
929 uint32_t cbEmWritten = 0;
930 RTGCPTR pbCode;
931 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
932 if (RT_SUCCESS(rcEm))
933 {
934 uint32_t cbOp;
935 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
936 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
937 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
938 if (RT_SUCCESS(rcEm))
939 {
940 Assert(cbOp == pDis->cbInstr);
941 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbEmWritten);
942 if (RT_SUCCESS(rcEm))
943 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
944
945 }
946 else
947 rcEm = VERR_EM_INTERPRETER;
948 }
949 else
950 rcEm = VERR_EM_INTERPRETER;
951# ifdef VBOX_SAME_AS_EM
952 if (rcEm == VERR_EM_INTERPRETER)
953 {
954 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
955 return rcEm;
956 }
957# endif
958 g_EmCtx = *pCtx;
959 g_fEmFFs = pVCpu->fLocalForcedActions;
960 *pcbWritten = cbEmWritten;
961 VBOXSTRICTRC rc = rcEm;
962
963# ifdef VBOX_COMPARE_IEM_LAST
964 /* IEM */
965 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
966 *pCtx = g_IncomingCtx;
967 uint32_t cbIemWritten = 0;
968 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
969 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
970 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
971 rcIem = VERR_EM_INTERPRETER;
972 g_IemCtx = *pCtx;
973 g_fIemFFs = pVCpu->fLocalForcedActions;
974 *pcbWritten = cbIemWritten;
975 rc = rcIem;
976# endif
977
978# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
979 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, cbEmWritten, cbIemWritten);
980# endif
981
982# else
983 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
984 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
985 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
986 rc = VERR_EM_INTERPRETER;
987# endif
988 if (rc != VINF_SUCCESS)
989 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
990
991 return rc;
992#else
993 RTGCPTR pbCode;
994 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
995 if (RT_SUCCESS(rc))
996 {
997 uint32_t cbOp;
998 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
999 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1000 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1001 if (RT_SUCCESS(rc))
1002 {
1003 Assert(cbOp == pDis->cbInstr);
1004 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, pcbWritten);
1005 if (RT_SUCCESS(rc))
1006 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1007
1008 return rc;
1009 }
1010 }
1011 return VERR_EM_INTERPRETER;
1012#endif
1013}
1014
1015
1016/**
1017 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1018 *
1019 * IP/EIP/RIP *IS* updated!
1020 *
1021 * @returns VBox strict status code.
1022 * @retval VINF_* Scheduling instructions. When these are returned, it
1023 * starts to get a bit tricky to know whether code was
1024 * executed or not... We'll address this when it becomes a problem.
1025 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1026 * @retval VERR_* Fatal errors.
1027 *
1028 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1029 * @param pDis The disassembler cpu state for the instruction to be
1030 * interpreted.
1031 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1032 * @param pvFault The fault address (CR2).
1033 * @param enmCodeType Code type (user/supervisor)
1034 *
1035 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1036 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1037 * to worry about e.g. invalid modrm combinations (!)
1038 *
1039 * @todo At this time we do NOT check if the instruction overwrites vital information.
1040 * Make sure this can't happen!! (will add some assertions/checks later)
1041 */
1042VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
1043 RTGCPTR pvFault, EMCODETYPE enmCodeType)
1044{
1045 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1046 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1047#ifdef VBOX_WITH_IEM
1048 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
1049
1050# ifdef VBOX_COMPARE_IEM_AND_EM
1051 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1052 g_IncomingCtx = *pCtx;
1053 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1054 g_cbEmWrote = g_cbIemWrote = 0;
1055
1056# ifdef VBOX_COMPARE_IEM_FIRST
1057 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1058 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1059 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1060 rcIem = VERR_EM_INTERPRETER;
1061 g_IemCtx = *pCtx;
1062 g_fIemFFs = pVCpu->fLocalForcedActions;
1063 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1064 *pCtx = g_IncomingCtx;
1065# endif
1066
1067 /* EM */
1068 uint32_t cbIgnored;
1069 VBOXSTRICTRC rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1070 if (RT_SUCCESS(rcEm))
1071 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1072# ifdef VBOX_SAME_AS_EM
1073 if (rcEm == VERR_EM_INTERPRETER)
1074 {
1075 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1076 return rcEm;
1077 }
1078# endif
1079 g_EmCtx = *pCtx;
1080 g_fEmFFs = pVCpu->fLocalForcedActions;
1081 VBOXSTRICTRC rc = rcEm;
1082
1083# ifdef VBOX_COMPARE_IEM_LAST
1084 /* IEM */
1085 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1086 *pCtx = g_IncomingCtx;
1087 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1088 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1089 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1090 rcIem = VERR_EM_INTERPRETER;
1091 g_IemCtx = *pCtx;
1092 g_fIemFFs = pVCpu->fLocalForcedActions;
1093 rc = rcIem;
1094# endif
1095
1096# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1097 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1098# endif
1099
1100# else
1101 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1102 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1103 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1104 rc = VERR_EM_INTERPRETER;
1105# endif
1106
1107 if (rc != VINF_SUCCESS)
1108 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1109
1110 return rc;
1111#else
1112 uint32_t cbIgnored;
1113 VBOXSTRICTRC rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1114 if (RT_SUCCESS(rc))
1115 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1116 return rc;
1117#endif
1118}
1119
1120#ifdef IN_RC
1121
1122DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1123{
1124 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1125 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1126 return rc;
1127 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1128}
1129
1130
1131/**
1132 * Interpret IRET (currently only to V86 code) - PATM only.
1133 *
1134 * @returns VBox status code.
1135 * @param pVM The cross context VM structure.
1136 * @param pVCpu The cross context virtual CPU structure.
1137 * @param pRegFrame The register frame.
1138 *
1139 */
1140VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1141{
1142 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1143 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1144 int rc;
1145
1146 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1147 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1148 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1149 * this function. Fear that it may guru on us, thus not converted to
1150 * IEM. */
1151
1152 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1153 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1154 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1155 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1156 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1157
1158 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1159 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1160 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1161 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1162 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1163 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1164 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1165
1166 pRegFrame->eip = eip & 0xffff;
1167 pRegFrame->cs.Sel = cs;
1168
1169 /* Mask away all reserved bits */
1170 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1171 eflags &= uMask;
1172
1173 CPUMRawSetEFlags(pVCpu, eflags);
1174 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1175
1176 pRegFrame->esp = esp;
1177 pRegFrame->ss.Sel = ss;
1178 pRegFrame->ds.Sel = ds;
1179 pRegFrame->es.Sel = es;
1180 pRegFrame->fs.Sel = fs;
1181 pRegFrame->gs.Sel = gs;
1182
1183 return VINF_SUCCESS;
1184}
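/*
 * For reference, the V86-mode IRET frame popped above has the following
 * layout (an illustrative struct; the code above reads the fields one by
 * one with emRCStackRead):
 *
 *     struct V86IRETFRAME
 *     {
 *         uint32_t eip;       // +0x00
 *         uint32_t cs;        // +0x04
 *         uint32_t eflags;    // +0x08  (X86_EFL_VM must be set)
 *         uint32_t esp;       // +0x0c
 *         uint32_t ss;        // +0x10
 *         uint32_t es;        // +0x14
 *         uint32_t ds;        // +0x18
 *         uint32_t fs;        // +0x1c
 *         uint32_t gs;        // +0x20
 *     };
 */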
1185
1186# ifndef VBOX_WITH_IEM
1187/**
1188 * IRET Emulation.
1189 */
1190static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1191{
1192#ifdef VBOX_WITH_RAW_RING1
1193 NOREF(pvFault); NOREF(pcbSize); NOREF(pDis);
1194 if (EMIsRawRing1Enabled(pVM))
1195 {
1196 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1197 RTGCUINTPTR eip, cs, esp, ss, eflags, uMask;
1198 int rc;
1199 uint32_t cpl, rpl;
1200
1201 /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */
1202 /** @todo we don't verify all the edge cases that generate #GP faults */
1203
1204 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1205 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1206 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1207 * this function. Fear that it may guru on us, thus not converted to
1208 * IEM. */
1209
1210 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1211 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1212 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1213 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1214 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1215
1216 /* Deal with V86 above. */
1217 if (eflags & X86_EFL_VM)
1218 return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
1219
1220 cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
1221 rpl = cs & X86_SEL_RPL;
1222
1223 Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags));
1224 if (rpl != cpl)
1225 {
1226 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1227 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1228 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1229 Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl));
1230 Log(("emInterpretIret: SS:ESP=%04x:%08x\n", ss, esp));
1231 pRegFrame->ss.Sel = ss;
1232 pRegFrame->esp = esp;
1233 }
1234 pRegFrame->cs.Sel = cs;
1235 pRegFrame->eip = eip;
1236
1237 /* Adjust CS & SS as required. */
1238 CPUMRCRecheckRawState(pVCpu, pRegFrame);
1239
1240 /* Mask away all reserved bits */
1241 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1242 eflags &= uMask;
1243
1244 CPUMRawSetEFlags(pVCpu, eflags);
1245 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1246 return VINF_SUCCESS;
1247 }
1248#else
1249 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
1250#endif
1251 return VERR_EM_INTERPRETER;
1252}
1253# endif /* !VBOX_WITH_IEM */
1254
1255#endif /* IN_RC */
1256
1257
1258
1259/*
1260 *
1261 * Old interpreter primitives used by HM, move/eliminate later.
1262 * Old interpreter primitives used by HM, move/eliminate later.
1263 * Old interpreter primitives used by HM, move/eliminate later.
1264 * Old interpreter primitives used by HM, move/eliminate later.
1265 * Old interpreter primitives used by HM, move/eliminate later.
1266 *
1267 */
1268
1269
1270/**
1271 * Interpret CPUID given the parameters in the CPU context.
1272 *
1273 * @returns VBox status code.
1274 * @param pVM The cross context VM structure.
1275 * @param pVCpu The cross context virtual CPU structure.
1276 * @param pRegFrame The register frame.
1277 *
1278 */
1279VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1280{
1281 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1282 uint32_t iLeaf = pRegFrame->eax;
1283 uint32_t iSubLeaf = pRegFrame->ecx;
1284 NOREF(pVM);
1285
1286 /* cpuid clears the high dwords of the affected 64 bits registers. */
1287 pRegFrame->rax = 0;
1288 pRegFrame->rbx = 0;
1289 pRegFrame->rcx = 0;
1290 pRegFrame->rdx = 0;
1291
1292 /* Note: operates the same in 64 and non-64 bits mode. */
1293 CPUMGetGuestCpuId(pVCpu, iLeaf, iSubLeaf, &pRegFrame->eax, &pRegFrame->ebx, &pRegFrame->ecx, &pRegFrame->edx);
1294 Log(("Emulate: CPUID %x/%x -> %08x %08x %08x %08x\n", iLeaf, iSubLeaf, pRegFrame->eax, pRegFrame->ebx, pRegFrame->ecx, pRegFrame->edx));
1295 return VINF_SUCCESS;
1296}
1297
1298
1299/**
1300 * Interpret RDTSC.
1301 *
1302 * @returns VBox status code.
1303 * @param pVM The cross context VM structure.
1304 * @param pVCpu The cross context virtual CPU structure.
1305 * @param pRegFrame The register frame.
1306 *
1307 */
1308VMM_INT_DECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1309{
1310 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1311 unsigned uCR4 = CPUMGetGuestCR4(pVCpu);
1312
1313 if (uCR4 & X86_CR4_TSD)
1314 return VERR_EM_INTERPRETER; /* genuine #GP */
1315
1316 uint64_t uTicks = TMCpuTickGet(pVCpu);
1317#ifdef VBOX_WITH_NESTED_HWVIRT
1318 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
1319#endif
1320
1321 /* Same behaviour in 32 & 64 bits mode */
1322 pRegFrame->rax = RT_LO_U32(uTicks);
1323 pRegFrame->rdx = RT_HI_U32(uTicks);
1324#ifdef VBOX_COMPARE_IEM_AND_EM
1325 g_fIgnoreRaxRdx = true;
1326#endif
1327
1328 NOREF(pVM);
1329 return VINF_SUCCESS;
1330}
1331
1332/**
1333 * Interpret RDTSCP.
1334 *
1335 * @returns VBox status code.
1336 * @param pVM The cross context VM structure.
1337 * @param pVCpu The cross context virtual CPU structure.
1338 * @param pCtx The CPU context.
1339 *
1340 */
1341VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1342{
1343 Assert(pCtx == CPUMQueryGuestCtxPtr(pVCpu));
1344 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1345
1346 if (!pVM->cpum.ro.GuestFeatures.fRdTscP)
1347 {
1348 AssertFailed();
1349 return VERR_EM_INTERPRETER; /* genuine #UD */
1350 }
1351
1352 if (uCR4 & X86_CR4_TSD)
1353 return VERR_EM_INTERPRETER; /* genuine #GP */
1354
1355 uint64_t uTicks = TMCpuTickGet(pVCpu);
1356#ifdef VBOX_WITH_NESTED_HWVIRT
1357 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
1358#endif
1359
1360 /* Same behaviour in 32 & 64 bits mode */
1361 pCtx->rax = RT_LO_U32(uTicks);
1362 pCtx->rdx = RT_HI_U32(uTicks);
1363#ifdef VBOX_COMPARE_IEM_AND_EM
1364 g_fIgnoreRaxRdx = true;
1365#endif
1366 /* Low dword of the TSC_AUX msr only. */
1367 VBOXSTRICTRC rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx); Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1368 pCtx->rcx &= UINT32_C(0xffffffff);
1369
1370 return VINF_SUCCESS;
1371}
1372
1373/**
1374 * Interpret RDPMC.
1375 *
1376 * @returns VBox status code.
1377 * @param pVM The cross context VM structure.
1378 * @param pVCpu The cross context virtual CPU structure.
1379 * @param pRegFrame The register frame.
1380 *
1381 */
1382VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1383{
1384 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1385 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1386
1387 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1388 if ( !(uCR4 & X86_CR4_PCE)
1389 && CPUMGetGuestCPL(pVCpu) != 0)
1390 {
1391 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1392 return VERR_EM_INTERPRETER; /* genuine #GP */
1393 }
1394
1395 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1396 pRegFrame->rax = 0;
1397 pRegFrame->rdx = 0;
1398 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1399 * ecx but see @bugref{3472}! */
1400
1401 NOREF(pVM);
1402 return VINF_SUCCESS;
1403}
1404
1405
1406/**
1407 * MWAIT Emulation.
1408 */
1409VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1410{
1411 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1412 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1413 NOREF(pVM);
1414
1415 /* Get the current privilege level. */
1416 cpl = CPUMGetGuestCPL(pVCpu);
1417 if (cpl != 0)
1418 return VERR_EM_INTERPRETER; /* supervisor only */
1419
1420 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1421 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1422 return VERR_EM_INTERPRETER; /* not supported */
1423
1424 /*
1425 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1426 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1427 */
1428 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1429 if (pRegFrame->ecx > 1)
1430 {
1431 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1432 return VERR_EM_INTERPRETER; /* illegal value. */
1433 }
1434
1435 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1436 {
1437 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1438 return VERR_EM_INTERPRETER; /* illegal value. */
1439 }
1440
1441 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1442}
1443
1444
1445/**
1446 * MONITOR Emulation.
1447 */
1448VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1449{
1450 uint32_t u32Dummy, u32ExtFeatures, cpl;
1451 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1452 NOREF(pVM);
1453
1454 if (pRegFrame->ecx != 0)
1455 {
1456 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1457 return VERR_EM_INTERPRETER; /* illegal value. */
1458 }
1459
1460 /* Get the current privilege level. */
1461 cpl = CPUMGetGuestCPL(pVCpu);
1462 if (cpl != 0)
1463 return VERR_EM_INTERPRETER; /* supervisor only */
1464
1465 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1466 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1467 return VERR_EM_INTERPRETER; /* not supported */
1468
1469 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1470 return VINF_SUCCESS;
1471}
1472
1473
1474/* VT-x only: */
1475
1476/**
1477 * Interpret INVLPG.
1478 *
1479 * @returns VBox status code.
1480 * @param pVM The cross context VM structure.
1481 * @param pVCpu The cross context virtual CPU structure.
1482 * @param pRegFrame The register frame.
1483 * @param pAddrGC Operand address.
1484 *
1485 */
1486VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC)
1487{
1488 /** @todo is addr always a flat linear address or ds based
1489 * (in absence of segment override prefixes)????
1490 */
1491 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1492 NOREF(pVM); NOREF(pRegFrame);
1493#ifdef IN_RC
1494 LogFlow(("RC: EMULATE: invlpg %RGv\n", pAddrGC));
1495#endif
1496 VBOXSTRICTRC rc = PGMInvalidatePage(pVCpu, pAddrGC);
1497 if ( rc == VINF_SUCCESS
1498 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
1499 return VINF_SUCCESS;
1500 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
1501 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), pAddrGC),
1502 VERR_EM_INTERPRETER);
1503 return rc;
1504}
1505
1506
1507#ifdef LOG_ENABLED
1508static const char *emMSRtoString(uint32_t uMsr)
1509{
1510 switch (uMsr)
1511 {
1512 case MSR_IA32_APICBASE: return "MSR_IA32_APICBASE";
1513 case MSR_IA32_CR_PAT: return "MSR_IA32_CR_PAT";
1514 case MSR_IA32_SYSENTER_CS: return "MSR_IA32_SYSENTER_CS";
1515 case MSR_IA32_SYSENTER_EIP: return "MSR_IA32_SYSENTER_EIP";
1516 case MSR_IA32_SYSENTER_ESP: return "MSR_IA32_SYSENTER_ESP";
1517 case MSR_K6_EFER: return "MSR_K6_EFER";
1518 case MSR_K8_SF_MASK: return "MSR_K8_SF_MASK";
1519 case MSR_K6_STAR: return "MSR_K6_STAR";
1520 case MSR_K8_LSTAR: return "MSR_K8_LSTAR";
1521 case MSR_K8_CSTAR: return "MSR_K8_CSTAR";
1522 case MSR_K8_FS_BASE: return "MSR_K8_FS_BASE";
1523 case MSR_K8_GS_BASE: return "MSR_K8_GS_BASE";
1524 case MSR_K8_KERNEL_GS_BASE: return "MSR_K8_KERNEL_GS_BASE";
1525 case MSR_K8_TSC_AUX: return "MSR_K8_TSC_AUX";
1526 case MSR_IA32_BIOS_SIGN_ID: return "Unsupported MSR_IA32_BIOS_SIGN_ID";
1527 case MSR_IA32_PLATFORM_ID: return "Unsupported MSR_IA32_PLATFORM_ID";
1528 case MSR_IA32_BIOS_UPDT_TRIG: return "Unsupported MSR_IA32_BIOS_UPDT_TRIG";
1529 case MSR_IA32_TSC: return "MSR_IA32_TSC";
1530 case MSR_IA32_MISC_ENABLE: return "MSR_IA32_MISC_ENABLE";
1531 case MSR_IA32_MTRR_CAP: return "MSR_IA32_MTRR_CAP";
1532 case MSR_IA32_MCG_CAP: return "Unsupported MSR_IA32_MCG_CAP";
1533 case MSR_IA32_MCG_STATUS: return "Unsupported MSR_IA32_MCG_STATUS";
1534 case MSR_IA32_MCG_CTRL: return "Unsupported MSR_IA32_MCG_CTRL";
1535 case MSR_IA32_MTRR_DEF_TYPE: return "MSR_IA32_MTRR_DEF_TYPE";
1536 case MSR_K7_EVNTSEL0: return "Unsupported MSR_K7_EVNTSEL0";
1537 case MSR_K7_EVNTSEL1: return "Unsupported MSR_K7_EVNTSEL1";
1538 case MSR_K7_EVNTSEL2: return "Unsupported MSR_K7_EVNTSEL2";
1539 case MSR_K7_EVNTSEL3: return "Unsupported MSR_K7_EVNTSEL3";
1540 case MSR_IA32_MC0_CTL: return "Unsupported MSR_IA32_MC0_CTL";
1541 case MSR_IA32_MC0_STATUS: return "Unsupported MSR_IA32_MC0_STATUS";
1542 case MSR_IA32_PERFEVTSEL0: return "Unsupported MSR_IA32_PERFEVTSEL0";
1543 case MSR_IA32_PERFEVTSEL1: return "Unsupported MSR_IA32_PERFEVTSEL1";
1544 case MSR_IA32_PERF_STATUS: return "MSR_IA32_PERF_STATUS";
1545 case MSR_IA32_PLATFORM_INFO: return "MSR_IA32_PLATFORM_INFO";
1546 case MSR_IA32_PERF_CTL: return "Unsupported MSR_IA32_PERF_CTL";
1547 case MSR_K7_PERFCTR0: return "Unsupported MSR_K7_PERFCTR0";
1548 case MSR_K7_PERFCTR1: return "Unsupported MSR_K7_PERFCTR1";
1549 case MSR_K7_PERFCTR2: return "Unsupported MSR_K7_PERFCTR2";
1550 case MSR_K7_PERFCTR3: return "Unsupported MSR_K7_PERFCTR3";
1551 case MSR_IA32_PMC0: return "Unsupported MSR_IA32_PMC0";
1552 case MSR_IA32_PMC1: return "Unsupported MSR_IA32_PMC1";
1553 case MSR_IA32_PMC2: return "Unsupported MSR_IA32_PMC2";
1554 case MSR_IA32_PMC3: return "Unsupported MSR_IA32_PMC3";
1555 }
1556 return "Unknown MSR";
1557}
1558#endif /* LOG_ENABLED */
1559
1560
1561/**
1562 * Interpret RDMSR
1563 *
1564 * @returns VBox status code.
1565 * @param pVM The cross context VM structure.
1566 * @param pVCpu The cross context virtual CPU structure.
1567 * @param pRegFrame The register frame.
1568 */
1569VMM_INT_DECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1570{
1571 NOREF(pVM);
1572
1573 /* Get the current privilege level. */
1574 if (CPUMGetGuestCPL(pVCpu) != 0)
1575 {
1576 Log4(("EM: Refuse RDMSR: CPL != 0\n"));
1577 return VERR_EM_INTERPRETER; /* supervisor only */
1578 }
1579
1580 uint64_t uValue;
1581 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
1582 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1583 {
1584 Log4(("EM: Refuse RDMSR: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1585 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_READ);
1586 return VERR_EM_INTERPRETER;
1587 }
1588 pRegFrame->rax = RT_LO_U32(uValue);
1589 pRegFrame->rdx = RT_HI_U32(uValue);
1590 LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
1591 return VINF_SUCCESS;
1592}
1593
1594
1595/**
1596 * Interpret WRMSR
1597 *
1598 * @returns VBox status code.
1599 * @param pVM The cross context VM structure.
1600 * @param pVCpu The cross context virtual CPU structure.
1601 * @param pRegFrame The register frame.
1602 */
1603VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1604{
1605 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1606
1607 /* Check the current privilege level; this instruction is supervisor-only. */
1608 if (CPUMGetGuestCPL(pVCpu) != 0)
1609 {
1610 Log4(("EM: Refuse WRMSR: CPL != 0\n"));
1611 return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
1612 }
1613
1614 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
1615 if (rcStrict != VINF_SUCCESS)
1616 {
1617 Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1618 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
1619 return VERR_EM_INTERPRETER;
1620 }
1621 LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
1622 RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
1623 NOREF(pVM);
1624 return VINF_SUCCESS;
1625}
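
/*
 * Note on the EDX:EAX convention used by EMInterpretRdmsr and EMInterpretWrmsr
 * above: RDMSR returns the 64-bit MSR value split across EDX (high half) and
 * EAX (low half), and WRMSR recombines the two halves the same way.  A minimal
 * standalone sketch with a hypothetical value (illustration only, not VMM code):
 *
 * @code
 *     uint64_t uMsrVal = UINT64_C(0x8000000000000d01);   // hypothetical MSR value
 *     uint32_t uEax    = RT_LO_U32(uMsrVal);             // low  32 bits -> EAX
 *     uint32_t uEdx    = RT_HI_U32(uMsrVal);             // high 32 bits -> EDX
 *     Assert(RT_MAKE_U64(uEax, uEdx) == uMsrVal);        // the WRMSR direction
 * @endcode
 */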
1626
1627
1628/**
1629 * Interpret DRx write.
1630 *
1631 * @returns VBox status code.
1632 * @param pVM The cross context VM structure.
1633 * @param pVCpu The cross context virtual CPU structure.
1634 * @param pRegFrame The register frame.
1635 * @param DestRegDrx DRx register index (USE_REG_DR*)
1636 * @param SrcRegGen General purpose register index (USE_REG_E**)
1637 *
1638 */
1639VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1640{
1641 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1642 uint64_t uNewDrX;
1643 int rc;
1644 NOREF(pVM);
1645
1646 if (CPUMIsGuestIn64BitCode(pVCpu))
1647 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1648 else
1649 {
1650 uint32_t val32;
1651 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1652 uNewDrX = val32;
1653 }
1654
1655 if (RT_SUCCESS(rc))
1656 {
1657 if (DestRegDrx == 6)
1658 {
1659 uNewDrX |= X86_DR6_RA1_MASK;
1660 uNewDrX &= ~X86_DR6_RAZ_MASK;
1661 }
1662 else if (DestRegDrx == 7)
1663 {
1664 uNewDrX |= X86_DR7_RA1_MASK;
1665 uNewDrX &= ~X86_DR7_RAZ_MASK;
1666 }
1667
1668 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1669 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1670 if (RT_SUCCESS(rc))
1671 return rc;
1672 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1673 }
1674 return VERR_EM_INTERPRETER;
1675}
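
/*
 * Note on the DR6/DR7 fixups in EMInterpretDRxWrite above: the architecture
 * defines bits in DR6 and DR7 that always read as one (RA1) and reserved bits
 * that read as zero (RAZ), so the raw guest value is normalised before it is
 * handed to CPUM.  A minimal sketch with a hypothetical guest value:
 *
 * @code
 *     uint64_t uGuestDr7 = 0;   // guest attempts to write 0 to DR7
 *     uint64_t uFixedDr7 = (uGuestDr7 | X86_DR7_RA1_MASK) & ~X86_DR7_RAZ_MASK;
 *     // uFixedDr7 now has the must-be-one bits set and the reserved
 *     // must-be-zero bits cleared; this is the value CPUMSetGuestDRx sees.
 * @endcode
 */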
1676
1677
1678/**
1679 * Interpret DRx read.
1680 *
1681 * @returns VBox status code.
1682 * @param pVM The cross context VM structure.
1683 * @param pVCpu The cross context virtual CPU structure.
1684 * @param pRegFrame The register frame.
1685 * @param DestRegGen General purpose register index (USE_REG_E**)
1686 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1687 */
1688VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1689{
1690 uint64_t val64;
1691 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1692 NOREF(pVM);
1693
1694 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1695 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1696 if (CPUMIsGuestIn64BitCode(pVCpu))
1697 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1698 else
1699 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1700
1701 if (RT_SUCCESS(rc))
1702 return VINF_SUCCESS;
1703
1704 return VERR_EM_INTERPRETER;
1705}
1706
1707
1708#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
1709
1710
1711
1712
1713
1714
1715/*
1716 *
1717 * The old interpreter.
1718 * The old interpreter.
1719 * The old interpreter.
1720 * The old interpreter.
1721 * The old interpreter.
1722 *
1723 */
1724
1725DECLINLINE(int) emRamRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1726{
1727#ifdef IN_RC
1728 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1729 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1730 return rc;
1731 /*
1732 * The page pool cache may end up here in some cases because it
1733 * flushed one of the shadow mappings used by the trapping
1734 * instruction and it either flushed the TLB or the CPU reused it.
1735 */
1736#else
1737 NOREF(pVM);
1738#endif
1739 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1740}
1741
1742
1743DECLINLINE(int) emRamWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, uint32_t cb)
1744{
1745 /* Don't use MMGCRamWrite here as it does not respect zero pages, shared
1746 pages, or write-monitored pages. */
1747 NOREF(pVM);
1748#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
1749 int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, /*fMayTrap*/ false);
1750#else
1751 int rc = VINF_SUCCESS;
1752#endif
1753#ifdef VBOX_COMPARE_IEM_AND_EM
1754 Log(("EM Wrote: %RGv %.*Rhxs rc=%Rrc\n", GCPtrDst, RT_MAX(RT_MIN(cb, 64), 1), pvSrc, rc));
1755 g_cbEmWrote = cb;
1756 memcpy(g_abEmWrote, pvSrc, RT_MIN(cb, sizeof(g_abEmWrote)));
1757#endif
1758 return rc;
1759}
1760
1761
1762/** Convert sel:addr to a flat GC address. */
1763DECLINLINE(RTGCPTR) emConvertToFlatAddr(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, PDISOPPARAM pParam, RTGCPTR pvAddr)
1764{
1765 DISSELREG enmPrefixSeg = DISDetectSegReg(pDis, pParam);
1766 return SELMToFlat(pVM, enmPrefixSeg, pRegFrame, pvAddr);
1767}
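
/*
 * Note on emConvertToFlatAddr above: DISDetectSegReg picks the effective
 * segment (honouring any segment override prefix) and SELMToFlat turns the
 * seg:offset pair into a flat linear address.  Conceptually, for an ordinary
 * data segment this boils down to "base + offset"; a rough sketch that ignores
 * the limit and attribute checks SELMToFlat performs (hypothetical helper,
 * illustration only):
 *
 * @code
 *     static RTGCPTR emFlatAddrSketch(uint64_t uSegBase, RTGCPTR offAddr)
 *     {
 *         return (RTGCPTR)(uSegBase + (RTGCUINTPTR)offAddr);  // flat = base + offset
 *     }
 * @endcode
 */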
1768
1769
1770#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1771/**
1772 * Get the mnemonic for the disassembled instruction.
1773 *
1774 * GC/R0 doesn't include the strings in the DIS tables because
1775 * of limited space.
1776 */
1777static const char *emGetMnemonic(PDISCPUSTATE pDis)
1778{
1779 switch (pDis->pCurInstr->uOpcode)
1780 {
1781 case OP_XCHG: return "Xchg";
1782 case OP_DEC: return "Dec";
1783 case OP_INC: return "Inc";
1784 case OP_POP: return "Pop";
1785 case OP_OR: return "Or";
1786 case OP_AND: return "And";
1787 case OP_MOV: return "Mov";
1788 case OP_INVLPG: return "InvlPg";
1789 case OP_CPUID: return "CpuId";
1790 case OP_MOV_CR: return "MovCRx";
1791 case OP_MOV_DR: return "MovDRx";
1792 case OP_LLDT: return "LLdt";
1793 case OP_LGDT: return "LGdt";
1794 case OP_LIDT: return "LIdt";
1795 case OP_CLTS: return "Clts";
1796 case OP_MONITOR: return "Monitor";
1797 case OP_MWAIT: return "MWait";
1798 case OP_RDMSR: return "Rdmsr";
1799 case OP_WRMSR: return "Wrmsr";
1800 case OP_ADD: return "Add";
1801 case OP_ADC: return "Adc";
1802 case OP_SUB: return "Sub";
1803 case OP_SBB: return "Sbb";
1804 case OP_RDTSC: return "Rdtsc";
1805 case OP_STI: return "Sti";
1806 case OP_CLI: return "Cli";
1807 case OP_XADD: return "XAdd";
1808 case OP_HLT: return "Hlt";
1809 case OP_IRET: return "Iret";
1810 case OP_MOVNTPS: return "MovNTPS";
1811 case OP_STOSWD: return "StosWD";
1812 case OP_WBINVD: return "WbInvd";
1813 case OP_XOR: return "Xor";
1814 case OP_BTR: return "Btr";
1815 case OP_BTS: return "Bts";
1816 case OP_BTC: return "Btc";
1817 case OP_LMSW: return "Lmsw";
1818 case OP_SMSW: return "Smsw";
1819 case OP_CMPXCHG: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg" : "CmpXchg";
1820 case OP_CMPXCHG8B: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg8b" : "CmpXchg8b";
1821
1822 default:
1823 Log(("Unknown opcode %d\n", pDis->pCurInstr->uOpcode));
1824 return "???";
1825 }
1826}
1827#endif /* VBOX_STRICT || LOG_ENABLED */
1828
1829
1830/**
1831 * XCHG instruction emulation.
1832 */
1833static int emInterpretXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1834{
1835 DISQPVPARAMVAL param1, param2;
1836 NOREF(pvFault);
1837
1838 /* Source to make DISQueryParamVal read the register value - ugly hack */
1839 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
1840 if(RT_FAILURE(rc))
1841 return VERR_EM_INTERPRETER;
1842
1843 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
1844 if(RT_FAILURE(rc))
1845 return VERR_EM_INTERPRETER;
1846
1847#ifdef IN_RC
1848 if (TRPMHasTrap(pVCpu))
1849 {
1850 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
1851 {
1852#endif
1853 RTGCPTR pParam1 = 0, pParam2 = 0;
1854 uint64_t valpar1, valpar2;
1855
1856 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
1857 switch(param1.type)
1858 {
1859 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
1860 valpar1 = param1.val.val64;
1861 break;
1862
1863 case DISQPV_TYPE_ADDRESS:
1864 pParam1 = (RTGCPTR)param1.val.val64;
1865 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
1866 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
1867 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
1868 if (RT_FAILURE(rc))
1869 {
1870 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1871 return VERR_EM_INTERPRETER;
1872 }
1873 break;
1874
1875 default:
1876 AssertFailed();
1877 return VERR_EM_INTERPRETER;
1878 }
1879
1880 switch(param2.type)
1881 {
1882 case DISQPV_TYPE_ADDRESS:
1883 pParam2 = (RTGCPTR)param2.val.val64;
1884 pParam2 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pParam2);
1885 EM_ASSERT_FAULT_RETURN(pParam2 == pvFault, VERR_EM_INTERPRETER);
1886 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar2, pParam2, param2.size);
1887 if (RT_FAILURE(rc))
1888 {
1889 AssertMsgFailedReturn(("emRamRead %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc), VERR_EM_INTERPRETER);
1890 }
1891 break;
1892
1893 case DISQPV_TYPE_IMMEDIATE:
1894 valpar2 = param2.val.val64;
1895 break;
1896
1897 default:
1898 AssertFailed();
1899 return VERR_EM_INTERPRETER;
1900 }
1901
1902 /* Write value of parameter 2 to parameter 1 (reg or memory address) */
1903 if (pParam1 == 0)
1904 {
1905 Assert(param1.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
1906 switch(param1.size)
1907 {
1908 case 1: //special case for AH etc
1909 rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t )valpar2); break;
1910 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)valpar2); break;
1911 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)valpar2); break;
1912 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, valpar2); break;
1913 default: AssertFailedReturn(VERR_EM_INTERPRETER);
1914 }
1915 if (RT_FAILURE(rc))
1916 return VERR_EM_INTERPRETER;
1917 }
1918 else
1919 {
1920 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar2, param1.size);
1921 if (RT_FAILURE(rc))
1922 {
1923 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1924 return VERR_EM_INTERPRETER;
1925 }
1926 }
1927
1928 /* Write value of parameter 1 to parameter 2 (reg or memory address) */
1929 if (pParam2 == 0)
1930 {
1931 Assert(param2.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
1932 switch(param2.size)
1933 {
1934 case 1: //special case for AH etc
1935 rc = DISWriteReg8(pRegFrame, pDis->Param2.Base.idxGenReg, (uint8_t )valpar1); break;
1936 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param2.Base.idxGenReg, (uint16_t)valpar1); break;
1937 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param2.Base.idxGenReg, (uint32_t)valpar1); break;
1938 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param2.Base.idxGenReg, valpar1); break;
1939 default: AssertFailedReturn(VERR_EM_INTERPRETER);
1940 }
1941 if (RT_FAILURE(rc))
1942 return VERR_EM_INTERPRETER;
1943 }
1944 else
1945 {
1946 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam2, &valpar1, param2.size);
1947 if (RT_FAILURE(rc))
1948 {
1949 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc));
1950 return VERR_EM_INTERPRETER;
1951 }
1952 }
1953
1954 *pcbSize = param2.size;
1955 return VINF_SUCCESS;
1956#ifdef IN_RC
1957 }
1958 }
1959 return VERR_EM_INTERPRETER;
1960#endif
1961}
1962
1963
1964/**
1965 * INC and DEC emulation.
1966 */
1967static int emInterpretIncDec(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
1968 PFNEMULATEPARAM2 pfnEmulate)
1969{
1970 DISQPVPARAMVAL param1;
1971 NOREF(pvFault);
1972
1973 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
1974 if(RT_FAILURE(rc))
1975 return VERR_EM_INTERPRETER;
1976
1977#ifdef IN_RC
1978 if (TRPMHasTrap(pVCpu))
1979 {
1980 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
1981 {
1982#endif
1983 RTGCPTR pParam1 = 0;
1984 uint64_t valpar1;
1985
1986 if (param1.type == DISQPV_TYPE_ADDRESS)
1987 {
1988 pParam1 = (RTGCPTR)param1.val.val64;
1989 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
1990#ifdef IN_RC
1991 /* Safety check (in theory it could cross a page boundary and fault there though) */
1992 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
1993#endif
1994 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
1995 if (RT_FAILURE(rc))
1996 {
1997 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1998 return VERR_EM_INTERPRETER;
1999 }
2000 }
2001 else
2002 {
2003 AssertFailed();
2004 return VERR_EM_INTERPRETER;
2005 }
2006
2007 uint32_t eflags;
2008
2009 eflags = pfnEmulate(&valpar1, param1.size);
2010
2011 /* Write result back */
2012 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2013 if (RT_FAILURE(rc))
2014 {
2015 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2016 return VERR_EM_INTERPRETER;
2017 }
2018
2019 /* Update guest's eflags and finish. */
2020 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2021 | (eflags & (X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2022
2023 /* All done! */
2024 *pcbSize = param1.size;
2025 return VINF_SUCCESS;
2026#ifdef IN_RC
2027 }
2028 }
2029 return VERR_EM_INTERPRETER;
2030#endif
2031}
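
/*
 * Note on the EFLAGS merge used by emInterpretIncDec above (and by the other
 * arithmetic emulations in this file): the EMEmulate* helpers return a full
 * flags image, but only the arithmetic status flags are folded back into the
 * guest context, leaving IF, DF and the system flags untouched.  INC/DEC
 * additionally leave CF out of the mask because those instructions do not
 * affect it.  A minimal sketch with hypothetical values:
 *
 * @code
 *     uint32_t const fStatus = X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF
 *                            | X86_EFL_SF | X86_EFL_OF;        // no CF for INC/DEC
 *     uint32_t uGuestEfl     = 0x00000246;                     // hypothetical guest EFLAGS
 *     uint32_t uHelperEfl    = 0x00000893;                     // hypothetical helper result
 *     uGuestEfl = (uGuestEfl & ~fStatus) | (uHelperEfl & fStatus);
 * @endcode
 */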
2032
2033
2034/**
2035 * POP Emulation.
2036 */
2037static int emInterpretPop(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2038{
2039 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
2040 DISQPVPARAMVAL param1;
2041 NOREF(pvFault);
2042
2043 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2044 if(RT_FAILURE(rc))
2045 return VERR_EM_INTERPRETER;
2046
2047#ifdef IN_RC
2048 if (TRPMHasTrap(pVCpu))
2049 {
2050 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2051 {
2052#endif
2053 RTGCPTR pParam1 = 0;
2054 uint32_t valpar1;
2055 RTGCPTR pStackVal;
2056
2057 /* Read stack value first */
2058 if (CPUMGetGuestCodeBits(pVCpu) == 16)
2059 return VERR_EM_INTERPRETER; /* No legacy 16 bits stuff here, please. */
2060
2061 /* Convert address; don't bother checking limits etc, as we only read here */
2062 pStackVal = SELMToFlat(pVM, DISSELREG_SS, pRegFrame, (RTGCPTR)pRegFrame->esp);
2063 if (pStackVal == 0)
2064 return VERR_EM_INTERPRETER;
2065
2066 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pStackVal, param1.size);
2067 if (RT_FAILURE(rc))
2068 {
2069 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pStackVal, param1.size, rc));
2070 return VERR_EM_INTERPRETER;
2071 }
2072
2073 if (param1.type == DISQPV_TYPE_ADDRESS)
2074 {
2075 pParam1 = (RTGCPTR)param1.val.val64;
2076
2077 /* pop [esp+xx] uses esp after the actual pop! */
2078 AssertCompile(DISGREG_ESP == DISGREG_SP);
2079 if ( (pDis->Param1.fUse & DISUSE_BASE)
2080 && (pDis->Param1.fUse & (DISUSE_REG_GEN16|DISUSE_REG_GEN32))
2081 && pDis->Param1.Base.idxGenReg == DISGREG_ESP
2082 )
2083 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + param1.size);
2084
2085 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2086 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault || (RTGCPTR)pRegFrame->esp == pvFault, VERR_EM_INTERPRETER);
2087 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2088 if (RT_FAILURE(rc))
2089 {
2090 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2091 return VERR_EM_INTERPRETER;
2092 }
2093
2094 /* Update ESP as the last step */
2095 pRegFrame->esp += param1.size;
2096 }
2097 else
2098 {
2099#ifndef DEBUG_bird // annoying assertion.
2100 AssertFailed();
2101#endif
2102 return VERR_EM_INTERPRETER;
2103 }
2104
2105 /* All done! */
2106 *pcbSize = param1.size;
2107 return VINF_SUCCESS;
2108#ifdef IN_RC
2109 }
2110 }
2111 return VERR_EM_INTERPRETER;
2112#endif
2113}
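
/*
 * Note on the "pop [esp+xx]" adjustment in emInterpretPop above: POP pops the
 * value first, so when the memory destination is addressed relative to ESP the
 * effective address is computed with the already incremented ESP.  A worked
 * example with hypothetical values:
 *
 * @code
 *     // Guest: ESP = 0x1000, operand size = 4, instruction: pop dword [esp+8]
 *     uint32_t const cbOp     = 4;
 *     uint32_t const uEspPre  = 0x1000;             // ESP when the pop starts
 *     uint32_t const uEspPost = uEspPre + cbOp;     // 0x1004, used for [esp+8]
 *     uint32_t const uDstAddr = uEspPost + 8;       // 0x100c, not 0x1008
 * @endcode
 */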
2114
2115
2116/**
2117 * XOR/OR/AND Emulation.
2118 */
2119static int emInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2120 PFNEMULATEPARAM3 pfnEmulate)
2121{
2122 DISQPVPARAMVAL param1, param2;
2123 NOREF(pvFault);
2124
2125 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2126 if(RT_FAILURE(rc))
2127 return VERR_EM_INTERPRETER;
2128
2129 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2130 if(RT_FAILURE(rc))
2131 return VERR_EM_INTERPRETER;
2132
2133#ifdef IN_RC
2134 if (TRPMHasTrap(pVCpu))
2135 {
2136 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2137 {
2138#endif
2139 RTGCPTR pParam1;
2140 uint64_t valpar1, valpar2;
2141
2142 if (pDis->Param1.cb != pDis->Param2.cb)
2143 {
2144 if (pDis->Param1.cb < pDis->Param2.cb)
2145 {
2146 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2147 return VERR_EM_INTERPRETER;
2148 }
2149 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2150 pDis->Param2.cb = pDis->Param1.cb;
2151 param2.size = param1.size;
2152 }
2153
2154 /* The destination is always a virtual address */
2155 if (param1.type == DISQPV_TYPE_ADDRESS)
2156 {
2157 pParam1 = (RTGCPTR)param1.val.val64;
2158 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2159 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2160 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2161 if (RT_FAILURE(rc))
2162 {
2163 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2164 return VERR_EM_INTERPRETER;
2165 }
2166 }
2167 else
2168 {
2169 AssertFailed();
2170 return VERR_EM_INTERPRETER;
2171 }
2172
2173 /* Register or immediate data */
2174 switch(param2.type)
2175 {
2176 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2177 valpar2 = param2.val.val64;
2178 break;
2179
2180 default:
2181 AssertFailed();
2182 return VERR_EM_INTERPRETER;
2183 }
2184
2185 LogFlow(("emInterpretOrXorAnd %s %RGv %RX64 - %RX64 size %d (%d)\n", emGetMnemonic(pDis), pParam1, valpar1, valpar2, param2.size, param1.size));
2186
2187 /* Data read, emulate instruction. */
2188 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2189
2190 LogFlow(("emInterpretOrXorAnd %s result %RX64\n", emGetMnemonic(pDis), valpar1));
2191
2192 /* Update guest's eflags and finish. */
2193 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2194 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2195
2196 /* And write it back */
2197 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2198 if (RT_SUCCESS(rc))
2199 {
2200 /* All done! */
2201 *pcbSize = param2.size;
2202 return VINF_SUCCESS;
2203 }
2204#ifdef IN_RC
2205 }
2206 }
2207#endif
2208 return VERR_EM_INTERPRETER;
2209}
2210
2211
2212#ifndef VBOX_COMPARE_IEM_AND_EM
2213/**
2214 * LOCK XOR/OR/AND Emulation.
2215 */
2216static int emInterpretLockOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2217 uint32_t *pcbSize, PFNEMULATELOCKPARAM3 pfnEmulate)
2218{
2219 void *pvParam1;
2220 DISQPVPARAMVAL param1, param2;
2221 NOREF(pvFault);
2222
2223#if HC_ARCH_BITS == 32
2224 Assert(pDis->Param1.cb <= 4);
2225#endif
2226
2227 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2228 if(RT_FAILURE(rc))
2229 return VERR_EM_INTERPRETER;
2230
2231 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2232 if(RT_FAILURE(rc))
2233 return VERR_EM_INTERPRETER;
2234
2235 if (pDis->Param1.cb != pDis->Param2.cb)
2236 {
2237 AssertMsgReturn(pDis->Param1.cb >= pDis->Param2.cb, /* should never happen! */
2238 ("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb),
2239 VERR_EM_INTERPRETER);
2240
2241 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2242 pDis->Param2.cb = pDis->Param1.cb;
2243 param2.size = param1.size;
2244 }
2245
2246#ifdef IN_RC
2247 /* Safety check (in theory it could cross a page boundary and fault there though) */
2248 Assert( TRPMHasTrap(pVCpu)
2249 && (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW));
2250 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
2251#endif
2252
2253 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2254 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2255 RTGCUINTREG ValPar2 = param2.val.val64;
2256
2257 /* The destination is always a virtual address */
2258 AssertReturn(param1.type == DISQPV_TYPE_ADDRESS, VERR_EM_INTERPRETER);
2259
2260 RTGCPTR GCPtrPar1 = param1.val.val64;
2261 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2262 PGMPAGEMAPLOCK Lock;
2263 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2264 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2265
2266 /* Try emulate it with a one-shot #PF handler in place. (RC) */
2267 Log2(("%s %RGv imm%d=%RX64\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2268
2269 RTGCUINTREG32 eflags = 0;
2270 rc = pfnEmulate(pvParam1, ValPar2, pDis->Param2.cb, &eflags);
2271 PGMPhysReleasePageMappingLock(pVM, &Lock);
2272 if (RT_FAILURE(rc))
2273 {
2274 Log(("%s %RGv imm%d=%RX64-> emulation failed due to page fault!\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2275 return VERR_EM_INTERPRETER;
2276 }
2277
2278 /* Update guest's eflags and finish. */
2279 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2280 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2281
2282 *pcbSize = param2.size;
2283 return VINF_SUCCESS;
2284}
2285#endif /* !VBOX_COMPARE_IEM_AND_EM */
2286
2287
2288/**
2289 * ADD, ADC & SUB Emulation.
2290 */
2291static int emInterpretAddSub(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2292 PFNEMULATEPARAM3 pfnEmulate)
2293{
2294 NOREF(pvFault);
2295 DISQPVPARAMVAL param1, param2;
2296 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2297 if(RT_FAILURE(rc))
2298 return VERR_EM_INTERPRETER;
2299
2300 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2301 if(RT_FAILURE(rc))
2302 return VERR_EM_INTERPRETER;
2303
2304#ifdef IN_RC
2305 if (TRPMHasTrap(pVCpu))
2306 {
2307 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2308 {
2309#endif
2310 RTGCPTR pParam1;
2311 uint64_t valpar1, valpar2;
2312
2313 if (pDis->Param1.cb != pDis->Param2.cb)
2314 {
2315 if (pDis->Param1.cb < pDis->Param2.cb)
2316 {
2317 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2318 return VERR_EM_INTERPRETER;
2319 }
2320 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2321 pDis->Param2.cb = pDis->Param1.cb;
2322 param2.size = param1.size;
2323 }
2324
2325 /* The destination is always a virtual address */
2326 if (param1.type == DISQPV_TYPE_ADDRESS)
2327 {
2328 pParam1 = (RTGCPTR)param1.val.val64;
2329 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2330 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2331 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2332 if (RT_FAILURE(rc))
2333 {
2334 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2335 return VERR_EM_INTERPRETER;
2336 }
2337 }
2338 else
2339 {
2340#ifndef DEBUG_bird
2341 AssertFailed();
2342#endif
2343 return VERR_EM_INTERPRETER;
2344 }
2345
2346 /* Register or immediate data */
2347 switch(param2.type)
2348 {
2349 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2350 valpar2 = param2.val.val64;
2351 break;
2352
2353 default:
2354 AssertFailed();
2355 return VERR_EM_INTERPRETER;
2356 }
2357
2358 /* Data read, emulate instruction. */
2359 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2360
2361 /* Update guest's eflags and finish. */
2362 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2363 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2364
2365 /* And write it back */
2366 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2367 if (RT_SUCCESS(rc))
2368 {
2369 /* All done! */
2370 *pcbSize = param2.size;
2371 return VINF_SUCCESS;
2372 }
2373#ifdef IN_RC
2374 }
2375 }
2376#endif
2377 return VERR_EM_INTERPRETER;
2378}
2379
2380
2381/**
2382 * ADC Emulation.
2383 */
2384static int emInterpretAdc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2385{
2386 if (pRegFrame->eflags.Bits.u1CF)
2387 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdcWithCarrySet);
2388 else
2389 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdd);
2390}
2391
2392
2393/**
2394 * BTR/C/S Emulation.
2395 */
2396static int emInterpretBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2397 PFNEMULATEPARAM2UINT32 pfnEmulate)
2398{
2399 DISQPVPARAMVAL param1, param2;
2400 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2401 if(RT_FAILURE(rc))
2402 return VERR_EM_INTERPRETER;
2403
2404 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2405 if(RT_FAILURE(rc))
2406 return VERR_EM_INTERPRETER;
2407
2408#ifdef IN_RC
2409 if (TRPMHasTrap(pVCpu))
2410 {
2411 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2412 {
2413#endif
2414 RTGCPTR pParam1;
2415 uint64_t valpar1 = 0, valpar2;
2416 uint32_t eflags;
2417
2418 /* The destination is always a virtual address */
2419 if (param1.type != DISQPV_TYPE_ADDRESS)
2420 return VERR_EM_INTERPRETER;
2421
2422 pParam1 = (RTGCPTR)param1.val.val64;
2423 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2424
2425 /* Register or immediate data */
2426 switch(param2.type)
2427 {
2428 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2429 valpar2 = param2.val.val64;
2430 break;
2431
2432 default:
2433 AssertFailed();
2434 return VERR_EM_INTERPRETER;
2435 }
2436
2437 Log2(("emInterpret%s: pvFault=%RGv pParam1=%RGv val2=%x\n", emGetMnemonic(pDis), pvFault, pParam1, valpar2));
2438 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + valpar2/8);
2439 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)pParam1 & ~3) == pvFault, VERR_EM_INTERPRETER); NOREF(pvFault);
2440 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, 1);
2441 if (RT_FAILURE(rc))
2442 {
2443 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2444 return VERR_EM_INTERPRETER;
2445 }
2446
2447 Log2(("emInterpretBtx: val=%x\n", valpar1));
2448 /* Data read, emulate bit test instruction. */
2449 eflags = pfnEmulate(&valpar1, valpar2 & 0x7);
2450
2451 Log2(("emInterpretBtx: val=%x CF=%d\n", valpar1, !!(eflags & X86_EFL_CF)));
2452
2453 /* Update guest's eflags and finish. */
2454 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2455 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2456
2457 /* And write it back */
2458 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, 1);
2459 if (RT_SUCCESS(rc))
2460 {
2461 /* All done! */
2462 *pcbSize = 1;
2463 return VINF_SUCCESS;
2464 }
2465#ifdef IN_RC
2466 }
2467 }
2468#endif
2469 return VERR_EM_INTERPRETER;
2470}
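
/*
 * Note on the address adjustment in emInterpretBitTest above: BT/BTS/BTR/BTC
 * with a memory operand address a bit string, so the bit offset first selects
 * a byte (offset / 8) and the remaining three bits select the bit within that
 * byte.  A minimal sketch with a hypothetical operand:
 *
 * @code
 *     RTGCPTR  GCPtrBase = 0x2000;                  // hypothetical m8 base address
 *     uint64_t uBitNo    = 19;                      // e.g. "bts [GCPtrBase], 19"
 *     RTGCPTR  GCPtrByte = GCPtrBase + uBitNo / 8;  // 0x2002
 *     unsigned iBit      = uBitNo & 7;              // bit 3 within that byte
 * @endcode
 */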
2471
2472
2473#ifndef VBOX_COMPARE_IEM_AND_EM
2474/**
2475 * LOCK BTR/C/S Emulation.
2476 */
2477static int emInterpretLockBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2478 uint32_t *pcbSize, PFNEMULATELOCKPARAM2 pfnEmulate)
2479{
2480 void *pvParam1;
2481
2482 DISQPVPARAMVAL param1, param2;
2483 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2484 if(RT_FAILURE(rc))
2485 return VERR_EM_INTERPRETER;
2486
2487 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2488 if(RT_FAILURE(rc))
2489 return VERR_EM_INTERPRETER;
2490
2491 /* The destination is always a virtual address */
2492 if (param1.type != DISQPV_TYPE_ADDRESS)
2493 return VERR_EM_INTERPRETER;
2494
2495 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2496 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2497 uint64_t ValPar2 = param2.val.val64;
2498
2499 /* Adjust the parameters so what we're dealing with is a bit within the byte pointed to. */
2500 RTGCPTR GCPtrPar1 = param1.val.val64;
2501 GCPtrPar1 = (GCPtrPar1 + ValPar2 / 8);
2502 ValPar2 &= 7;
2503
2504 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2505#ifdef IN_RC
2506 Assert(TRPMHasTrap(pVCpu));
2507 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)GCPtrPar1 & ~(RTGCUINTPTR)3) == pvFault, VERR_EM_INTERPRETER);
2508#endif
2509
2510 PGMPAGEMAPLOCK Lock;
2511 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2512 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2513
2514 Log2(("emInterpretLockBitTest %s: pvFault=%RGv GCPtrPar1=%RGv imm=%RX64\n", emGetMnemonic(pDis), pvFault, GCPtrPar1, ValPar2));
2515 NOREF(pvFault);
2516
2517 /* Try emulate it with a one-shot #PF handler in place. (RC) */
2518 RTGCUINTREG32 eflags = 0;
2519 rc = pfnEmulate(pvParam1, ValPar2, &eflags);
2520 PGMPhysReleasePageMappingLock(pVM, &Lock);
2521 if (RT_FAILURE(rc))
2522 {
2523 Log(("emInterpretLockBitTest %s: %RGv imm%d=%RX64 -> emulation failed due to page fault!\n",
2524 emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2525 return VERR_EM_INTERPRETER;
2526 }
2527
2528 Log2(("emInterpretLockBitTest %s: GCPtrPar1=%RGv imm=%RX64 CF=%d\n", emGetMnemonic(pDis), GCPtrPar1, ValPar2, !!(eflags & X86_EFL_CF)));
2529
2530 /* Update guest's eflags and finish. */
2531 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2532 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2533
2534 *pcbSize = 1;
2535 return VINF_SUCCESS;
2536}
2537#endif /* !VBOX_COMPARE_IEM_AND_EM */
2538
2539
2540/**
2541 * MOV emulation.
2542 */
2543static int emInterpretMov(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2544{
2545 NOREF(pvFault);
2546 DISQPVPARAMVAL param1, param2;
2547 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2548 if(RT_FAILURE(rc))
2549 return VERR_EM_INTERPRETER;
2550
2551 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2552 if(RT_FAILURE(rc))
2553 return VERR_EM_INTERPRETER;
2554
2555 /* If destination is a segment register, punt. We can't handle it here.
2556 * NB: Source can be a register and still trigger a #PF!
2557 */
2558 if (RT_UNLIKELY(pDis->Param1.fUse == DISUSE_REG_SEG))
2559 return VERR_EM_INTERPRETER;
2560
2561 if (param1.type == DISQPV_TYPE_ADDRESS)
2562 {
2563 RTGCPTR pDest;
2564 uint64_t val64;
2565
2566 switch(param1.type)
2567 {
2568 case DISQPV_TYPE_IMMEDIATE:
2569 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2570 return VERR_EM_INTERPRETER;
2571 RT_FALL_THRU();
2572
2573 case DISQPV_TYPE_ADDRESS:
2574 pDest = (RTGCPTR)param1.val.val64;
2575 pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
2576 break;
2577
2578 default:
2579 AssertFailed();
2580 return VERR_EM_INTERPRETER;
2581 }
2582
2583 switch(param2.type)
2584 {
2585 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
2586 val64 = param2.val.val64;
2587 break;
2588
2589 default:
2590 Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
2591 return VERR_EM_INTERPRETER;
2592 }
2593#ifdef LOG_ENABLED
2594 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2595 LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
2596 else
2597 LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
2598#endif
2599
2600 Assert(param2.size <= 8 && param2.size > 0);
2601 EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
2602 rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
2603 if (RT_FAILURE(rc))
2604 return VERR_EM_INTERPRETER;
2605
2606 *pcbSize = param2.size;
2607 }
2608#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1)
2609 /* mov xx, cs instruction is dangerous in raw mode and replaced by an 'int3' by csam/patm. */
2610 else if ( param1.type == DISQPV_TYPE_REGISTER
2611 && param2.type == DISQPV_TYPE_REGISTER)
2612 {
2613 AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER);
2614 AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER);
2615 AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER);
2616
2617 uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
2618 uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl;
2619
2620 Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS));
2621 switch (param1.size)
2622 {
2623 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) uValCS); break;
2624 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break;
2625 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break;
2626 default:
2627 AssertFailed();
2628 return VERR_EM_INTERPRETER;
2629 }
2630 AssertRCReturn(rc, rc);
2631 }
2632#endif
2633 else
2634 { /* read fault */
2635 RTGCPTR pSrc;
2636 uint64_t val64;
2637
2638 /* Source */
2639 switch(param2.type)
2640 {
2641 case DISQPV_TYPE_IMMEDIATE:
2642 if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2643 return VERR_EM_INTERPRETER;
2644 RT_FALL_THRU();
2645
2646 case DISQPV_TYPE_ADDRESS:
2647 pSrc = (RTGCPTR)param2.val.val64;
2648 pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
2649 break;
2650
2651 default:
2652 return VERR_EM_INTERPRETER;
2653 }
2654
2655 Assert(param1.size <= 8 && param1.size > 0);
2656 EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
2657 rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
2658 if (RT_FAILURE(rc))
2659 return VERR_EM_INTERPRETER;
2660
2661 /* Destination */
2662 switch(param1.type)
2663 {
2664 case DISQPV_TYPE_REGISTER:
2665 switch(param1.size)
2666 {
2667 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break;
2668 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
2669 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
2670 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
2671 default:
2672 return VERR_EM_INTERPRETER;
2673 }
2674 if (RT_FAILURE(rc))
2675 return rc;
2676 break;
2677
2678 default:
2679 return VERR_EM_INTERPRETER;
2680 }
2681#ifdef LOG_ENABLED
2682 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2683 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
2684 else
2685 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
2686#endif
2687 }
2688 return VINF_SUCCESS;
2689}
2690
2691
2692#ifndef IN_RC
2693/**
2694 * [REP] STOSWD emulation
2695 */
2696static int emInterpretStosWD(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2697{
2698 int rc;
2699 RTGCPTR GCDest, GCOffset;
2700 uint32_t cbSize;
2701 uint64_t cTransfers;
2702 int offIncrement;
2703 NOREF(pvFault);
2704
2705 /* Don't support any prefixes other than address-size, operand-size, REP and REX. */
2706 if ((pDis->fPrefix & ~(DISPREFIX_ADDRSIZE|DISPREFIX_OPSIZE|DISPREFIX_REP|DISPREFIX_REX)))
2707 return VERR_EM_INTERPRETER;
2708
2709 switch (pDis->uAddrMode)
2710 {
2711 case DISCPUMODE_16BIT:
2712 GCOffset = pRegFrame->di;
2713 cTransfers = pRegFrame->cx;
2714 break;
2715 case DISCPUMODE_32BIT:
2716 GCOffset = pRegFrame->edi;
2717 cTransfers = pRegFrame->ecx;
2718 break;
2719 case DISCPUMODE_64BIT:
2720 GCOffset = pRegFrame->rdi;
2721 cTransfers = pRegFrame->rcx;
2722 break;
2723 default:
2724 AssertFailed();
2725 return VERR_EM_INTERPRETER;
2726 }
2727
2728 GCDest = SELMToFlat(pVM, DISSELREG_ES, pRegFrame, GCOffset);
2729 switch (pDis->uOpMode)
2730 {
2731 case DISCPUMODE_16BIT:
2732 cbSize = 2;
2733 break;
2734 case DISCPUMODE_32BIT:
2735 cbSize = 4;
2736 break;
2737 case DISCPUMODE_64BIT:
2738 cbSize = 8;
2739 break;
2740 default:
2741 AssertFailed();
2742 return VERR_EM_INTERPRETER;
2743 }
2744
2745 offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cbSize : (signed)cbSize;
2746
2747 if (!(pDis->fPrefix & DISPREFIX_REP))
2748 {
2749 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize));
2750
2751 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2752 if (RT_FAILURE(rc))
2753 return VERR_EM_INTERPRETER;
2754 Assert(rc == VINF_SUCCESS);
2755
2756 /* Update (e/r)di. */
2757 switch (pDis->uAddrMode)
2758 {
2759 case DISCPUMODE_16BIT:
2760 pRegFrame->di += offIncrement;
2761 break;
2762 case DISCPUMODE_32BIT:
2763 pRegFrame->edi += offIncrement;
2764 break;
2765 case DISCPUMODE_64BIT:
2766 pRegFrame->rdi += offIncrement;
2767 break;
2768 default:
2769 AssertFailed();
2770 return VERR_EM_INTERPRETER;
2771 }
2772
2773 }
2774 else
2775 {
2776 if (!cTransfers)
2777 return VINF_SUCCESS;
2778
2779 /*
2780 * Do *not* try to emulate cross-page stuff here because we don't know what might
2781 * be waiting for us on the subsequent pages. The caller has only asked us to
2782 * ignore access handlers for the current page.
2783 * This also fends off big stores which would quickly kill PGMR0DynMap.
2784 */
2785 if ( cbSize > PAGE_SIZE
2786 || cTransfers > PAGE_SIZE
2787 || (GCDest >> PAGE_SHIFT) != ((GCDest + offIncrement * cTransfers) >> PAGE_SHIFT))
2788 {
2789 Log(("STOSWD is crosses pages, chicken out to the recompiler; GCDest=%RGv cbSize=%#x offIncrement=%d cTransfers=%#x\n",
2790 GCDest, cbSize, offIncrement, cTransfers));
2791 return VERR_EM_INTERPRETER;
2792 }
2793
2794 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d cTransfers=%x DF=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize, cTransfers, pRegFrame->eflags.Bits.u1DF));
2795 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2796 rc = PGMVerifyAccess(pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
2797 cTransfers * cbSize,
2798 X86_PTE_RW | (CPUMGetGuestCPL(pVCpu) == 3 ? X86_PTE_US : 0));
2799 if (rc != VINF_SUCCESS)
2800 {
2801 Log(("STOSWD will generate a trap -> recompiler, rc=%d\n", rc));
2802 return VERR_EM_INTERPRETER;
2803 }
2804
2805 /* REP case */
2806 while (cTransfers)
2807 {
2808 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2809 if (RT_FAILURE(rc))
2810 {
2811 rc = VERR_EM_INTERPRETER;
2812 break;
2813 }
2814
2815 Assert(rc == VINF_SUCCESS);
2816 GCOffset += offIncrement;
2817 GCDest += offIncrement;
2818 cTransfers--;
2819 }
2820
2821 /* Update the registers. */
2822 switch (pDis->uAddrMode)
2823 {
2824 case DISCPUMODE_16BIT:
2825 pRegFrame->di = GCOffset;
2826 pRegFrame->cx = cTransfers;
2827 break;
2828 case DISCPUMODE_32BIT:
2829 pRegFrame->edi = GCOffset;
2830 pRegFrame->ecx = cTransfers;
2831 break;
2832 case DISCPUMODE_64BIT:
2833 pRegFrame->rdi = GCOffset;
2834 pRegFrame->rcx = cTransfers;
2835 break;
2836 default:
2837 AssertFailed();
2838 return VERR_EM_INTERPRETER;
2839 }
2840 }
2841
2842 *pcbSize = cbSize;
2843 return rc;
2844}
2845#endif /* !IN_RC */
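
/*
 * Note on the single-page restriction in emInterpretStosWD above: the
 * interpreter only handles a REP STOS that stays within one guest page, since
 * the caller only asked for access handlers on the faulting page to be
 * ignored.  A minimal sketch of the page-crossing test (assumes the usual
 * PAGE_SIZE/PAGE_SHIFT; hypothetical helper for illustration only):
 *
 * @code
 *     static bool emStosStaysOnOnePageSketch(RTGCPTR GCDest, int offIncrement, uint64_t cTransfers)
 *     {
 *         RTGCPTR GCLast = GCDest + offIncrement * cTransfers;  // address just past the last store
 *         return (GCDest >> PAGE_SHIFT) == (GCLast >> PAGE_SHIFT);
 *     }
 * @endcode
 */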
2846
2847
2848/**
2849 * [LOCK] CMPXCHG emulation.
2850 */
2851static int emInterpretCmpXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2852{
2853 DISQPVPARAMVAL param1, param2;
2854 NOREF(pvFault);
2855
2856#if HC_ARCH_BITS == 32
2857 Assert(pDis->Param1.cb <= 4);
2858#endif
2859
2860 /* Source to make DISQueryParamVal read the register value - ugly hack */
2861 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2862 if(RT_FAILURE(rc))
2863 return VERR_EM_INTERPRETER;
2864
2865 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2866 if(RT_FAILURE(rc))
2867 return VERR_EM_INTERPRETER;
2868
2869 uint64_t valpar;
2870 switch(param2.type)
2871 {
2872 case DISQPV_TYPE_IMMEDIATE: /* register actually */
2873 valpar = param2.val.val64;
2874 break;
2875
2876 default:
2877 return VERR_EM_INTERPRETER;
2878 }
2879
2880 PGMPAGEMAPLOCK Lock;
2881 RTGCPTR GCPtrPar1;
2882 void *pvParam1;
2883 uint64_t eflags;
2884
2885 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
2886 switch(param1.type)
2887 {
2888 case DISQPV_TYPE_ADDRESS:
2889 GCPtrPar1 = param1.val.val64;
2890 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2891
2892 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2893 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2894 break;
2895
2896 default:
2897 return VERR_EM_INTERPRETER;
2898 }
2899
2900 LogFlow(("%s %RGv rax=%RX64 %RX64\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar));
2901
2902#ifndef VBOX_COMPARE_IEM_AND_EM
2903 if (pDis->fPrefix & DISPREFIX_LOCK)
2904 eflags = EMEmulateLockCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
2905 else
2906 eflags = EMEmulateCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
2907#else /* VBOX_COMPARE_IEM_AND_EM */
2908 uint64_t u64;
2909 switch (pDis->Param2.cb)
2910 {
2911 case 1: u64 = *(uint8_t *)pvParam1; break;
2912 case 2: u64 = *(uint16_t *)pvParam1; break;
2913 case 4: u64 = *(uint32_t *)pvParam1; break;
2914 default:
2915 case 8: u64 = *(uint64_t *)pvParam1; break;
2916 }
2917 eflags = EMEmulateCmpXchg(&u64, &pRegFrame->rax, valpar, pDis->Param2.cb);
2918 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
2919#endif /* VBOX_COMPARE_IEM_AND_EM */
2920
2921 LogFlow(("%s %RGv rax=%RX64 %RX64 ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar, !!(eflags & X86_EFL_ZF)));
2922
2923 /* Update guest's eflags and finish. */
2924 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2925 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2926
2927 *pcbSize = param2.size;
2928 PGMPhysReleasePageMappingLock(pVM, &Lock);
2929 return VINF_SUCCESS;
2930}
2931
2932
2933/**
2934 * [LOCK] CMPXCHG8B emulation.
2935 */
2936static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2937{
2938 DISQPVPARAMVAL param1;
2939 NOREF(pvFault);
2940
2941 /* Source to make DISQueryParamVal read the register value - ugly hack */
2942 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2943 if(RT_FAILURE(rc))
2944 return VERR_EM_INTERPRETER;
2945
2946 RTGCPTR GCPtrPar1;
2947 void *pvParam1;
2948 uint64_t eflags;
2949 PGMPAGEMAPLOCK Lock;
2950
2951 AssertReturn(pDis->Param1.cb == 8, VERR_EM_INTERPRETER);
2952 switch(param1.type)
2953 {
2954 case DISQPV_TYPE_ADDRESS:
2955 GCPtrPar1 = param1.val.val64;
2956 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2957
2958 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2959 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2960 break;
2961
2962 default:
2963 return VERR_EM_INTERPRETER;
2964 }
2965
2966 LogFlow(("%s %RGv=%p eax=%08x\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax));
2967
2968#ifndef VBOX_COMPARE_IEM_AND_EM
2969 if (pDis->fPrefix & DISPREFIX_LOCK)
2970 eflags = EMEmulateLockCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
2971 else
2972 eflags = EMEmulateCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
2973#else /* VBOX_COMPARE_IEM_AND_EM */
2974 uint64_t u64 = *(uint64_t *)pvParam1;
2975 eflags = EMEmulateCmpXchg8b(&u64, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
2976 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, sizeof(u64)); AssertRCSuccess(rc2);
2977#endif /* VBOX_COMPARE_IEM_AND_EM */
2978
2979 LogFlow(("%s %RGv=%p eax=%08x ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax, !!(eflags & X86_EFL_ZF)));
2980
2981 /* Update guest's eflags and finish; note that *only* ZF is affected. */
2982 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_ZF))
2983 | (eflags & (X86_EFL_ZF));
2984
2985 *pcbSize = 8;
2986 PGMPhysReleasePageMappingLock(pVM, &Lock);
2987 return VINF_SUCCESS;
2988}
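
/*
 * Note on CMPXCHG8B as emulated above: the instruction compares EDX:EAX with
 * the 64-bit memory operand; on a match it stores ECX:EBX there and sets ZF,
 * otherwise it loads the memory value into EDX:EAX and clears ZF (no other
 * flags are affected).  A minimal sketch with hypothetical register values:
 *
 * @code
 *     uint64_t uMem = UINT64_C(0x1122334455667788);    // hypothetical memory operand
 *     uint32_t uEax = 0x55667788, uEdx = 0x11223344;   // comparand  EDX:EAX
 *     uint32_t uEbx = 0xcafebabe, uEcx = 0xdeadbeef;   // new value  ECX:EBX
 *     bool     fZF;
 *     if (uMem == RT_MAKE_U64(uEax, uEdx))
 *     {
 *         uMem = RT_MAKE_U64(uEbx, uEcx);
 *         fZF  = true;
 *     }
 *     else
 *     {
 *         uEax = RT_LO_U32(uMem);
 *         uEdx = RT_HI_U32(uMem);
 *         fZF  = false;
 *     }
 * @endcode
 */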
2989
2990
2991#ifdef IN_RC /** @todo test+enable for HM as well. */
2992/**
2993 * [LOCK] XADD emulation.
2994 */
2995static int emInterpretXAdd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2996{
2997 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
2998 DISQPVPARAMVAL param1;
2999 void *pvParamReg2;
3000 size_t cbParamReg2;
3001 NOREF(pvFault);
3002
3003 /* Source to make DISQueryParamVal read the register value - ugly hack */
3004 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3005 if(RT_FAILURE(rc))
3006 return VERR_EM_INTERPRETER;
3007
3008 rc = DISQueryParamRegPtr(pRegFrame, pDis, &pDis->Param2, &pvParamReg2, &cbParamReg2);
3009 Assert(cbParamReg2 <= 4);
3010 if(RT_FAILURE(rc))
3011 return VERR_EM_INTERPRETER;
3012
3013#ifdef IN_RC
3014 if (TRPMHasTrap(pVCpu))
3015 {
3016 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
3017 {
3018#endif
3019 RTGCPTR GCPtrPar1;
3020 void *pvParam1;
3021 uint32_t eflags;
3022 PGMPAGEMAPLOCK Lock;
3023
3024 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
3025 switch(param1.type)
3026 {
3027 case DISQPV_TYPE_ADDRESS:
3028 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, (RTRCUINTPTR)param1.val.val64);
3029#ifdef IN_RC
3030 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
3031#endif
3032
3033 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3034 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3035 break;
3036
3037 default:
3038 return VERR_EM_INTERPRETER;
3039 }
3040
3041 LogFlow(("XAdd %RGv=%p reg=%08llx\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2));
3042
3043#ifndef VBOX_COMPARE_IEM_AND_EM
3044 if (pDis->fPrefix & DISPREFIX_LOCK)
3045 eflags = EMEmulateLockXAdd(pvParam1, pvParamReg2, cbParamReg2);
3046 else
3047 eflags = EMEmulateXAdd(pvParam1, pvParamReg2, cbParamReg2);
3048#else /* VBOX_COMPARE_IEM_AND_EM */
3049 uint64_t u64;
3050 switch (cbParamReg2)
3051 {
3052 case 1: u64 = *(uint8_t *)pvParam1; break;
3053 case 2: u64 = *(uint16_t *)pvParam1; break;
3054 case 4: u64 = *(uint32_t *)pvParam1; break;
3055 default:
3056 case 8: u64 = *(uint64_t *)pvParam1; break;
3057 }
3058 eflags = EMEmulateXAdd(&u64, pvParamReg2, cbParamReg2);
3059 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3060#endif /* VBOX_COMPARE_IEM_AND_EM */
3061
3062 LogFlow(("XAdd %RGv=%p reg=%08llx ZF=%d\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2, !!(eflags & X86_EFL_ZF) ));
3063
3064 /* Update guest's eflags and finish. */
3065 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3066 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3067
3068 *pcbSize = cbParamReg2;
3069 PGMPhysReleasePageMappingLock(pVM, &Lock);
3070 return VINF_SUCCESS;
3071#ifdef IN_RC
3072 }
3073 }
3074
3075 return VERR_EM_INTERPRETER;
3076#endif
3077}
3078#endif /* IN_RC */
3079
3080
3081/**
3082 * WBINVD Emulation.
3083 */
3084static int emInterpretWbInvd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3085{
3086 /* Nothing to do. */
3087 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3088 return VINF_SUCCESS;
3089}
3090
3091
3092/**
3093 * INVLPG Emulation.
3094 */
3095static VBOXSTRICTRC emInterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3096{
3097 DISQPVPARAMVAL param1;
3098 RTGCPTR addr;
3099 NOREF(pvFault); NOREF(pVM); NOREF(pcbSize);
3100
3101 VBOXSTRICTRC rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3102 if(RT_FAILURE(rc))
3103 return VERR_EM_INTERPRETER;
3104
3105 switch(param1.type)
3106 {
3107 case DISQPV_TYPE_IMMEDIATE:
3108 case DISQPV_TYPE_ADDRESS:
3109 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
3110 return VERR_EM_INTERPRETER;
3111 addr = (RTGCPTR)param1.val.val64;
3112 break;
3113
3114 default:
3115 return VERR_EM_INTERPRETER;
3116 }
3117
3118 /** @todo Is addr always a flat linear address, or is it DS-based
3119 * (in the absence of segment override prefixes)?
3120 */
3121#ifdef IN_RC
3122 LogFlow(("RC: EMULATE: invlpg %RGv\n", addr));
3123#endif
3124 rc = PGMInvalidatePage(pVCpu, addr);
3125 if ( rc == VINF_SUCCESS
3126 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
3127 return VINF_SUCCESS;
3128 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
3129 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), addr),
3130 VERR_EM_INTERPRETER);
3131 return rc;
3132}
3133
3134/** @todo change all these EMInterpretXXX methods to VBOXSTRICTRC. */
3135
3136/**
3137 * CPUID Emulation.
3138 */
3139static int emInterpretCpuId(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3140{
3141 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3142 int rc = EMInterpretCpuId(pVM, pVCpu, pRegFrame);
3143 return rc;
3144}
3145
3146
3147/**
3148 * CLTS Emulation.
3149 */
3150static int emInterpretClts(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3151{
3152 NOREF(pVM); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3153
3154 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3155 if (!(cr0 & X86_CR0_TS))
3156 return VINF_SUCCESS;
3157 return CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_TS);
3158}
3159
3160
3161/**
3162 * Update CRx.
3163 *
3164 * @returns VBox status code.
3165 * @param pVM The cross context VM structure.
3166 * @param pVCpu The cross context virtual CPU structure.
3167 * @param pRegFrame The register frame.
3168 * @param DestRegCrx CRx register index (DISCREG_CR*)
3169 * @param val New CRx value
3170 *
3171 */
3172static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
3173{
3174 uint64_t oldval;
3175 uint64_t msrEFER;
3176 uint32_t fValid;
3177 int rc, rc2;
3178 NOREF(pVM);
3179
3180 /** @todo Clean up this mess. */
3181 LogFlow(("emInterpretCRxWrite at %RGv CR%d <- %RX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
3182 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3183 switch (DestRegCrx)
3184 {
3185 case DISCREG_CR0:
3186 oldval = CPUMGetGuestCR0(pVCpu);
3187#ifdef IN_RC
3188 /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
3189 if ( (val & (X86_CR0_WP | X86_CR0_AM))
3190 != (oldval & (X86_CR0_WP | X86_CR0_AM)))
3191 return VERR_EM_INTERPRETER;
3192#endif
3193 rc = VINF_SUCCESS;
3194#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
3195 CPUMSetGuestCR0(pVCpu, val);
3196#else
3197 CPUMQueryGuestCtxPtr(pVCpu)->cr0 = val | X86_CR0_ET;
3198#endif
3199 val = CPUMGetGuestCR0(pVCpu);
3200 if ( (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3201 != (val & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
3202 {
3203 /* global flush */
3204 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3205 AssertRCReturn(rc, rc);
3206 }
3207
3208 /* Deal with long mode enabling/disabling. */
3209 msrEFER = CPUMGetGuestEFER(pVCpu);
3210 if (msrEFER & MSR_K6_EFER_LME)
3211 {
3212 if ( !(oldval & X86_CR0_PG)
3213 && (val & X86_CR0_PG))
3214 {
3215 /* Illegal to have an active 64-bit CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3216 if (pRegFrame->cs.Attr.n.u1Long)
3217 {
3218 AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
3219 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3220 }
3221
3222 /* Illegal to switch to long mode without activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3223 if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE))
3224 {
3225 AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
3226 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3227 }
3228 msrEFER |= MSR_K6_EFER_LMA;
3229 }
3230 else
3231 if ( (oldval & X86_CR0_PG)
3232 && !(val & X86_CR0_PG))
3233 {
3234 msrEFER &= ~MSR_K6_EFER_LMA;
3235 /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
3236 }
3237 CPUMSetGuestEFER(pVCpu, msrEFER);
3238 }
3239 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3240 return rc2 == VINF_SUCCESS ? rc : rc2;
3241
3242 case DISCREG_CR2:
3243 rc = CPUMSetGuestCR2(pVCpu, val); AssertRC(rc);
3244 return VINF_SUCCESS;
3245
3246 case DISCREG_CR3:
3247 /* Reloading the current CR3 means the guest just wants to flush the TLBs */
3248 rc = CPUMSetGuestCR3(pVCpu, val); AssertRC(rc);
3249 if (CPUMGetGuestCR0(pVCpu) & X86_CR0_PG)
3250 {
3251 /* flush */
3252 rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
3253 AssertRC(rc);
3254 }
3255 return rc;
3256
3257 case DISCREG_CR4:
3258 oldval = CPUMGetGuestCR4(pVCpu);
3259 rc = CPUMSetGuestCR4(pVCpu, val); AssertRC(rc);
3260 val = CPUMGetGuestCR4(pVCpu);
3261
3262 /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3263 msrEFER = CPUMGetGuestEFER(pVCpu);
3264 if ( (msrEFER & MSR_K6_EFER_LMA)
3265 && (oldval & X86_CR4_PAE)
3266 && !(val & X86_CR4_PAE))
3267 {
3268 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3269 }
3270
3271 /* From IEM iemCImpl_load_CrX. */
3272 /** @todo Check the guest CPUID bits to determine the corresponding valid bits. */
3273 fValid = X86_CR4_VME | X86_CR4_PVI
3274 | X86_CR4_TSD | X86_CR4_DE
3275 | X86_CR4_PSE | X86_CR4_PAE
3276 | X86_CR4_MCE | X86_CR4_PGE
3277 | X86_CR4_PCE | X86_CR4_OSFXSR
3278 | X86_CR4_OSXMMEEXCPT;
3279 //if (xxx)
3280 // fValid |= X86_CR4_VMXE;
3281 //if (xxx)
3282 // fValid |= X86_CR4_OSXSAVE;
3283 if (val & ~(uint64_t)fValid)
3284 {
3285 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", val, val & ~(uint64_t)fValid));
3286 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3287 }
3288
3289 rc = VINF_SUCCESS;
3290 if ( (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
3291 != (val & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
3292 {
3293 /* global flush */
3294 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3295 AssertRCReturn(rc, rc);
3296 }
3297
3298 /* Feeling extremely lazy. */
3299# ifdef IN_RC
3300 if ( (oldval & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
3301 != (val & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
3302 {
3303 Log(("emUpdateCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
3304 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
3305 }
3306# endif
3307# ifdef VBOX_WITH_RAW_MODE
3308 if (((val ^ oldval) & X86_CR4_VME) && VM_IS_RAW_MODE_ENABLED(pVM))
3309 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3310# endif
3311
3312 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3313 return rc2 == VINF_SUCCESS ? rc : rc2;
3314
3315 case DISCREG_CR8:
3316 return APICSetTpr(pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
3317
3318 default:
3319 AssertFailed();
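/* deliberate fall thru into the illegal CR1 case */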
3320 case DISCREG_CR1: /* illegal op */
3321 break;
3322 }
3323 return VERR_EM_INTERPRETER;
3324}
3325
3326
3327/**
3328 * LMSW Emulation.
3329 */
3330static int emInterpretLmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3331{
3332 DISQPVPARAMVAL param1;
3333 uint32_t val;
3334 NOREF(pvFault); NOREF(pcbSize);
3335 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3336
3337 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3338 if(RT_FAILURE(rc))
3339 return VERR_EM_INTERPRETER;
3340
3341 switch(param1.type)
3342 {
3343 case DISQPV_TYPE_IMMEDIATE:
3344 case DISQPV_TYPE_ADDRESS:
3345 if(!(param1.flags & DISQPV_FLAG_16))
3346 return VERR_EM_INTERPRETER;
3347 val = param1.val.val32;
3348 break;
3349
3350 default:
3351 return VERR_EM_INTERPRETER;
3352 }
3353
3354 LogFlow(("emInterpretLmsw %x\n", val));
3355 uint64_t OldCr0 = CPUMGetGuestCR0(pVCpu);
3356
3357 /* Only PE, MP, EM and TS can be changed; note that PE can't be cleared by this instruction. */
3358 uint64_t NewCr0 = ( OldCr0 & ~( X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
3359 | (val & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
3360
3361 return emUpdateCRx(pVM, pVCpu, pRegFrame, DISCREG_CR0, NewCr0);
3362
3363}
3364
3365#ifdef EM_EMULATE_SMSW
3366/**
3367 * SMSW Emulation.
3368 */
3369static int emInterpretSmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3370{
3371 NOREF(pvFault); NOREF(pcbSize);
3372 DISQPVPARAMVAL param1;
3373 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3374
3375 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3376 if(RT_FAILURE(rc))
3377 return VERR_EM_INTERPRETER;
3378
3379 switch(param1.type)
3380 {
3381 case DISQPV_TYPE_IMMEDIATE:
3382 if(param1.size != sizeof(uint16_t))
3383 return VERR_EM_INTERPRETER;
3384 LogFlow(("emInterpretSmsw %d <- cr0 (%x)\n", pDis->Param1.Base.idxGenReg, cr0));
3385 rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, cr0);
3386 break;
3387
3388 case DISQPV_TYPE_ADDRESS:
3389 {
3390 RTGCPTR pParam1;
3391
3392 /* Actually forced to 16 bits regardless of the operand size. */
3393 if(param1.size != sizeof(uint16_t))
3394 return VERR_EM_INTERPRETER;
3395
3396 pParam1 = (RTGCPTR)param1.val.val64;
3397 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
3398 LogFlow(("emInterpretSmsw %RGv <- cr0 (%x)\n", pParam1, cr0));
3399
3400 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &cr0, sizeof(uint16_t));
3401 if (RT_FAILURE(rc))
3402 {
3403 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
3404 return VERR_EM_INTERPRETER;
3405 }
3406 break;
3407 }
3408
3409 default:
3410 return VERR_EM_INTERPRETER;
3411 }
3412
3413 LogFlow(("emInterpretSmsw %x\n", cr0));
3414 return rc;
3415}
3416#endif
3417
3418
3419/**
3420 * Interpret CRx read.
3421 *
3422 * @returns VBox status code.
3423 * @param pVM The cross context VM structure.
3424 * @param pVCpu The cross context virtual CPU structure.
3425 * @param pRegFrame The register frame.
3426 * @param DestRegGen General-purpose register index (USE_REG_E**)
3427 * @param SrcRegCrx CRx register index (DISUSE_REG_CR*)
3428 *
3429 */
3430static int emInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx)
3431{
3432 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3433 uint64_t val64;
3434 int rc = CPUMGetGuestCRx(pVCpu, SrcRegCrx, &val64);
3435 AssertMsgRCReturn(rc, ("CPUMGetGuestCRx %d failed\n", SrcRegCrx), VERR_EM_INTERPRETER);
3436 NOREF(pVM);
3437
3438 if (CPUMIsGuestIn64BitCode(pVCpu))
3439 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
3440 else
3441 rc = DISWriteReg32(pRegFrame, DestRegGen, val64);
3442
3443 if (RT_SUCCESS(rc))
3444 {
3445 LogFlow(("MOV_CR: gen32=%d CR=%d val=%RX64\n", DestRegGen, SrcRegCrx, val64));
3446 return VINF_SUCCESS;
3447 }
3448 return VERR_EM_INTERPRETER;
3449}
3450
3451
3452/**
3453 * Interpret CRx write.
3454 *
3455 * @returns VBox status code.
3456 * @param pVM The cross context VM structure.
3457 * @param pVCpu The cross context virtual CPU structure.
3458 * @param pRegFrame The register frame.
3459 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3460 * @param SrcRegGen General-purpose register index (USE_REG_E**)
3461 *
3462 */
3463static int emInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
3464{
3465 uint64_t val;
3466 int rc;
3467 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3468
3469 if (CPUMIsGuestIn64BitCode(pVCpu))
3470 rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
3471 else
3472 {
3473 uint32_t val32;
3474 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
3475 val = val32;
3476 }
3477
3478 if (RT_SUCCESS(rc))
3479 return emUpdateCRx(pVM, pVCpu, pRegFrame, DestRegCrx, val);
3480
3481 return VERR_EM_INTERPRETER;
3482}
3483
3484
3485/**
3486 * MOV CRx
3487 */
3488static int emInterpretMovCRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3489{
3490 NOREF(pvFault); NOREF(pcbSize);
3491 if ((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_CR)
3492 return emInterpretCRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxCtrlReg);
3493
3494 if (pDis->Param1.fUse == DISUSE_REG_CR && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3495 return emInterpretCRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxCtrlReg, pDis->Param2.Base.idxGenReg);
3496
3497 AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
3498}
3499
3500
3501/**
3502 * MOV DRx
3503 */
3504static int emInterpretMovDRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3505{
3506 int rc = VERR_EM_INTERPRETER;
3507 NOREF(pvFault); NOREF(pcbSize);
3508
3509 if((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_DBG)
3510 {
3511 rc = EMInterpretDRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxDbgReg);
3512 }
3513 else
3514 if(pDis->Param1.fUse == DISUSE_REG_DBG && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3515 {
3516 rc = EMInterpretDRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxDbgReg, pDis->Param2.Base.idxGenReg);
3517 }
3518 else
3519 AssertMsgFailed(("Unexpected debug register move\n"));
3520
3521 return rc;
3522}
3523
3524
3525/**
3526 * LLDT Emulation.
3527 */
3528static int emInterpretLLdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3529{
3530 DISQPVPARAMVAL param1;
3531 RTSEL sel;
3532 NOREF(pVM); NOREF(pvFault); NOREF(pcbSize);
3533
3534 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3535 if(RT_FAILURE(rc))
3536 return VERR_EM_INTERPRETER;
3537
3538 switch(param1.type)
3539 {
3540 case DISQPV_TYPE_ADDRESS:
3541 return VERR_EM_INTERPRETER; //feeling lazy right now
3542
3543 case DISQPV_TYPE_IMMEDIATE:
3544 if(!(param1.flags & DISQPV_FLAG_16))
3545 return VERR_EM_INTERPRETER;
3546 sel = (RTSEL)param1.val.val16;
3547 break;
3548
3549 default:
3550 return VERR_EM_INTERPRETER;
3551 }
3552
3553#ifdef IN_RING0
3554 /* Only for the VT-x real-mode emulation case. */
3555 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3556 CPUMSetGuestLDTR(pVCpu, sel);
3557 return VINF_SUCCESS;
3558#else
3559 if (sel == 0)
3560 {
3561 if (CPUMGetHyperLDTR(pVCpu) == 0)
3562 {
3563 // this simple case is most frequent in Windows 2000 (31k - boot & shutdown)
3564 return VINF_SUCCESS;
3565 }
3566 }
3567 //still feeling lazy
3568 return VERR_EM_INTERPRETER;
3569#endif
3570}
3571
3572#ifdef IN_RING0
3573/**
3574 * LIDT/LGDT Emulation.
3575 */
3576static int emInterpretLIGdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3577{
3578 DISQPVPARAMVAL param1;
3579 RTGCPTR pParam1;
3580 X86XDTR32 dtr32;
3581 NOREF(pvFault); NOREF(pcbSize);
3582
3583 Log(("Emulate %s at %RGv\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip));
3584
3585 /* Only for the VT-x real-mode emulation case. */
3586 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3587
3588 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3589 if(RT_FAILURE(rc))
3590 return VERR_EM_INTERPRETER;
3591
3592 switch(param1.type)
3593 {
3594 case DISQPV_TYPE_ADDRESS:
3595 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, param1.val.val16);
3596 break;
3597
3598 default:
3599 return VERR_EM_INTERPRETER;
3600 }
3601
3602 rc = emRamRead(pVM, pVCpu, pRegFrame, &dtr32, pParam1, sizeof(dtr32));
3603 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3604
3605 if (!(pDis->fPrefix & DISPREFIX_OPSIZE))
3606 dtr32.uAddr &= 0xffffff; /* 16-bit operand size */
3607
3608 if (pDis->pCurInstr->uOpcode == OP_LIDT)
3609 CPUMSetGuestIDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3610 else
3611 CPUMSetGuestGDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3612
3613 return VINF_SUCCESS;
3614}
3615#endif
3616
3617
3618#ifdef IN_RC
3619/**
3620 * STI Emulation.
3621 *
3622 * @remark The instruction following STI is guaranteed to be executed before any interrupts are dispatched.
3623 */
3624static int emInterpretSti(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3625{
3626 NOREF(pcbSize);
3627 PPATMGCSTATE pGCState = PATMGetGCState(pVM);
3628
3629 if(!pGCState)
3630 {
3631 Assert(pGCState);
3632 return VERR_EM_INTERPRETER;
3633 }
3634 pGCState->uVMFlags |= X86_EFL_IF;
3635
3636 Assert(pRegFrame->eflags.u32 & X86_EFL_IF);
3637 Assert(pvFault == SELMToFlat(pVM, DISSELREG_CS, pRegFrame, (RTGCPTR)pRegFrame->rip));
3638
3639 pVCpu->em.s.GCPtrInhibitInterrupts = pRegFrame->eip + pDis->cbInstr;
3640 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3641
3642 return VINF_SUCCESS;
3643}
3644#endif /* IN_RC */
3645
3646
3647/**
3648 * HLT Emulation.
3649 */
3650static VBOXSTRICTRC
3651emInterpretHlt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3652{
3653 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3654 return VINF_EM_HALT;
3655}
3656
3657
3658/**
3659 * RDTSC Emulation.
3660 */
3661static int emInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3662{
3663 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3664 return EMInterpretRdtsc(pVM, pVCpu, pRegFrame);
3665}
3666
3667/**
3668 * RDPMC Emulation
3669 */
3670static int emInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3671{
3672 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3673 return EMInterpretRdpmc(pVM, pVCpu, pRegFrame);
3674}
3675
3676
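/**
 * MONITOR Emulation.
 */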
3677static int emInterpretMonitor(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3678{
3679 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3680 return EMInterpretMonitor(pVM, pVCpu, pRegFrame);
3681}
3682
3683
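/**
 * MWAIT Emulation.
 */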
3684static VBOXSTRICTRC emInterpretMWait(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3685{
3686 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3687 return EMInterpretMWait(pVM, pVCpu, pRegFrame);
3688}
3689
3690
3691/**
3692 * RDMSR Emulation.
3693 */
3694static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3695{
3696 /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
3697 different, so we play safe by completely disassembling the instruction. */
3698 Assert(!(pDis->fPrefix & DISPREFIX_REX));
3699 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3700 return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
3701}
3702
3703
3704/**
3705 * WRMSR Emulation.
3706 */
3707static int emInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3708{
3709 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3710 return EMInterpretWrmsr(pVM, pVCpu, pRegFrame);
3711}
3712
3713
3714/**
3715 * Internal worker.
3716 * @copydoc emInterpretInstructionCPUOuter
3717 * @param pVM The cross context VM structure.
3718 */
3719DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
3720 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
3721{
3722 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3723 Assert(enmCodeType == EMCODETYPE_SUPERVISOR || enmCodeType == EMCODETYPE_ALL);
3724 Assert(pcbSize);
3725 *pcbSize = 0;
3726
3727 if (enmCodeType == EMCODETYPE_SUPERVISOR)
3728 {
3729 /*
3730 * Only supervisor guest code!!
3731 * And no complicated prefixes.
3732 */
3733 /* Get the current privilege level. */
3734 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3735#ifdef VBOX_WITH_RAW_RING1
3736 if ( !EMIsRawRing1Enabled(pVM)
3737 || cpl > 1
3738 || pRegFrame->eflags.Bits.u2IOPL > cpl
3739 )
3740#endif
3741 {
3742 if ( cpl != 0
3743 && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */
3744 {
3745 Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
3746 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
3747 return VERR_EM_INTERPRETER;
3748 }
3749 }
3750 }
3751 else
3752 Log2(("emInterpretInstructionCPU allowed to interpret user-level code!!\n"));
3753
3754#ifdef IN_RC
3755 if ( (pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP))
3756 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3757 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3758 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3759 && pDis->pCurInstr->uOpcode != OP_XADD
3760 && pDis->pCurInstr->uOpcode != OP_OR
3761 && pDis->pCurInstr->uOpcode != OP_AND
3762 && pDis->pCurInstr->uOpcode != OP_XOR
3763 && pDis->pCurInstr->uOpcode != OP_BTR
3764 )
3765 )
3766#else
3767 if ( (pDis->fPrefix & DISPREFIX_REPNE)
3768 || ( (pDis->fPrefix & DISPREFIX_REP)
3769 && pDis->pCurInstr->uOpcode != OP_STOSWD
3770 )
3771 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3772 && pDis->pCurInstr->uOpcode != OP_OR
3773 && pDis->pCurInstr->uOpcode != OP_AND
3774 && pDis->pCurInstr->uOpcode != OP_XOR
3775 && pDis->pCurInstr->uOpcode != OP_BTR
3776 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3777 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3778 )
3779 )
3780#endif
3781 {
3782 //Log(("EMInterpretInstruction: wrong prefix!!\n"));
3783 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedPrefix));
3784 Log4(("EM: Refuse %u on REP/REPNE/LOCK prefix grounds\n", pDis->pCurInstr->uOpcode));
3785 return VERR_EM_INTERPRETER;
3786 }
3787
3788#if HC_ARCH_BITS == 32
3789 /*
3790 * Unable to emulate most accesses wider than 4 bytes in 32-bit mode.
3791 * Whitelisted instructions are safe.
3792 */
3793 if ( pDis->Param1.cb > 4
3794 && CPUMIsGuestIn64BitCode(pVCpu))
3795 {
3796 uint32_t uOpCode = pDis->pCurInstr->uOpcode;
3797 if ( uOpCode != OP_STOSWD
3798 && uOpCode != OP_MOV
3799 && uOpCode != OP_CMPXCHG8B
3800 && uOpCode != OP_XCHG
3801 && uOpCode != OP_BTS
3802 && uOpCode != OP_BTR
3803 && uOpCode != OP_BTC
3804 )
3805 {
3806# ifdef VBOX_WITH_STATISTICS
3807 switch (pDis->pCurInstr->uOpcode)
3808 {
3809# define INTERPRET_FAILED_CASE(opcode, Instr) \
3810 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); break;
3811 INTERPRET_FAILED_CASE(OP_XCHG,Xchg);
3812 INTERPRET_FAILED_CASE(OP_DEC,Dec);
3813 INTERPRET_FAILED_CASE(OP_INC,Inc);
3814 INTERPRET_FAILED_CASE(OP_POP,Pop);
3815 INTERPRET_FAILED_CASE(OP_OR, Or);
3816 INTERPRET_FAILED_CASE(OP_XOR,Xor);
3817 INTERPRET_FAILED_CASE(OP_AND,And);
3818 INTERPRET_FAILED_CASE(OP_MOV,Mov);
3819 INTERPRET_FAILED_CASE(OP_STOSWD,StosWD);
3820 INTERPRET_FAILED_CASE(OP_INVLPG,InvlPg);
3821 INTERPRET_FAILED_CASE(OP_CPUID,CpuId);
3822 INTERPRET_FAILED_CASE(OP_MOV_CR,MovCRx);
3823 INTERPRET_FAILED_CASE(OP_MOV_DR,MovDRx);
3824 INTERPRET_FAILED_CASE(OP_LLDT,LLdt);
3825 INTERPRET_FAILED_CASE(OP_LIDT,LIdt);
3826 INTERPRET_FAILED_CASE(OP_LGDT,LGdt);
3827 INTERPRET_FAILED_CASE(OP_LMSW,Lmsw);
3828 INTERPRET_FAILED_CASE(OP_CLTS,Clts);
3829 INTERPRET_FAILED_CASE(OP_MONITOR,Monitor);
3830 INTERPRET_FAILED_CASE(OP_MWAIT,MWait);
3831 INTERPRET_FAILED_CASE(OP_RDMSR,Rdmsr);
3832 INTERPRET_FAILED_CASE(OP_WRMSR,Wrmsr);
3833 INTERPRET_FAILED_CASE(OP_ADD,Add);
3834 INTERPRET_FAILED_CASE(OP_SUB,Sub);
3835 INTERPRET_FAILED_CASE(OP_ADC,Adc);
3836 INTERPRET_FAILED_CASE(OP_BTR,Btr);
3837 INTERPRET_FAILED_CASE(OP_BTS,Bts);
3838 INTERPRET_FAILED_CASE(OP_BTC,Btc);
3839 INTERPRET_FAILED_CASE(OP_RDTSC,Rdtsc);
3840 INTERPRET_FAILED_CASE(OP_CMPXCHG, CmpXchg);
3841 INTERPRET_FAILED_CASE(OP_STI, Sti);
3842 INTERPRET_FAILED_CASE(OP_XADD,XAdd);
3843 INTERPRET_FAILED_CASE(OP_CMPXCHG8B,CmpXchg8b);
3844 INTERPRET_FAILED_CASE(OP_HLT, Hlt);
3845 INTERPRET_FAILED_CASE(OP_IRET,Iret);
3846 INTERPRET_FAILED_CASE(OP_WBINVD,WbInvd);
3847 INTERPRET_FAILED_CASE(OP_MOVNTPS,MovNTPS);
3848# undef INTERPRET_FAILED_CASE
3849 default:
3850 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
3851 break;
3852 }
3853# endif /* VBOX_WITH_STATISTICS */
3854 Log4(("EM: Refuse %u on grounds of accessing %u bytes\n", pDis->pCurInstr->uOpcode, pDis->Param1.cb));
3855 return VERR_EM_INTERPRETER;
3856 }
3857 }
3858#endif
3859
3860 VBOXSTRICTRC rc;
3861#if (defined(VBOX_STRICT) || defined(LOG_ENABLED))
3862 LogFlow(("emInterpretInstructionCPU %s\n", emGetMnemonic(pDis)));
3863#endif
3864 switch (pDis->pCurInstr->uOpcode)
3865 {
3866 /*
3867 * Macros for generating the right case statements.
3868 */
3869# ifndef VBOX_COMPARE_IEM_AND_EM
3870# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3871 case opcode:\
3872 if (pDis->fPrefix & DISPREFIX_LOCK) \
3873 rc = emInterpretLock##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulateLock); \
3874 else \
3875 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3876 if (RT_SUCCESS(rc)) \
3877 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3878 else \
3879 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3880 return rc
3881# else /* VBOX_COMPARE_IEM_AND_EM */
3882# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3883 case opcode:\
3884 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3885 if (RT_SUCCESS(rc)) \
3886 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3887 else \
3888 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3889 return rc
3890# endif /* VBOX_COMPARE_IEM_AND_EM */
3891
3892#define INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate) \
3893 case opcode:\
3894 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3895 if (RT_SUCCESS(rc)) \
3896 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3897 else \
3898 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3899 return rc
3900
3901#define INTERPRET_CASE_EX_PARAM2(opcode, Instr, InstrFn, pfnEmulate) \
3902 INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate)
3903#define INTERPRET_CASE_EX_LOCK_PARAM2(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3904 INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock)
3905
3906#define INTERPRET_CASE(opcode, Instr) \
3907 case opcode:\
3908 rc = emInterpret##Instr(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
3909 if (RT_SUCCESS(rc)) \
3910 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3911 else \
3912 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3913 return rc
3914
3915#define INTERPRET_CASE_EX_DUAL_PARAM2(opcode, Instr, InstrFn) \
3916 case opcode:\
3917 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
3918 if (RT_SUCCESS(rc)) \
3919 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3920 else \
3921 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3922 return rc
3923
3924#define INTERPRET_STAT_CASE(opcode, Instr) \
3925 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); return VERR_EM_INTERPRETER;
3926
3927 /*
3928 * The actual case statements.
3929 */
3930 INTERPRET_CASE(OP_XCHG,Xchg);
3931 INTERPRET_CASE_EX_PARAM2(OP_DEC,Dec, IncDec, EMEmulateDec);
3932 INTERPRET_CASE_EX_PARAM2(OP_INC,Inc, IncDec, EMEmulateInc);
3933 INTERPRET_CASE(OP_POP,Pop);
3934 INTERPRET_CASE_EX_LOCK_PARAM3(OP_OR, Or, OrXorAnd, EMEmulateOr, EMEmulateLockOr);
3935 INTERPRET_CASE_EX_LOCK_PARAM3(OP_XOR,Xor, OrXorAnd, EMEmulateXor, EMEmulateLockXor);
3936 INTERPRET_CASE_EX_LOCK_PARAM3(OP_AND,And, OrXorAnd, EMEmulateAnd, EMEmulateLockAnd);
3937 INTERPRET_CASE(OP_MOV,Mov);
3938#ifndef IN_RC
3939 INTERPRET_CASE(OP_STOSWD,StosWD);
3940#endif
3941 INTERPRET_CASE(OP_INVLPG,InvlPg);
3942 INTERPRET_CASE(OP_CPUID,CpuId);
3943 INTERPRET_CASE(OP_MOV_CR,MovCRx);
3944 INTERPRET_CASE(OP_MOV_DR,MovDRx);
3945#ifdef IN_RING0
3946 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LIDT, LIdt, LIGdt);
3947 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LGDT, LGdt, LIGdt);
3948#endif
3949 INTERPRET_CASE(OP_LLDT,LLdt);
3950 INTERPRET_CASE(OP_LMSW,Lmsw);
3951#ifdef EM_EMULATE_SMSW
3952 INTERPRET_CASE(OP_SMSW,Smsw);
3953#endif
3954 INTERPRET_CASE(OP_CLTS,Clts);
3955 INTERPRET_CASE(OP_MONITOR, Monitor);
3956 INTERPRET_CASE(OP_MWAIT, MWait);
3957 INTERPRET_CASE(OP_RDMSR, Rdmsr);
3958 INTERPRET_CASE(OP_WRMSR, Wrmsr);
3959 INTERPRET_CASE_EX_PARAM3(OP_ADD,Add, AddSub, EMEmulateAdd);
3960 INTERPRET_CASE_EX_PARAM3(OP_SUB,Sub, AddSub, EMEmulateSub);
3961 INTERPRET_CASE(OP_ADC,Adc);
3962 INTERPRET_CASE_EX_LOCK_PARAM2(OP_BTR,Btr, BitTest, EMEmulateBtr, EMEmulateLockBtr);
3963 INTERPRET_CASE_EX_PARAM2(OP_BTS,Bts, BitTest, EMEmulateBts);
3964 INTERPRET_CASE_EX_PARAM2(OP_BTC,Btc, BitTest, EMEmulateBtc);
3965 INTERPRET_CASE(OP_RDPMC,Rdpmc);
3966 INTERPRET_CASE(OP_RDTSC,Rdtsc);
3967 INTERPRET_CASE(OP_CMPXCHG, CmpXchg);
3968#ifdef IN_RC
3969 INTERPRET_CASE(OP_STI,Sti);
3970 INTERPRET_CASE(OP_XADD, XAdd);
3971 INTERPRET_CASE(OP_IRET,Iret);
3972#endif
3973 INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b);
3974 INTERPRET_CASE(OP_HLT,Hlt);
3975 INTERPRET_CASE(OP_WBINVD,WbInvd);
3976#ifdef VBOX_WITH_STATISTICS
3977# ifndef IN_RC
3978 INTERPRET_STAT_CASE(OP_XADD, XAdd);
3979# endif
3980 INTERPRET_STAT_CASE(OP_MOVNTPS,MovNTPS);
3981#endif
3982
3983 default:
3984 Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->uOpcode));
3985 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
3986 return VERR_EM_INTERPRETER;
3987
3988#undef INTERPRET_CASE_EX_PARAM2
3989#undef INTERPRET_STAT_CASE
3990#undef INTERPRET_CASE_EX
3991#undef INTERPRET_CASE
3992 } /* switch (opcode) */
3993 /* not reached */
3994}
3995
3996/**
3997 * Interprets the current instruction using the supplied DISCPUSTATE structure.
3998 *
3999 * EIP is *NOT* updated!
4000 *
4001 * @returns VBox strict status code.
4002 * @retval VINF_* Scheduling instructions. When these are returned, it
4003 * starts to get a bit tricky to know whether code was
4004 * executed or not... We'll address this when it becomes a problem.
4005 * @retval VERR_EM_INTERPRETER Something we can't cope with.
4006 * @retval VERR_* Fatal errors.
4007 *
4008 * @param pVCpu The cross context virtual CPU structure.
4009 * @param pDis The disassembler cpu state for the instruction to be
4010 * interpreted.
4011 * @param pRegFrame The register frame. EIP is *NOT* changed!
4012 * @param pvFault The fault address (CR2).
4013 * @param pcbSize Size of the write (if applicable).
4014 * @param enmCodeType Code type (user/supervisor)
4015 *
4016 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
4017 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
4018 * to worry about e.g. invalid modrm combinations (!)
4019 *
4020 * @todo At this time we do NOT check if the instruction overwrites vital information.
4021 * Make sure this can't happen!! (will add some assertions/checks later)
4022 */
4023DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
4024 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
4025{
4026 STAM_PROFILE_START(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4027 VBOXSTRICTRC rc = emInterpretInstructionCPU(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, pRegFrame, pvFault, enmCodeType, pcbSize);
4028 STAM_PROFILE_STOP(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4029 if (RT_SUCCESS(rc))
4030 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretSucceeded));
4031 else
4032 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretFailed));
4033 return rc;
4034}
4035
4036
4037#endif /* !VBOX_WITH_IEM */