VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@72496

Last change on this file since 72496 was 72490, checked in by vboxsync, 7 years ago

NEM,EM: Generic optimization of I/O port accesses that have to be executed in ring-3. Only NEM uses the feature for now. bugref:9044 bugref:9193

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 147.3 KB
1/* $Id: EMAll.cpp 72490 2018-06-09 15:11:13Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_WITH_IEM
23#define LOG_GROUP LOG_GROUP_EM
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/patm.h>
28#include <VBox/vmm/csam.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_IEM
31# include <VBox/vmm/iem.h>
32#endif
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/stam.h>
35#include "EMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/log.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50#ifdef VBOX_WITH_IEM
51//# define VBOX_COMPARE_IEM_AND_EM /* debugging... */
52//# define VBOX_SAME_AS_EM
53//# define VBOX_COMPARE_IEM_LAST
54#endif
55
56#ifdef VBOX_WITH_RAW_RING1
57# define EM_EMULATE_SMSW
58#endif
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64/** @def EM_ASSERT_FAULT_RETURN
65 * Safety check.
66 *
67 * Could in theory misfire on a cross page boundary access...
68 *
69 * Currently disabled because the CSAM (+ PATM) patch monitoring occasionally
70 * turns up an alias page instead of the original faulting one, annoying the
71 * heck out of anyone running a debug build. See @bugref{2609} and @bugref{1931}.
72 */
73#if 0
74# define EM_ASSERT_FAULT_RETURN(expr, rc) AssertReturn(expr, rc)
75#else
76# define EM_ASSERT_FAULT_RETURN(expr, rc) do { } while (0)
77#endif
78
79
80/*********************************************************************************************************************************
81* Internal Functions *
82*********************************************************************************************************************************/
83#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
84DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
85 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize);
86#endif
87
88
89/*********************************************************************************************************************************
90* Global Variables *
91*********************************************************************************************************************************/
92#ifdef VBOX_COMPARE_IEM_AND_EM
93static const uint32_t g_fInterestingFFs = VMCPU_FF_TO_R3
94 | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_INHIBIT_INTERRUPTS
95 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
96 | VMCPU_FF_TLB_FLUSH | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL;
97static uint32_t g_fIncomingFFs;
98static CPUMCTX g_IncomingCtx;
99static bool g_fIgnoreRaxRdx = false;
100
101static uint32_t g_fEmFFs;
102static CPUMCTX g_EmCtx;
103static uint8_t g_abEmWrote[256];
104static size_t g_cbEmWrote;
105
106static uint32_t g_fIemFFs;
107static CPUMCTX g_IemCtx;
108extern uint8_t g_abIemWrote[256];
109#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
110extern size_t g_cbIemWrote;
111#else
112static size_t g_cbIemWrote;
113#endif
114#endif
115
116
117/**
118 * Get the current execution manager status.
119 *
120 * @returns Current status.
121 * @param pVCpu The cross context virtual CPU structure.
122 */
123VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
124{
125 return pVCpu->em.s.enmState;
126}
127
128
129/**
130 * Sets the current execution manager status. (use only when you know what you're doing!)
131 *
132 * @param pVCpu The cross context virtual CPU structure.
133 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
134 */
135VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
136{
137 /* Only allowed combination: */
138 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
139 pVCpu->em.s.enmState = enmNewState;
140}
141
142
143/**
144 * Sets the PC for which interrupts should be inhibited.
145 *
146 * @param pVCpu The cross context virtual CPU structure.
147 * @param PC The PC.
148 */
149VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
150{
151 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
152 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
153}
154
155
156/**
157 * Gets the PC for which interrupts should be inhibited.
158 *
159 * There are a few instructions which inhibit or delay interrupts
160 * for the instruction following them. These instructions are:
161 * - STI
162 * - MOV SS, r/m16
163 * - POP SS
164 *
165 * @returns The PC for which interrupts should be inhibited.
166 * @param pVCpu The cross context virtual CPU structure.
167 *
168 */
169VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
170{
171 return pVCpu->em.s.GCPtrInhibitInterrupts;
172}
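
/*
 * Illustrative sketch (not part of the original file): how an interrupt
 * dispatcher could consume the inhibit-interrupts hint above.  The helper
 * name and the use of CPUMGetGuestRIP/VMCPU_FF_CLEAR here are assumptions
 * made for the example only.
 */
#if 0 /* usage sketch */
static bool emExampleIsInterruptInhibited(PVMCPU pVCpu)
{
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        return false;
    /* The inhibition only applies to the instruction following STI / MOV SS / POP SS. */
    if (CPUMGetGuestRIP(pVCpu) == EMGetInhibitInterruptsPC(pVCpu))
        return true;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* stale hint - clear it. */
    return false;
}
#endif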
173
174
175/**
176 * Enables / disables hypercall instructions.
177 *
178 * This interface is used by GIM to tell the execution monitors whether the
179 * hypercall instructions (VMMCALL & VMCALL) are allowed or should raise \#UD.
180 *
181 * @param pVCpu The cross context virtual CPU structure this applies to.
182 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
183 */
184VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
185{
186 pVCpu->em.s.fHypercallEnabled = fEnabled;
187}
188
189
190/**
191 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
192 *
193 * @returns true if enabled, false if not.
194 * @param pVCpu The cross context virtual CPU structure.
195 *
196 * @note If this call becomes a performance factor, we can make the data
197 * field available through a read-only view in VMCPU. See VM::cpum.ro.
198 */
199VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
200{
201 return pVCpu->em.s.fHypercallEnabled;
202}
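
/*
 * Illustrative sketch (not part of the original file): gating a VMCALL/VMMCALL
 * intercept on the flag above.  The handler name and the GIMHypercall() call
 * are assumptions for the example; only EMAreHypercallInstructionsEnabled()
 * is taken from this file.
 */
#if 0 /* usage sketch */
static VBOXSTRICTRC emExampleHandleHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (!EMAreHypercallInstructionsEnabled(pVCpu))
        return VERR_EM_INTERPRETER;     /* let the caller raise #UD instead */
    return GIMHypercall(pVCpu, pCtx);   /* assumed GIM entry point */
}
#endif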
203
204
205/**
206 * Prepare an MWAIT - essentials of the MONITOR instruction.
207 *
208 * @returns VINF_SUCCESS
209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
210 * @param rax The content of RAX.
211 * @param rcx The content of RCX.
212 * @param rdx The content of RDX.
213 * @param GCPhys The physical address corresponding to rax.
214 */
215VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
216{
217 pVCpu->em.s.MWait.uMonitorRAX = rax;
218 pVCpu->em.s.MWait.uMonitorRCX = rcx;
219 pVCpu->em.s.MWait.uMonitorRDX = rdx;
220 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
221 /** @todo Make use of GCPhys. */
222 NOREF(GCPhys);
223 /** @todo Complete MONITOR implementation. */
224 return VINF_SUCCESS;
225}
226
227
228/**
229 * Checks if the monitor hardware is armed / active.
230 *
231 * @returns true if armed, false otherwise.
232 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
233 */
234VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
235{
236 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
237}
238
239
240/**
241 * Performs an MWAIT.
242 *
243 * @returns VINF_SUCCESS
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 * @param rax The content of RAX.
246 * @param rcx The content of RCX.
247 */
248VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
249{
250 pVCpu->em.s.MWait.uMWaitRAX = rax;
251 pVCpu->em.s.MWait.uMWaitRCX = rcx;
252 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
253 if (rcx)
254 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
255 else
256 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
257 /** @todo not completely correct?? */
258 return VINF_EM_HALT;
259}
260
261
262
263/**
264 * Determine if we should continue execution in HM after encountering an mwait
265 * instruction.
266 *
267 * Clears MWAIT flags if returning @c true.
268 *
269 * @returns true if we should continue, false if we should halt.
270 * @param pVCpu The cross context virtual CPU structure.
271 * @param pCtx Current CPU context.
272 */
273VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
274{
275 if ( pCtx->eflags.Bits.u1IF
276 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
277 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
278 {
279 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
280 {
281 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
282 return true;
283 }
284 }
285
286 return false;
287}
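
/*
 * Illustrative sketch (not part of the original file): the typical shape of an
 * MWAIT intercept - record the wait, then halt unless the conditions above say
 * execution may continue.  The handler name is an assumption; EMInterpretMWait
 * is defined further down in this file.  RIP advancing is omitted for brevity.
 */
#if 0 /* usage sketch */
static VBOXSTRICTRC emExampleHandleMWaitExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    VBOXSTRICTRC rcStrict = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    if (   rcStrict == VINF_EM_HALT
        && EMMonitorWaitShouldContinue(pVCpu, pCtx))
        rcStrict = VINF_SUCCESS;        /* interrupt pending - keep running */
    return rcStrict;
}
#endif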
288
289
290/**
291 * Determine if we should continue execution in HM after encountering a hlt
292 * instruction.
293 *
294 * @returns true if we should continue, false if we should halt.
295 * @param pVCpu The cross context virtual CPU structure.
296 * @param pCtx Current CPU context.
297 */
298VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
299{
300 /** @todo Shouldn't we be checking GIF here? */
301 if (pCtx->eflags.Bits.u1IF)
302 return VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
303 return false;
304}
305
306
307/**
308 * Unhalts and wakes up the given CPU.
309 *
310 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
311 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up. If
312 * the CPU isn't currently in a halt, the next HLT instruction it executes will
313 * be affected.
314 *
315 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
316 * @param pVM The cross context VM structure.
317 * @param pVCpuDst The cross context virtual CPU structure of the
318 * CPU to unhalt and wake up. This is usually not the
319 * same as the caller.
320 * @thread EMT
321 */
322VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst)
323{
324 /*
325 * Flag the current(/next) HLT to unhalt immediately.
326 */
327 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);
328
329 /*
330 * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
331 * just do it here for now).
332 */
333#ifdef IN_RING0
334 /* We might be here with preemption disabled or enabled (i.e. depending on
335 thread-context hooks being used), so don't try obtaining the GVMMR0 used
336 lock here. See @bugref{7270#c148}. */
337 int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
338 AssertRC(rc);
339
340#elif defined(IN_RING3)
341 int rc = SUPR3CallVMMR0(pVM->pVMR0, pVCpuDst->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
342 AssertRC(rc);
343
344#else
345 /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
346 Assert(pVM->cCpus == 1); NOREF(pVM);
347 int rc = VINF_SUCCESS;
348#endif
349 return rc;
350}
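
/*
 * Illustrative sketch (not part of the original file): a KICK_CPU style
 * hypercall handler forwarding to EMUnhaltAndWakeUp.  The helper name and the
 * pVM->aCpus indexing are assumptions for the example.
 */
#if 0 /* usage sketch */
static int emExampleKickCpu(PVM pVM, VMCPUID idCpuDst)
{
    AssertReturn(idCpuDst < pVM->cCpus, VERR_INVALID_PARAMETER);
    return EMUnhaltAndWakeUp(pVM, &pVM->aCpus[idCpuDst]);
}
#endif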
351
352#ifndef IN_RING3
353
354/**
355 * Makes an I/O port write pending for ring-3 processing.
356 *
357 * @returns VINF_EM_PENDING_R3_IOPORT_WRITE
358 * @param pVCpu The cross context virtual CPU structure.
359 * @param uPort The I/O port.
360 * @param cbInstr The instruction length (for RIP updating).
361 * @param cbValue The write size.
362 * @param uValue The value being written.
363 * @sa emR3ExecutePendingIoPortWrite
364 *
365 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
366 */
367VMMRZ_INT_DECL(VBOXSTRICTRC)
368EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
369{
370 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
371 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
372 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
373 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
374 pVCpu->em.s.PendingIoPortAccess.uValue = uValue;
375 return VINF_EM_PENDING_R3_IOPORT_WRITE;
376}
377
378
379/**
380 * Makes an I/O port read pending for ring-3 processing.
381 *
382 * @returns VINF_EM_PENDING_R3_IOPORT_READ
383 * @param pVCpu The cross context virtual CPU structure.
384 * @param uPort The I/O port.
385 * @param cbInstr The instruction length (for RIP updating).
386 * @param cbValue The read size.
387 * @sa emR3ExecutePendingIoPortRead
388 *
389 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
390 */
391VMMRZ_INT_DECL(VBOXSTRICTRC)
392EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
393{
394 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
395 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
396 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
397 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
398 pVCpu->em.s.PendingIoPortAccess.uValue = UINT32_C(0x52454144); /* 'READ' */
399 return VINF_EM_PENDING_R3_IOPORT_READ;
400}
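
/*
 * Illustrative sketch (not part of the original file): deferring an OUT
 * intercept that cannot be completed in ring-0/raw-mode to ring-3 via the
 * helper above.  The handler name and how uPort/uValue/cbInstr are obtained
 * are assumptions for the example.
 */
#if 0 /* usage sketch */
static VBOXSTRICTRC emExampleDeferOutToRing3(PVMCPU pVCpu, RTIOPORT uPort, uint32_t uValue, uint8_t cbInstr)
{
    /* Queue a 1-byte port write; ring-3 completes it and advances RIP by cbInstr. */
    return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, 1 /*cbValue*/, uValue);
}
#endif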
401
402#endif /* !IN_RING3 */
403
404/**
405 * Locks REM execution to a single VCPU.
406 *
407 * @param pVM The cross context VM structure.
408 */
409VMMDECL(void) EMRemLock(PVM pVM)
410{
411#ifdef VBOX_WITH_REM
412 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
413 return; /* early init */
414
415 Assert(!PGMIsLockOwner(pVM));
416 Assert(!IOMIsLockWriteOwner(pVM));
417 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
418 AssertRCSuccess(rc);
419#else
420 RT_NOREF(pVM);
421#endif
422}
423
424
425/**
426 * Unlocks REM execution.
427 *
428 * @param pVM The cross context VM structure.
429 */
430VMMDECL(void) EMRemUnlock(PVM pVM)
431{
432#ifdef VBOX_WITH_REM
433 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
434 return; /* early init */
435
436 PDMCritSectLeave(&pVM->em.s.CritSectREM);
437#else
438 RT_NOREF(pVM);
439#endif
440}
441
442
443/**
444 * Check if this VCPU currently owns the REM lock.
445 *
446 * @returns bool owner/not owner
447 * @param pVM The cross context VM structure.
448 */
449VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
450{
451#ifdef VBOX_WITH_REM
452 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
453 return true; /* early init */
454
455 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
456#else
457 RT_NOREF(pVM);
458 return true;
459#endif
460}
461
462
463/**
464 * Try to acquire the REM lock.
465 *
466 * @returns VBox status code
467 * @param pVM The cross context VM structure.
468 */
469VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
470{
471#ifdef VBOX_WITH_REM
472 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
473 return VINF_SUCCESS; /* early init */
474
475 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
476#else
477 RT_NOREF(pVM);
478 return VINF_SUCCESS;
479#endif
480}
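
/*
 * Illustrative sketch (not part of the original file): the usual
 * lock/do-work/unlock pattern around the REM critical section managed by the
 * helpers above.  The function name and the body comment are assumptions.
 */
#if 0 /* usage sketch */
static void emExampleTouchRem(PVM pVM)
{
    EMRemLock(pVM);
    Assert(EMRemIsLockOwner(pVM));
    /* ... access recompiler (REM) state here ... */
    EMRemUnlock(pVM);
}
#endif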
481
482
483/**
484 * @callback_method_impl{FNDISREADBYTES}
485 */
486static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
487{
488 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
489#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
490 PVM pVM = pVCpu->CTX_SUFF(pVM);
491#endif
492 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
493 int rc;
494
495 /*
496 * Figure how much we can or must read.
497 */
498 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
499 if (cbToRead > cbMaxRead)
500 cbToRead = cbMaxRead;
501 else if (cbToRead < cbMinRead)
502 cbToRead = cbMinRead;
503
504#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
505 /*
506 * We might be called upon to interpret an instruction in a patch.
507 */
508 if (PATMIsPatchGCAddr(pVM, uSrcAddr))
509 {
510# ifdef IN_RC
511 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
512# else
513 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVM, uSrcAddr), cbToRead);
514# endif
515 rc = VINF_SUCCESS;
516 }
517 else
518#endif
519 {
520# ifdef IN_RC
521 /*
522 * Try to access it through the shadow page tables first. Fall back on the
523 * slower PGM method if it fails because the TLB or page table was
524 * modified recently.
525 */
526 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
527 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
528 {
529 cbToRead = cbMinRead;
530 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
531 }
532 if (rc == VERR_ACCESS_DENIED)
533#endif
534 {
535 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
536 if (RT_FAILURE(rc))
537 {
538 if (cbToRead > cbMinRead)
539 {
540 cbToRead = cbMinRead;
541 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
542 }
543 if (RT_FAILURE(rc))
544 {
545#ifndef IN_RC
546 /*
547 * If we fail to find the page via the guest's page tables
548 * we invalidate the page in the host TLB (pertaining to
549 * the guest in the NestedPaging case). See @bugref{6043}.
550 */
551 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
552 {
553 HMInvalidatePage(pVCpu, uSrcAddr);
554 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
555 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
556 }
557#endif
558 }
559 }
560 }
561 }
562
563 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
564 return rc;
565}
566
567
568#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
569DECLINLINE(int) emDisCoreOne(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
570{
571 NOREF(pVM);
572 return DISInstrWithReader(InstrGC, (DISCPUMODE)pDis->uCpuMode, emReadBytes, pVCpu, pDis, pOpsize);
573}
574#endif
575
576
577/**
578 * Disassembles the current instruction.
579 *
580 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
581 * details.
582 *
583 * @param pVM The cross context VM structure.
584 * @param pVCpu The cross context virtual CPU structure.
585 * @param pDis Where to return the parsed instruction info.
586 * @param pcbInstr Where to return the instruction size. (optional)
587 */
588VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
589{
590 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
591 RTGCPTR GCPtrInstr;
592#if 0
593 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
594#else
595/** @todo Get the CPU mode as well while we're at it! */
596 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
597 pCtxCore->rip, &GCPtrInstr);
598#endif
599 if (RT_FAILURE(rc))
600 {
601 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
602 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
603 return rc;
604 }
605 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
606}
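
/*
 * Illustrative sketch (not part of the original file): disassembling the
 * instruction at the current RIP and branching on the opcode.  The helper
 * name and the OP_RDTSC check are assumptions for the example.
 */
#if 0 /* usage sketch */
static int emExampleDisasAtRip(PVM pVM, PVMCPU pVCpu)
{
    DISCPUSTATE Dis;
    unsigned    cbInstr = 0;
    int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
    if (RT_SUCCESS(rc) && Dis.pCurInstr->uOpcode == OP_RDTSC)
        Log(("Example: RDTSC at current RIP, %u bytes\n", cbInstr));
    return rc;
}
#endif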
607
608
609/**
610 * Disassembles one instruction.
611 *
612 * This is used internally by the interpreter and by trap/access handlers.
613 *
614 * @returns VBox status code.
615 *
616 * @param pVM The cross context VM structure.
617 * @param pVCpu The cross context virtual CPU structure.
618 * @param GCPtrInstr The flat address of the instruction.
619 * @param pCtxCore The context core (used to determine the cpu mode).
620 * @param pDis Where to return the parsed instruction info.
621 * @param pcbInstr Where to return the instruction size. (optional)
622 */
623VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
624 PDISCPUSTATE pDis, unsigned *pcbInstr)
625{
626 NOREF(pVM);
627 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
628 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
629 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
630 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
631 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
632 if (RT_SUCCESS(rc))
633 return VINF_SUCCESS;
634 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
635 return rc;
636}
637
638
639#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
640static void emCompareWithIem(PVMCPU pVCpu, PCCPUMCTX pEmCtx, PCCPUMCTX pIemCtx,
641 VBOXSTRICTRC rcEm, VBOXSTRICTRC rcIem,
642 uint32_t cbEm, uint32_t cbIem)
643{
644 /* Quick compare. */
645 if ( rcEm == rcIem
646 && cbEm == cbIem
647 && g_cbEmWrote == g_cbIemWrote
648 && memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote) == 0
649 && memcmp(pIemCtx, pEmCtx, sizeof(*pIemCtx)) == 0
650 && (g_fEmFFs & g_fInterestingFFs) == (g_fIemFFs & g_fInterestingFFs)
651 )
652 return;
653
654 /* Report exact differences. */
655 RTLogPrintf("! EM and IEM differs at %04x:%08RGv !\n", g_IncomingCtx.cs.Sel, g_IncomingCtx.rip);
656 if (rcEm != rcIem)
657 RTLogPrintf(" * rcIem=%Rrc rcEm=%Rrc\n", VBOXSTRICTRC_VAL(rcIem), VBOXSTRICTRC_VAL(rcEm));
658 else if (cbEm != cbIem)
659 RTLogPrintf(" * cbIem=%#x cbEm=%#x\n", cbIem, cbEm);
660
661 if (RT_SUCCESS(rcEm) && RT_SUCCESS(rcIem))
662 {
663 if (g_cbIemWrote != g_cbEmWrote)
664 RTLogPrintf("!! g_cbIemWrote=%#x g_cbEmWrote=%#x\n", g_cbIemWrote, g_cbEmWrote);
665 else if (memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote))
666 {
667 RTLogPrintf("!! IemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
668 RTLogPrintf("!! EemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
669 }
670
671 if ((g_fEmFFs & g_fInterestingFFs) != (g_fIemFFs & g_fInterestingFFs))
672 RTLogPrintf("!! g_fIemFFs=%#x g_fEmFFs=%#x (diff=%#x)\n", g_fIemFFs & g_fInterestingFFs,
673 g_fEmFFs & g_fInterestingFFs, (g_fIemFFs ^ g_fEmFFs) & g_fInterestingFFs);
674
675# define CHECK_FIELD(a_Field) \
676 do \
677 { \
678 if (pEmCtx->a_Field != pIemCtx->a_Field) \
679 { \
680 switch (sizeof(pEmCtx->a_Field)) \
681 { \
682 case 1: RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
683 case 2: RTLogPrintf("!! %8s differs - iem=%04x - em=%04x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
684 case 4: RTLogPrintf("!! %8s differs - iem=%08x - em=%08x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
685 case 8: RTLogPrintf("!! %8s differs - iem=%016llx - em=%016llx\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
686 default: RTLogPrintf("!! %8s differs\n", #a_Field); break; \
687 } \
688 cDiffs++; \
689 } \
690 } while (0)
691
692# define CHECK_BIT_FIELD(a_Field) \
693 do \
694 { \
695 if (pEmCtx->a_Field != pIemCtx->a_Field) \
696 { \
697 RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); \
698 cDiffs++; \
699 } \
700 } while (0)
701
702# define CHECK_SEL(a_Sel) \
703 do \
704 { \
705 CHECK_FIELD(a_Sel.Sel); \
706 CHECK_FIELD(a_Sel.Attr.u); \
707 CHECK_FIELD(a_Sel.u64Base); \
708 CHECK_FIELD(a_Sel.u32Limit); \
709 CHECK_FIELD(a_Sel.fFlags); \
710 } while (0)
711
712 unsigned cDiffs = 0;
713 if (memcmp(&pEmCtx->fpu, &pIemCtx->fpu, sizeof(pIemCtx->fpu)))
714 {
715 RTLogPrintf(" the FPU state differs\n");
716 cDiffs++;
717 CHECK_FIELD(fpu.FCW);
718 CHECK_FIELD(fpu.FSW);
719 CHECK_FIELD(fpu.FTW);
720 CHECK_FIELD(fpu.FOP);
721 CHECK_FIELD(fpu.FPUIP);
722 CHECK_FIELD(fpu.CS);
723 CHECK_FIELD(fpu.Rsrvd1);
724 CHECK_FIELD(fpu.FPUDP);
725 CHECK_FIELD(fpu.DS);
726 CHECK_FIELD(fpu.Rsrvd2);
727 CHECK_FIELD(fpu.MXCSR);
728 CHECK_FIELD(fpu.MXCSR_MASK);
729 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
730 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
731 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
732 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
733 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
734 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
735 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
736 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
737 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
738 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
739 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
740 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
741 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
742 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
743 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
744 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
745 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
746 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
747 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
748 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
749 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
750 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
751 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
752 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
753 for (unsigned i = 0; i < RT_ELEMENTS(pEmCtx->fpu.au32RsrvdRest); i++)
754 CHECK_FIELD(fpu.au32RsrvdRest[i]);
755 }
756 CHECK_FIELD(rip);
757 if (pEmCtx->rflags.u != pIemCtx->rflags.u)
758 {
759 RTLogPrintf("!! rflags differs - iem=%08llx em=%08llx\n", pIemCtx->rflags.u, pEmCtx->rflags.u);
760 CHECK_BIT_FIELD(rflags.Bits.u1CF);
761 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
762 CHECK_BIT_FIELD(rflags.Bits.u1PF);
763 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
764 CHECK_BIT_FIELD(rflags.Bits.u1AF);
765 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
766 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
767 CHECK_BIT_FIELD(rflags.Bits.u1SF);
768 CHECK_BIT_FIELD(rflags.Bits.u1TF);
769 CHECK_BIT_FIELD(rflags.Bits.u1IF);
770 CHECK_BIT_FIELD(rflags.Bits.u1DF);
771 CHECK_BIT_FIELD(rflags.Bits.u1OF);
772 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
773 CHECK_BIT_FIELD(rflags.Bits.u1NT);
774 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
775 CHECK_BIT_FIELD(rflags.Bits.u1RF);
776 CHECK_BIT_FIELD(rflags.Bits.u1VM);
777 CHECK_BIT_FIELD(rflags.Bits.u1AC);
778 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
779 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
780 CHECK_BIT_FIELD(rflags.Bits.u1ID);
781 }
782
783 if (!g_fIgnoreRaxRdx)
784 CHECK_FIELD(rax);
785 CHECK_FIELD(rcx);
786 if (!g_fIgnoreRaxRdx)
787 CHECK_FIELD(rdx);
788 CHECK_FIELD(rbx);
789 CHECK_FIELD(rsp);
790 CHECK_FIELD(rbp);
791 CHECK_FIELD(rsi);
792 CHECK_FIELD(rdi);
793 CHECK_FIELD(r8);
794 CHECK_FIELD(r9);
795 CHECK_FIELD(r10);
796 CHECK_FIELD(r11);
797 CHECK_FIELD(r12);
798 CHECK_FIELD(r13);
799 CHECK_SEL(cs);
800 CHECK_SEL(ss);
801 CHECK_SEL(ds);
802 CHECK_SEL(es);
803 CHECK_SEL(fs);
804 CHECK_SEL(gs);
805 CHECK_FIELD(cr0);
806 CHECK_FIELD(cr2);
807 CHECK_FIELD(cr3);
808 CHECK_FIELD(cr4);
809 CHECK_FIELD(dr[0]);
810 CHECK_FIELD(dr[1]);
811 CHECK_FIELD(dr[2]);
812 CHECK_FIELD(dr[3]);
813 CHECK_FIELD(dr[6]);
814 CHECK_FIELD(dr[7]);
815 CHECK_FIELD(gdtr.cbGdt);
816 CHECK_FIELD(gdtr.pGdt);
817 CHECK_FIELD(idtr.cbIdt);
818 CHECK_FIELD(idtr.pIdt);
819 CHECK_SEL(ldtr);
820 CHECK_SEL(tr);
821 CHECK_FIELD(SysEnter.cs);
822 CHECK_FIELD(SysEnter.eip);
823 CHECK_FIELD(SysEnter.esp);
824 CHECK_FIELD(msrEFER);
825 CHECK_FIELD(msrSTAR);
826 CHECK_FIELD(msrPAT);
827 CHECK_FIELD(msrLSTAR);
828 CHECK_FIELD(msrCSTAR);
829 CHECK_FIELD(msrSFMASK);
830 CHECK_FIELD(msrKERNELGSBASE);
831
832# undef CHECK_FIELD
833# undef CHECK_BIT_FIELD
834 }
835}
836#endif /* VBOX_COMPARE_IEM_FIRST || VBOX_COMPARE_IEM_LAST */
837
838
839/**
840 * Interprets the current instruction.
841 *
842 * @returns VBox status code.
843 * @retval VINF_* Scheduling instructions.
844 * @retval VERR_EM_INTERPRETER Something we can't cope with.
845 * @retval VERR_* Fatal errors.
846 *
847 * @param pVCpu The cross context virtual CPU structure.
848 * @param pRegFrame The register frame.
849 * Updates the EIP if an instruction was executed successfully.
850 * @param pvFault The fault address (CR2).
851 *
852 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
853 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
854 * to worry about e.g. invalid modrm combinations (!)
855 */
856VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
857{
858 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
859 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
860#ifdef VBOX_WITH_IEM
861 NOREF(pvFault);
862
863# ifdef VBOX_COMPARE_IEM_AND_EM
864 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
865 g_IncomingCtx = *pCtx;
866 g_fIncomingFFs = pVCpu->fLocalForcedActions;
867 g_cbEmWrote = g_cbIemWrote = 0;
868
869# ifdef VBOX_COMPARE_IEM_FIRST
870 /* IEM */
871 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
872 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
873 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
874 rcIem = VERR_EM_INTERPRETER;
875 g_IemCtx = *pCtx;
876 g_fIemFFs = pVCpu->fLocalForcedActions;
877 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
878 *pCtx = g_IncomingCtx;
879# endif
880
881 /* EM */
882 RTGCPTR pbCode;
883 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
884 if (RT_SUCCESS(rcEm))
885 {
886 uint32_t cbOp;
887 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
888 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
889 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
890 if (RT_SUCCESS(rcEm))
891 {
892 Assert(cbOp == pDis->cbInstr);
893 uint32_t cbIgnored;
894 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
895 if (RT_SUCCESS(rcEm))
896 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
897
898 }
899 else rcEm = VERR_EM_INTERPRETER;
900 }
901 else
902 rcEm = VERR_EM_INTERPRETER;
903# ifdef VBOX_SAME_AS_EM
904 if (rcEm == VERR_EM_INTERPRETER)
905 {
906 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
907 return rcEm;
908 }
909# endif
910 g_EmCtx = *pCtx;
911 g_fEmFFs = pVCpu->fLocalForcedActions;
912 VBOXSTRICTRC rc = rcEm;
913
914# ifdef VBOX_COMPARE_IEM_LAST
915 /* IEM */
916 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
917 *pCtx = g_IncomingCtx;
918 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
919 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
920 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
921 rcIem = VERR_EM_INTERPRETER;
922 g_IemCtx = *pCtx;
923 g_fIemFFs = pVCpu->fLocalForcedActions;
924 rc = rcIem;
925# endif
926
927# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
928 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
929# endif
930
931# else
932 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
933 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
934 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
935 rc = VERR_EM_INTERPRETER;
936# endif
937 if (rc != VINF_SUCCESS)
938 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
939
940 return rc;
941#else
942 RTGCPTR pbCode;
943 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
944 if (RT_SUCCESS(rc))
945 {
946 uint32_t cbOp;
947 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
948 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
949 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
950 if (RT_SUCCESS(rc))
951 {
952 Assert(cbOp == pDis->cbInstr);
953 uint32_t cbIgnored;
954 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
955 if (RT_SUCCESS(rc))
956 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
957
958 return rc;
959 }
960 }
961 return VERR_EM_INTERPRETER;
962#endif
963}
964
965
966/**
967 * Interprets the current instruction.
968 *
969 * @returns VBox status code.
970 * @retval VINF_* Scheduling instructions.
971 * @retval VERR_EM_INTERPRETER Something we can't cope with.
972 * @retval VERR_* Fatal errors.
973 *
974 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
975 * @param pRegFrame The register frame.
976 * Updates the EIP if an instruction was executed successfully.
977 * @param pvFault The fault address (CR2).
978 * @param pcbWritten Size of the write (if applicable).
979 *
980 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
981 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
982 * to worry about e.g. invalid modrm combinations (!)
983 */
984VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
985{
986 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
987 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
988#ifdef VBOX_WITH_IEM
989 NOREF(pvFault);
990
991# ifdef VBOX_COMPARE_IEM_AND_EM
992 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
993 g_IncomingCtx = *pCtx;
994 g_fIncomingFFs = pVCpu->fLocalForcedActions;
995 g_cbEmWrote = g_cbIemWrote = 0;
996
997# ifdef VBOX_COMPARE_IEM_FIRST
998 /* IEM */
999 uint32_t cbIemWritten = 0;
1000 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
1001 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1002 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1003 rcIem = VERR_EM_INTERPRETER;
1004 g_IemCtx = *pCtx;
1005 g_fIemFFs = pVCpu->fLocalForcedActions;
1006 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1007 *pCtx = g_IncomingCtx;
1008# endif
1009
1010 /* EM */
1011 uint32_t cbEmWritten = 0;
1012 RTGCPTR pbCode;
1013 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1014 if (RT_SUCCESS(rcEm))
1015 {
1016 uint32_t cbOp;
1017 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1018 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1019 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1020 if (RT_SUCCESS(rcEm))
1021 {
1022 Assert(cbOp == pDis->cbInstr);
1023 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbEmWritten);
1024 if (RT_SUCCESS(rcEm))
1025 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1026
1027 }
1028 else
1029 rcEm = VERR_EM_INTERPRETER;
1030 }
1031 else
1032 rcEm = VERR_EM_INTERPRETER;
1033# ifdef VBOX_SAME_AS_EM
1034 if (rcEm == VERR_EM_INTERPRETER)
1035 {
1036 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1037 return rcEm;
1038 }
1039# endif
1040 g_EmCtx = *pCtx;
1041 g_fEmFFs = pVCpu->fLocalForcedActions;
1042 *pcbWritten = cbEmWritten;
1043 VBOXSTRICTRC rc = rcEm;
1044
1045# ifdef VBOX_COMPARE_IEM_LAST
1046 /* IEM */
1047 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1048 *pCtx = g_IncomingCtx;
1049 uint32_t cbIemWritten = 0;
1050 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
1051 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1052 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1053 rcIem = VERR_EM_INTERPRETER;
1054 g_IemCtx = *pCtx;
1055 g_fIemFFs = pVCpu->fLocalForcedActions;
1056 *pcbWritten = cbIemWritten;
1057 rc = rcIem;
1058# endif
1059
1060# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1061 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, cbEmWritten, cbIemWritten);
1062# endif
1063
1064# else
1065 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
1066 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1067 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1068 rc = VERR_EM_INTERPRETER;
1069# endif
1070 if (rc != VINF_SUCCESS)
1071 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1072
1073 return rc;
1074#else
1075 RTGCPTR pbCode;
1076 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1077 if (RT_SUCCESS(rc))
1078 {
1079 uint32_t cbOp;
1080 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1081 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1082 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1083 if (RT_SUCCESS(rc))
1084 {
1085 Assert(cbOp == pDis->cbInstr);
1086 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, pcbWritten);
1087 if (RT_SUCCESS(rc))
1088 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1089
1090 return rc;
1091 }
1092 }
1093 return VERR_EM_INTERPRETER;
1094#endif
1095}
1096
1097
1098/**
1099 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1100 *
1101 * IP/EIP/RIP *IS* updated!
1102 *
1103 * @returns VBox strict status code.
1104 * @retval VINF_* Scheduling instructions. When these are returned, it
1105 * starts to get a bit tricky to know whether code was
1106 * executed or not... We'll address this when it becomes a problem.
1107 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1108 * @retval VERR_* Fatal errors.
1109 *
1110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1111 * @param pDis The disassembler cpu state for the instruction to be
1112 * interpreted.
1113 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1114 * @param pvFault The fault address (CR2).
1115 * @param enmCodeType Code type (user/supervisor)
1116 *
1117 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1118 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1119 * to worry about e.g. invalid modrm combinations (!)
1120 *
1121 * @todo At this time we do NOT check if the instruction overwrites vital information.
1122 * Make sure this can't happen!! (will add some assertions/checks later)
1123 */
1124VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
1125 RTGCPTR pvFault, EMCODETYPE enmCodeType)
1126{
1127 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1128 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1129#ifdef VBOX_WITH_IEM
1130 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
1131
1132# ifdef VBOX_COMPARE_IEM_AND_EM
1133 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1134 g_IncomingCtx = *pCtx;
1135 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1136 g_cbEmWrote = g_cbIemWrote = 0;
1137
1138# ifdef VBOX_COMPARE_IEM_FIRST
1139 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1140 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1141 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1142 rcIem = VERR_EM_INTERPRETER;
1143 g_IemCtx = *pCtx;
1144 g_fIemFFs = pVCpu->fLocalForcedActions;
1145 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1146 *pCtx = g_IncomingCtx;
1147# endif
1148
1149 /* EM */
1150 uint32_t cbIgnored;
1151 VBOXSTRICTRC rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1152 if (RT_SUCCESS(rcEm))
1153 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1154# ifdef VBOX_SAME_AS_EM
1155 if (rcEm == VERR_EM_INTERPRETER)
1156 {
1157 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1158 return rcEm;
1159 }
1160# endif
1161 g_EmCtx = *pCtx;
1162 g_fEmFFs = pVCpu->fLocalForcedActions;
1163 VBOXSTRICTRC rc = rcEm;
1164
1165# ifdef VBOX_COMPARE_IEM_LAST
1166 /* IEM */
1167 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1168 *pCtx = g_IncomingCtx;
1169 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1170 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1171 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1172 rcIem = VERR_EM_INTERPRETER;
1173 g_IemCtx = *pCtx;
1174 g_fIemFFs = pVCpu->fLocalForcedActions;
1175 rc = rcIem;
1176# endif
1177
1178# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1179 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1180# endif
1181
1182# else
1183 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1184 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1185 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1186 rc = VERR_EM_INTERPRETER;
1187# endif
1188
1189 if (rc != VINF_SUCCESS)
1190 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1191
1192 return rc;
1193#else
1194 uint32_t cbIgnored;
1195 VBOXSTRICTRC rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1196 if (RT_SUCCESS(rc))
1197 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1198 return rc;
1199#endif
1200}
1201
1202#ifdef IN_RC
1203
1204DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1205{
1206 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1207 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1208 return rc;
1209 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1210}
1211
1212
1213/**
1214 * Interpret IRET (currently only to V86 code) - PATM only.
1215 *
1216 * @returns VBox status code.
1217 * @param pVM The cross context VM structure.
1218 * @param pVCpu The cross context virtual CPU structure.
1219 * @param pRegFrame The register frame.
1220 *
1221 */
1222VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1223{
1224 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1225 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1226 int rc;
1227
1228 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1229 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1230 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1231 * this function. Fear that it may guru on us, thus not converted to
1232 * IEM. */
1233
1234 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1235 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1236 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1237 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1238 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1239
1240 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1241 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1242 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1243 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1244 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1245 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1246 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1247
1248 pRegFrame->eip = eip & 0xffff;
1249 pRegFrame->cs.Sel = cs;
1250
1251 /* Mask away all reserved bits */
1252 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1253 eflags &= uMask;
1254
1255 CPUMRawSetEFlags(pVCpu, eflags);
1256 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1257
1258 pRegFrame->esp = esp;
1259 pRegFrame->ss.Sel = ss;
1260 pRegFrame->ds.Sel = ds;
1261 pRegFrame->es.Sel = es;
1262 pRegFrame->fs.Sel = fs;
1263 pRegFrame->gs.Sel = gs;
1264
1265 return VINF_SUCCESS;
1266}
1267
1268# ifndef VBOX_WITH_IEM
1269/**
1270 * IRET Emulation.
1271 */
1272static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1273{
1274#ifdef VBOX_WITH_RAW_RING1
1275 NOREF(pvFault); NOREF(pcbSize); NOREF(pDis);
1276 if (EMIsRawRing1Enabled(pVM))
1277 {
1278 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1279 RTGCUINTPTR eip, cs, esp, ss, eflags, uMask;
1280 int rc;
1281 uint32_t cpl, rpl;
1282
1283 /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */
1284 /** @todo we don't verify all the edge cases that generate #GP faults */
1285
1286 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1287 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1288 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1289 * this function. Fear that it may guru on us, thus not converted to
1290 * IEM. */
1291
1292 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1293 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1294 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1295 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1296 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1297
1298 /* Deal with V86 above. */
1299 if (eflags & X86_EFL_VM)
1300 return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
1301
1302 cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
1303 rpl = cs & X86_SEL_RPL;
1304
1305 Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags));
1306 if (rpl != cpl)
1307 {
1308 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1309 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1310 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1311 Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl));
1312 Log(("emInterpretIret: SS:ESP=%04x:%08x\n", ss, esp));
1313 pRegFrame->ss.Sel = ss;
1314 pRegFrame->esp = esp;
1315 }
1316 pRegFrame->cs.Sel = cs;
1317 pRegFrame->eip = eip;
1318
1319 /* Adjust CS & SS as required. */
1320 CPUMRCRecheckRawState(pVCpu, pRegFrame);
1321
1322 /* Mask away all reserved bits */
1323 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1324 eflags &= uMask;
1325
1326 CPUMRawSetEFlags(pVCpu, eflags);
1327 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1328 return VINF_SUCCESS;
1329 }
1330#else
1331 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
1332#endif
1333 return VERR_EM_INTERPRETER;
1334}
1335# endif /* !VBOX_WITH_IEM */
1336
1337#endif /* IN_RC */
1338
1339
1340
1341/*
1342 *
1343 * Old interpreter primitives used by HM, move/eliminate later.
1344 * Old interpreter primitives used by HM, move/eliminate later.
1345 * Old interpreter primitives used by HM, move/eliminate later.
1346 * Old interpreter primitives used by HM, move/eliminate later.
1347 * Old interpreter primitives used by HM, move/eliminate later.
1348 *
1349 */
1350
1351
1352/**
1353 * Interpret CPUID given the parameters in the CPU context.
1354 *
1355 * @returns VBox status code.
1356 * @param pVM The cross context VM structure.
1357 * @param pVCpu The cross context virtual CPU structure.
1358 * @param pRegFrame The register frame.
1359 *
1360 */
1361VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1362{
1363 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1364 uint32_t iLeaf = pRegFrame->eax;
1365 uint32_t iSubLeaf = pRegFrame->ecx;
1366 NOREF(pVM);
1367
1368 /* cpuid clears the high dwords of the affected 64-bit registers. */
1369 pRegFrame->rax = 0;
1370 pRegFrame->rbx = 0;
1371 pRegFrame->rcx = 0;
1372 pRegFrame->rdx = 0;
1373
1374 /* Note: operates the same in 64-bit and non-64-bit mode. */
1375 CPUMGetGuestCpuId(pVCpu, iLeaf, iSubLeaf, &pRegFrame->eax, &pRegFrame->ebx, &pRegFrame->ecx, &pRegFrame->edx);
1376 Log(("Emulate: CPUID %x/%x -> %08x %08x %08x %08x\n", iLeaf, iSubLeaf, pRegFrame->eax, pRegFrame->ebx, pRegFrame->ecx, pRegFrame->edx));
1377 return VINF_SUCCESS;
1378}
1379
1380
1381/**
1382 * Interpret RDTSC.
1383 *
1384 * @returns VBox status code.
1385 * @param pVM The cross context VM structure.
1386 * @param pVCpu The cross context virtual CPU structure.
1387 * @param pRegFrame The register frame.
1388 *
1389 */
1390VMM_INT_DECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1391{
1392 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1393 unsigned uCR4 = CPUMGetGuestCR4(pVCpu);
1394
1395 if (uCR4 & X86_CR4_TSD)
1396 return VERR_EM_INTERPRETER; /* genuine #GP */
1397
1398 uint64_t uTicks = TMCpuTickGet(pVCpu);
1399#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1400 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
1401#endif
1402
1403 /* Same behaviour in 32 & 64-bit mode */
1404 pRegFrame->rax = RT_LO_U32(uTicks);
1405 pRegFrame->rdx = RT_HI_U32(uTicks);
1406#ifdef VBOX_COMPARE_IEM_AND_EM
1407 g_fIgnoreRaxRdx = true;
1408#endif
1409
1410 NOREF(pVM);
1411 return VINF_SUCCESS;
1412}
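
/*
 * Illustrative sketch (not part of the original file): how a VT-x/AMD-V RDTSC
 * intercept might use the helper above and advance the guest RIP.  The handler
 * name and the fixed 2-byte instruction length are assumptions for the example.
 */
#if 0 /* usage sketch */
static int emExampleHandleRdtscExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    if (RT_SUCCESS(rc))
        pCtx->rip += 2;                 /* RDTSC encodes as 0F 31 */
    return rc;
}
#endif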
1413
1414/**
1415 * Interpret RDTSCP.
1416 *
1417 * @returns VBox status code.
1418 * @param pVM The cross context VM structure.
1419 * @param pVCpu The cross context virtual CPU structure.
1420 * @param pCtx The CPU context.
1421 *
1422 */
1423VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1424{
1425 Assert(pCtx == CPUMQueryGuestCtxPtr(pVCpu));
1426 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1427
1428 if (!pVM->cpum.ro.GuestFeatures.fRdTscP)
1429 {
1430 AssertFailed();
1431 return VERR_EM_INTERPRETER; /* genuine #UD */
1432 }
1433
1434 if (uCR4 & X86_CR4_TSD)
1435 return VERR_EM_INTERPRETER; /* genuine #GP */
1436
1437 uint64_t uTicks = TMCpuTickGet(pVCpu);
1438#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1439 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
1440#endif
1441
1442 /* Same behaviour in 32 & 64-bit mode */
1443 pCtx->rax = RT_LO_U32(uTicks);
1444 pCtx->rdx = RT_HI_U32(uTicks);
1445#ifdef VBOX_COMPARE_IEM_AND_EM
1446 g_fIgnoreRaxRdx = true;
1447#endif
1448 /* Low dword of the TSC_AUX msr only. */
1449 VBOXSTRICTRC rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx); Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1450 pCtx->rcx &= UINT32_C(0xffffffff);
1451
1452 return VINF_SUCCESS;
1453}
1454
1455/**
1456 * Interpret RDPMC.
1457 *
1458 * @returns VBox status code.
1459 * @param pVM The cross context VM structure.
1460 * @param pVCpu The cross context virtual CPU structure.
1461 * @param pRegFrame The register frame.
1462 *
1463 */
1464VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1465{
1466 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1467 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1468
1469 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1470 if ( !(uCR4 & X86_CR4_PCE)
1471 && CPUMGetGuestCPL(pVCpu) != 0)
1472 {
1473 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1474 return VERR_EM_INTERPRETER; /* genuine #GP */
1475 }
1476
1477 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1478 pRegFrame->rax = 0;
1479 pRegFrame->rdx = 0;
1480 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1481 * ecx but see @bugref{3472}! */
1482
1483 NOREF(pVM);
1484 return VINF_SUCCESS;
1485}
1486
1487
1488/**
1489 * MWAIT Emulation.
1490 */
1491VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1492{
1493 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1494 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1495 NOREF(pVM);
1496
1497 /* Get the current privilege level. */
1498 cpl = CPUMGetGuestCPL(pVCpu);
1499 if (cpl != 0)
1500 return VERR_EM_INTERPRETER; /* supervisor only */
1501
1502 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1503 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1504 return VERR_EM_INTERPRETER; /* not supported */
1505
1506 /*
1507 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1508 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1509 */
1510 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1511 if (pRegFrame->ecx > 1)
1512 {
1513 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1514 return VERR_EM_INTERPRETER; /* illegal value. */
1515 }
1516
1517 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1518 {
1519 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1520 return VERR_EM_INTERPRETER; /* illegal value. */
1521 }
1522
1523 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1524}
1525
1526
1527/**
1528 * MONITOR Emulation.
1529 */
1530VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1531{
1532 uint32_t u32Dummy, u32ExtFeatures, cpl;
1533 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1534 NOREF(pVM);
1535
1536 if (pRegFrame->ecx != 0)
1537 {
1538 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1539 return VERR_EM_INTERPRETER; /* illegal value. */
1540 }
1541
1542 /* Get the current privilege level. */
1543 cpl = CPUMGetGuestCPL(pVCpu);
1544 if (cpl != 0)
1545 return VERR_EM_INTERPRETER; /* supervisor only */
1546
1547 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1548 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1549 return VERR_EM_INTERPRETER; /* not supported */
1550
1551 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1552 return VINF_SUCCESS;
1553}
1554
1555
1556/* VT-x only: */
1557
1558/**
1559 * Interpret INVLPG.
1560 *
1561 * @returns VBox status code.
1562 * @param pVM The cross context VM structure.
1563 * @param pVCpu The cross context virtual CPU structure.
1564 * @param pRegFrame The register frame.
1565 * @param pAddrGC Operand address.
1566 *
1567 */
1568VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC)
1569{
1570 /** @todo is addr always a flat linear address or ds based
1571 * (in absence of segment override prefixes)????
1572 */
1573 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1574 NOREF(pVM); NOREF(pRegFrame);
1575#ifdef IN_RC
1576 LogFlow(("RC: EMULATE: invlpg %RGv\n", pAddrGC));
1577#endif
1578 VBOXSTRICTRC rc = PGMInvalidatePage(pVCpu, pAddrGC);
1579 if ( rc == VINF_SUCCESS
1580 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
1581 return VINF_SUCCESS;
1582 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
1583 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), pAddrGC),
1584 VERR_EM_INTERPRETER);
1585 return rc;
1586}
1587
1588
1589#ifdef LOG_ENABLED
1590static const char *emMSRtoString(uint32_t uMsr)
1591{
1592 switch (uMsr)
1593 {
1594 case MSR_IA32_APICBASE: return "MSR_IA32_APICBASE";
1595 case MSR_IA32_CR_PAT: return "MSR_IA32_CR_PAT";
1596 case MSR_IA32_SYSENTER_CS: return "MSR_IA32_SYSENTER_CS";
1597 case MSR_IA32_SYSENTER_EIP: return "MSR_IA32_SYSENTER_EIP";
1598 case MSR_IA32_SYSENTER_ESP: return "MSR_IA32_SYSENTER_ESP";
1599 case MSR_K6_EFER: return "MSR_K6_EFER";
1600 case MSR_K8_SF_MASK: return "MSR_K8_SF_MASK";
1601 case MSR_K6_STAR: return "MSR_K6_STAR";
1602 case MSR_K8_LSTAR: return "MSR_K8_LSTAR";
1603 case MSR_K8_CSTAR: return "MSR_K8_CSTAR";
1604 case MSR_K8_FS_BASE: return "MSR_K8_FS_BASE";
1605 case MSR_K8_GS_BASE: return "MSR_K8_GS_BASE";
1606 case MSR_K8_KERNEL_GS_BASE: return "MSR_K8_KERNEL_GS_BASE";
1607 case MSR_K8_TSC_AUX: return "MSR_K8_TSC_AUX";
1608 case MSR_IA32_BIOS_SIGN_ID: return "Unsupported MSR_IA32_BIOS_SIGN_ID";
1609 case MSR_IA32_PLATFORM_ID: return "Unsupported MSR_IA32_PLATFORM_ID";
1610 case MSR_IA32_BIOS_UPDT_TRIG: return "Unsupported MSR_IA32_BIOS_UPDT_TRIG";
1611 case MSR_IA32_TSC: return "MSR_IA32_TSC";
1612 case MSR_IA32_MISC_ENABLE: return "MSR_IA32_MISC_ENABLE";
1613 case MSR_IA32_MTRR_CAP: return "MSR_IA32_MTRR_CAP";
1614 case MSR_IA32_MCG_CAP: return "Unsupported MSR_IA32_MCG_CAP";
1615 case MSR_IA32_MCG_STATUS: return "Unsupported MSR_IA32_MCG_STATUS";
1616 case MSR_IA32_MCG_CTRL: return "Unsupported MSR_IA32_MCG_CTRL";
1617 case MSR_IA32_MTRR_DEF_TYPE: return "MSR_IA32_MTRR_DEF_TYPE";
1618 case MSR_K7_EVNTSEL0: return "Unsupported MSR_K7_EVNTSEL0";
1619 case MSR_K7_EVNTSEL1: return "Unsupported MSR_K7_EVNTSEL1";
1620 case MSR_K7_EVNTSEL2: return "Unsupported MSR_K7_EVNTSEL2";
1621 case MSR_K7_EVNTSEL3: return "Unsupported MSR_K7_EVNTSEL3";
1622 case MSR_IA32_MC0_CTL: return "Unsupported MSR_IA32_MC0_CTL";
1623 case MSR_IA32_MC0_STATUS: return "Unsupported MSR_IA32_MC0_STATUS";
1624 case MSR_IA32_PERFEVTSEL0: return "Unsupported MSR_IA32_PERFEVTSEL0";
1625 case MSR_IA32_PERFEVTSEL1: return "Unsupported MSR_IA32_PERFEVTSEL1";
1626 case MSR_IA32_PERF_STATUS: return "MSR_IA32_PERF_STATUS";
1627 case MSR_IA32_PLATFORM_INFO: return "MSR_IA32_PLATFORM_INFO";
1628 case MSR_IA32_PERF_CTL: return "Unsupported MSR_IA32_PERF_CTL";
1629 case MSR_K7_PERFCTR0: return "Unsupported MSR_K7_PERFCTR0";
1630 case MSR_K7_PERFCTR1: return "Unsupported MSR_K7_PERFCTR1";
1631 case MSR_K7_PERFCTR2: return "Unsupported MSR_K7_PERFCTR2";
1632 case MSR_K7_PERFCTR3: return "Unsupported MSR_K7_PERFCTR3";
1633 case MSR_IA32_PMC0: return "Unsupported MSR_IA32_PMC0";
1634 case MSR_IA32_PMC1: return "Unsupported MSR_IA32_PMC1";
1635 case MSR_IA32_PMC2: return "Unsupported MSR_IA32_PMC2";
1636 case MSR_IA32_PMC3: return "Unsupported MSR_IA32_PMC3";
1637 }
1638 return "Unknown MSR";
1639}
1640#endif /* LOG_ENABLED */
1641
1642
1643/**
1644 * Interpret RDMSR
1645 *
1646 * @returns VBox status code.
1647 * @param pVM The cross context VM structure.
1648 * @param pVCpu The cross context virtual CPU structure.
1649 * @param pRegFrame The register frame.
1650 */
1651VMM_INT_DECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1652{
1653 NOREF(pVM);
1654
1655 /* Get the current privilege level. */
1656 if (CPUMGetGuestCPL(pVCpu) != 0)
1657 {
1658 Log4(("EM: Refuse RDMSR: CPL != 0\n"));
1659 return VERR_EM_INTERPRETER; /* supervisor only */
1660 }
1661
1662 uint64_t uValue;
1663 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
1664 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1665 {
1666 Log4(("EM: Refuse RDMSR: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1667 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_READ);
1668 return VERR_EM_INTERPRETER;
1669 }
1670 pRegFrame->rax = RT_LO_U32(uValue);
1671 pRegFrame->rdx = RT_HI_U32(uValue);
1672 LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
1673 return VINF_SUCCESS;
1674}
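
/*
 * Worked example of the EDX:EAX split above (standard RDMSR semantics),
 * using a made-up MSR value:
 *
 *     uint64_t uValue = UINT64_C(0x0000000200000001);
 *     pRegFrame->rax  = RT_LO_U32(uValue);  // 0x00000001, upper half cleared
 *     pRegFrame->rdx  = RT_HI_U32(uValue);  // 0x00000002, upper half cleared
 */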
1675
1676
1677/**
1678 * Interpret WRMSR
1679 *
1680 * @returns VBox status code.
1681 * @param pVM The cross context VM structure.
1682 * @param pVCpu The cross context virtual CPU structure.
1683 * @param pRegFrame The register frame.
1684 */
1685VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1686{
1687 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1688
1689 /* Check the current privilege level, this instruction is supervisor only. */
1690 if (CPUMGetGuestCPL(pVCpu) != 0)
1691 {
1692 Log4(("EM: Refuse WRMSR: CPL != 0\n"));
1693 return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
1694 }
1695
1696 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
1697 if (rcStrict != VINF_SUCCESS)
1698 {
1699 Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1700 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
1701 return VERR_EM_INTERPRETER;
1702 }
1703 LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
1704 RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
1705 NOREF(pVM);
1706 return VINF_SUCCESS;
1707}
1708
1709
1710/**
1711 * Interpret DRx write.
1712 *
1713 * @returns VBox status code.
1714 * @param pVM The cross context VM structure.
1715 * @param pVCpu The cross context virtual CPU structure.
1716 * @param pRegFrame The register frame.
1717 * @param DestRegDrx DRx register index (USE_REG_DR*)
1718 * @param SrcRegGen General purpose register index (USE_REG_E**)
1719 *
1720 */
1721VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1722{
1723 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1724 uint64_t uNewDrX;
1725 int rc;
1726 NOREF(pVM);
1727
1728 if (CPUMIsGuestIn64BitCode(pVCpu))
1729 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1730 else
1731 {
1732 uint32_t val32;
1733 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1734 uNewDrX = val32;
1735 }
1736
1737 if (RT_SUCCESS(rc))
1738 {
1739 if (DestRegDrx == 6)
1740 {
1741 uNewDrX |= X86_DR6_RA1_MASK;
1742 uNewDrX &= ~X86_DR6_RAZ_MASK;
1743 }
1744 else if (DestRegDrx == 7)
1745 {
1746 uNewDrX |= X86_DR7_RA1_MASK;
1747 uNewDrX &= ~X86_DR7_RAZ_MASK;
1748 }
1749
1750 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1751 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1752 if (RT_SUCCESS(rc))
1753 return rc;
1754 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1755 }
1756 return VERR_EM_INTERPRETER;
1757}
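
/*
 * Note: The RA1/RAZ adjustments above enforce the architecturally fixed bits
 *       of DR6 and DR7 (bits that always read as 1 resp. 0); writes to the
 *       remaining reserved bits are not faulted on, as noted in the @todo.
 */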
1758
1759
1760/**
1761 * Interpret DRx read.
1762 *
1763 * @returns VBox status code.
1764 * @param pVM The cross context VM structure.
1765 * @param pVCpu The cross context virtual CPU structure.
1766 * @param pRegFrame The register frame.
1767 * @param DestRegGen General purpose register index (USE_REG_E**)
1768 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1769 */
1770VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1771{
1772 uint64_t val64;
1773 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1774 NOREF(pVM);
1775
1776 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1777 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1778 if (CPUMIsGuestIn64BitCode(pVCpu))
1779 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1780 else
1781 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1782
1783 if (RT_SUCCESS(rc))
1784 return VINF_SUCCESS;
1785
1786 return VERR_EM_INTERPRETER;
1787}
1788
1789
1790#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
1791
1792
1793
1794
1795
1796
1797/*
1798 *
1799 * The old interpreter.
1800 * The old interpreter.
1801 * The old interpreter.
1802 * The old interpreter.
1803 * The old interpreter.
1804 *
1805 */
1806
1807DECLINLINE(int) emRamRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1808{
1809#ifdef IN_RC
1810 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1811 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1812 return rc;
1813 /*
1814 * The page pool cache may end up here in some cases because it
1815 * flushed one of the shadow mappings used by the trapping
1816 * instruction and it either flushed the TLB or the CPU reused it.
1817 */
1818#else
1819 NOREF(pVM);
1820#endif
1821 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1822}
1823
1824
1825DECLINLINE(int) emRamWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, uint32_t cb)
1826{
1827 /* Don't use MMGCRamWrite here as it does not respect zero pages, shared
1828 pages or write monitored pages. */
1829 NOREF(pVM);
1830#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
1831 int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, /*fMayTrap*/ false);
1832#else
1833 int rc = VINF_SUCCESS;
1834#endif
1835#ifdef VBOX_COMPARE_IEM_AND_EM
1836 Log(("EM Wrote: %RGv %.*Rhxs rc=%Rrc\n", GCPtrDst, RT_MAX(RT_MIN(cb, 64), 1), pvSrc, rc));
1837 g_cbEmWrote = cb;
1838 memcpy(g_abEmWrote, pvSrc, RT_MIN(cb, sizeof(g_abEmWrote)));
1839#endif
1840 return rc;
1841}
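
/*
 * emRamRead/emRamWrite are the old interpreter's guest memory accessors: the
 * raw-mode read path tries the fast MMGCRamRead route first, and both fall
 * back to the PGMPhysInterpreted*NoHandlers helpers, which walk the guest
 * page tables and deliberately bypass access handlers (a handler is usually
 * what got us here in the first place).
 */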
1842
1843
1844/** Convert sel:addr to a flat GC address. */
1845DECLINLINE(RTGCPTR) emConvertToFlatAddr(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, PDISOPPARAM pParam, RTGCPTR pvAddr)
1846{
1847 DISSELREG enmPrefixSeg = DISDetectSegReg(pDis, pParam);
1848 return SELMToFlat(pVM, enmPrefixSeg, pRegFrame, pvAddr);
1849}
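
/*
 * Illustrative example (assuming the common flat 32-bit guest setup): for
 * "mov [es:edi], eax" DISDetectSegReg yields ES, so SELMToFlat turns the
 * effective address EDI into ES.base + EDI; with a zero segment base that is
 * simply the identity mapping.
 */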
1850
1851
1852#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1853/**
1854 * Get the mnemonic for the disassembled instruction.
1855 *
1856 * GC/R0 doesn't include the strings in the DIS tables because
1857 * of limited space.
1858 */
1859static const char *emGetMnemonic(PDISCPUSTATE pDis)
1860{
1861 switch (pDis->pCurInstr->uOpcode)
1862 {
1863 case OP_XCHG: return "Xchg";
1864 case OP_DEC: return "Dec";
1865 case OP_INC: return "Inc";
1866 case OP_POP: return "Pop";
1867 case OP_OR: return "Or";
1868 case OP_AND: return "And";
1869 case OP_MOV: return "Mov";
1870 case OP_INVLPG: return "InvlPg";
1871 case OP_CPUID: return "CpuId";
1872 case OP_MOV_CR: return "MovCRx";
1873 case OP_MOV_DR: return "MovDRx";
1874 case OP_LLDT: return "LLdt";
1875 case OP_LGDT: return "LGdt";
1876 case OP_LIDT: return "LIdt";
1877 case OP_CLTS: return "Clts";
1878 case OP_MONITOR: return "Monitor";
1879 case OP_MWAIT: return "MWait";
1880 case OP_RDMSR: return "Rdmsr";
1881 case OP_WRMSR: return "Wrmsr";
1882 case OP_ADD: return "Add";
1883 case OP_ADC: return "Adc";
1884 case OP_SUB: return "Sub";
1885 case OP_SBB: return "Sbb";
1886 case OP_RDTSC: return "Rdtsc";
1887 case OP_STI: return "Sti";
1888 case OP_CLI: return "Cli";
1889 case OP_XADD: return "XAdd";
1890 case OP_HLT: return "Hlt";
1891 case OP_IRET: return "Iret";
1892 case OP_MOVNTPS: return "MovNTPS";
1893 case OP_STOSWD: return "StosWD";
1894 case OP_WBINVD: return "WbInvd";
1895 case OP_XOR: return "Xor";
1896 case OP_BTR: return "Btr";
1897 case OP_BTS: return "Bts";
1898 case OP_BTC: return "Btc";
1899 case OP_LMSW: return "Lmsw";
1900 case OP_SMSW: return "Smsw";
1901 case OP_CMPXCHG: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg" : "CmpXchg";
1902 case OP_CMPXCHG8B: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg8b" : "CmpXchg8b";
1903
1904 default:
1905 Log(("Unknown opcode %d\n", pDis->pCurInstr->uOpcode));
1906 return "???";
1907 }
1908}
1909#endif /* VBOX_STRICT || LOG_ENABLED */
1910
1911
1912/**
1913 * XCHG instruction emulation.
1914 */
1915static int emInterpretXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1916{
1917 DISQPVPARAMVAL param1, param2;
1918 NOREF(pvFault);
1919
1920 /* Source to make DISQueryParamVal read the register value - ugly hack */
1921 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
1922 if(RT_FAILURE(rc))
1923 return VERR_EM_INTERPRETER;
1924
1925 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
1926 if(RT_FAILURE(rc))
1927 return VERR_EM_INTERPRETER;
1928
1929#ifdef IN_RC
1930 if (TRPMHasTrap(pVCpu))
1931 {
1932 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
1933 {
1934#endif
1935 RTGCPTR pParam1 = 0, pParam2 = 0;
1936 uint64_t valpar1, valpar2;
1937
1938 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
1939 switch(param1.type)
1940 {
1941 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
1942 valpar1 = param1.val.val64;
1943 break;
1944
1945 case DISQPV_TYPE_ADDRESS:
1946 pParam1 = (RTGCPTR)param1.val.val64;
1947 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
1948 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
1949 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
1950 if (RT_FAILURE(rc))
1951 {
1952                AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1953 return VERR_EM_INTERPRETER;
1954 }
1955 break;
1956
1957 default:
1958 AssertFailed();
1959 return VERR_EM_INTERPRETER;
1960 }
1961
1962 switch(param2.type)
1963 {
1964 case DISQPV_TYPE_ADDRESS:
1965 pParam2 = (RTGCPTR)param2.val.val64;
1966 pParam2 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pParam2);
1967 EM_ASSERT_FAULT_RETURN(pParam2 == pvFault, VERR_EM_INTERPRETER);
1968 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar2, pParam2, param2.size);
1969 if (RT_FAILURE(rc))
1970 {
1971                AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc));
                return VERR_EM_INTERPRETER;
1972            }
1973 break;
1974
1975 case DISQPV_TYPE_IMMEDIATE:
1976 valpar2 = param2.val.val64;
1977 break;
1978
1979 default:
1980 AssertFailed();
1981 return VERR_EM_INTERPRETER;
1982 }
1983
1984 /* Write value of parameter 2 to parameter 1 (reg or memory address) */
1985 if (pParam1 == 0)
1986 {
1987 Assert(param1.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
1988 switch(param1.size)
1989 {
1990 case 1: //special case for AH etc
1991 rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t )valpar2); break;
1992 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)valpar2); break;
1993 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)valpar2); break;
1994 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, valpar2); break;
1995 default: AssertFailedReturn(VERR_EM_INTERPRETER);
1996 }
1997 if (RT_FAILURE(rc))
1998 return VERR_EM_INTERPRETER;
1999 }
2000 else
2001 {
2002 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar2, param1.size);
2003 if (RT_FAILURE(rc))
2004 {
2005 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2006 return VERR_EM_INTERPRETER;
2007 }
2008 }
2009
2010 /* Write value of parameter 1 to parameter 2 (reg or memory address) */
2011 if (pParam2 == 0)
2012 {
2013 Assert(param2.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
2014 switch(param2.size)
2015 {
2016 case 1: //special case for AH etc
2017 rc = DISWriteReg8(pRegFrame, pDis->Param2.Base.idxGenReg, (uint8_t )valpar1); break;
2018 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param2.Base.idxGenReg, (uint16_t)valpar1); break;
2019 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param2.Base.idxGenReg, (uint32_t)valpar1); break;
2020 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param2.Base.idxGenReg, valpar1); break;
2021 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2022 }
2023 if (RT_FAILURE(rc))
2024 return VERR_EM_INTERPRETER;
2025 }
2026 else
2027 {
2028 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam2, &valpar1, param2.size);
2029 if (RT_FAILURE(rc))
2030 {
2031            AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc));
2032 return VERR_EM_INTERPRETER;
2033 }
2034 }
2035
2036 *pcbSize = param2.size;
2037 return VINF_SUCCESS;
2038#ifdef IN_RC
2039 }
2040 }
2041 return VERR_EM_INTERPRETER;
2042#endif
2043}
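
/*
 * Note: Unlike the real XCHG instruction, which is implicitly locked when a
 *       memory operand is involved, the emulation above does separate reads
 *       and writes and is therefore not atomic with respect to other VCPUs
 *       or DMA.
 */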
2044
2045
2046/**
2047 * INC and DEC emulation.
2048 */
2049static int emInterpretIncDec(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2050 PFNEMULATEPARAM2 pfnEmulate)
2051{
2052 DISQPVPARAMVAL param1;
2053 NOREF(pvFault);
2054
2055 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2056 if(RT_FAILURE(rc))
2057 return VERR_EM_INTERPRETER;
2058
2059#ifdef IN_RC
2060 if (TRPMHasTrap(pVCpu))
2061 {
2062 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2063 {
2064#endif
2065 RTGCPTR pParam1 = 0;
2066 uint64_t valpar1;
2067
2068 if (param1.type == DISQPV_TYPE_ADDRESS)
2069 {
2070 pParam1 = (RTGCPTR)param1.val.val64;
2071 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2072#ifdef IN_RC
2073 /* Safety check (in theory it could cross a page boundary and fault there though) */
2074 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2075#endif
2076 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2077 if (RT_FAILURE(rc))
2078 {
2079 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2080 return VERR_EM_INTERPRETER;
2081 }
2082 }
2083 else
2084 {
2085 AssertFailed();
2086 return VERR_EM_INTERPRETER;
2087 }
2088
2089 uint32_t eflags;
2090
2091 eflags = pfnEmulate(&valpar1, param1.size);
2092
2093 /* Write result back */
2094 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2095 if (RT_FAILURE(rc))
2096 {
2097 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2098 return VERR_EM_INTERPRETER;
2099 }
2100
2101 /* Update guest's eflags and finish. */
2102 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2103 | (eflags & (X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2104
2105 /* All done! */
2106 *pcbSize = param1.size;
2107 return VINF_SUCCESS;
2108#ifdef IN_RC
2109 }
2110 }
2111 return VERR_EM_INTERPRETER;
2112#endif
2113}
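
/*
 * Note: The EFLAGS merge above deliberately leaves CF alone, matching the
 *       architectural behaviour of INC/DEC (they update OF/SF/ZF/AF/PF but
 *       never CF).
 */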
2114
2115
2116/**
2117 * POP Emulation.
2118 */
2119static int emInterpretPop(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2120{
2121 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
2122 DISQPVPARAMVAL param1;
2123 NOREF(pvFault);
2124
2125 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2126 if(RT_FAILURE(rc))
2127 return VERR_EM_INTERPRETER;
2128
2129#ifdef IN_RC
2130 if (TRPMHasTrap(pVCpu))
2131 {
2132 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2133 {
2134#endif
2135 RTGCPTR pParam1 = 0;
2136 uint32_t valpar1;
2137 RTGCPTR pStackVal;
2138
2139 /* Read stack value first */
2140 if (CPUMGetGuestCodeBits(pVCpu) == 16)
2141 return VERR_EM_INTERPRETER; /* No legacy 16 bits stuff here, please. */
2142
2143 /* Convert address; don't bother checking limits etc, as we only read here */
2144 pStackVal = SELMToFlat(pVM, DISSELREG_SS, pRegFrame, (RTGCPTR)pRegFrame->esp);
2145 if (pStackVal == 0)
2146 return VERR_EM_INTERPRETER;
2147
2148 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pStackVal, param1.size);
2149 if (RT_FAILURE(rc))
2150 {
2151            AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pStackVal, param1.size, rc));
2152 return VERR_EM_INTERPRETER;
2153 }
2154
2155 if (param1.type == DISQPV_TYPE_ADDRESS)
2156 {
2157 pParam1 = (RTGCPTR)param1.val.val64;
2158
2159 /* pop [esp+xx] uses esp after the actual pop! */
2160 AssertCompile(DISGREG_ESP == DISGREG_SP);
2161 if ( (pDis->Param1.fUse & DISUSE_BASE)
2162 && (pDis->Param1.fUse & (DISUSE_REG_GEN16|DISUSE_REG_GEN32))
2163 && pDis->Param1.Base.idxGenReg == DISGREG_ESP
2164 )
2165 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + param1.size);
2166
2167 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2168 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault || (RTGCPTR)pRegFrame->esp == pvFault, VERR_EM_INTERPRETER);
2169 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2170 if (RT_FAILURE(rc))
2171 {
2172 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2173 return VERR_EM_INTERPRETER;
2174 }
2175
2176 /* Update ESP as the last step */
2177 pRegFrame->esp += param1.size;
2178 }
2179 else
2180 {
2181#ifndef DEBUG_bird // annoying assertion.
2182 AssertFailed();
2183#endif
2184 return VERR_EM_INTERPRETER;
2185 }
2186
2187 /* All done! */
2188 *pcbSize = param1.size;
2189 return VINF_SUCCESS;
2190#ifdef IN_RC
2191 }
2192 }
2193 return VERR_EM_INTERPRETER;
2194#endif
2195}
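
/*
 * Note: The ESP bias above handles the "pop [esp+disp]" corner case: the
 *       memory operand is architecturally evaluated with the ESP value
 *       *after* the pop, so the destination address is adjusted by the
 *       operand size before the write, while ESP itself is only updated as
 *       the very last step.
 */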
2196
2197
2198/**
2199 * XOR/OR/AND Emulation.
2200 */
2201static int emInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2202 PFNEMULATEPARAM3 pfnEmulate)
2203{
2204 DISQPVPARAMVAL param1, param2;
2205 NOREF(pvFault);
2206
2207 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2208 if(RT_FAILURE(rc))
2209 return VERR_EM_INTERPRETER;
2210
2211 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2212 if(RT_FAILURE(rc))
2213 return VERR_EM_INTERPRETER;
2214
2215#ifdef IN_RC
2216 if (TRPMHasTrap(pVCpu))
2217 {
2218 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2219 {
2220#endif
2221 RTGCPTR pParam1;
2222 uint64_t valpar1, valpar2;
2223
2224 if (pDis->Param1.cb != pDis->Param2.cb)
2225 {
2226 if (pDis->Param1.cb < pDis->Param2.cb)
2227 {
2228 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2229 return VERR_EM_INTERPRETER;
2230 }
2231 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2232 pDis->Param2.cb = pDis->Param1.cb;
2233 param2.size = param1.size;
2234 }
2235
2236 /* The destination is always a virtual address */
2237 if (param1.type == DISQPV_TYPE_ADDRESS)
2238 {
2239 pParam1 = (RTGCPTR)param1.val.val64;
2240 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2241 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2242 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2243 if (RT_FAILURE(rc))
2244 {
2245 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2246 return VERR_EM_INTERPRETER;
2247 }
2248 }
2249 else
2250 {
2251 AssertFailed();
2252 return VERR_EM_INTERPRETER;
2253 }
2254
2255 /* Register or immediate data */
2256 switch(param2.type)
2257 {
2258 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2259 valpar2 = param2.val.val64;
2260 break;
2261
2262 default:
2263 AssertFailed();
2264 return VERR_EM_INTERPRETER;
2265 }
2266
2267 LogFlow(("emInterpretOrXorAnd %s %RGv %RX64 - %RX64 size %d (%d)\n", emGetMnemonic(pDis), pParam1, valpar1, valpar2, param2.size, param1.size));
2268
2269 /* Data read, emulate instruction. */
2270 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2271
2272 LogFlow(("emInterpretOrXorAnd %s result %RX64\n", emGetMnemonic(pDis), valpar1));
2273
2274 /* Update guest's eflags and finish. */
2275 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2276 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2277
2278 /* And write it back */
2279 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2280 if (RT_SUCCESS(rc))
2281 {
2282 /* All done! */
2283 *pcbSize = param2.size;
2284 return VINF_SUCCESS;
2285 }
2286#ifdef IN_RC
2287 }
2288 }
2289#endif
2290 return VERR_EM_INTERPRETER;
2291}
2292
2293
2294#ifndef VBOX_COMPARE_IEM_AND_EM
2295/**
2296 * LOCK XOR/OR/AND Emulation.
2297 */
2298static int emInterpretLockOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2299 uint32_t *pcbSize, PFNEMULATELOCKPARAM3 pfnEmulate)
2300{
2301 void *pvParam1;
2302 DISQPVPARAMVAL param1, param2;
2303 NOREF(pvFault);
2304
2305#if HC_ARCH_BITS == 32
2306 Assert(pDis->Param1.cb <= 4);
2307#endif
2308
2309 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2310 if(RT_FAILURE(rc))
2311 return VERR_EM_INTERPRETER;
2312
2313 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2314 if(RT_FAILURE(rc))
2315 return VERR_EM_INTERPRETER;
2316
2317 if (pDis->Param1.cb != pDis->Param2.cb)
2318 {
2319 AssertMsgReturn(pDis->Param1.cb >= pDis->Param2.cb, /* should never happen! */
2320 ("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb),
2321 VERR_EM_INTERPRETER);
2322
2323 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2324 pDis->Param2.cb = pDis->Param1.cb;
2325 param2.size = param1.size;
2326 }
2327
2334
2335 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2336 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2337 RTGCUINTREG ValPar2 = param2.val.val64;
2338
2339 /* The destination is always a virtual address */
2340 AssertReturn(param1.type == DISQPV_TYPE_ADDRESS, VERR_EM_INTERPRETER);
2341
2342 RTGCPTR GCPtrPar1 = param1.val.val64;
2343    GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
#ifdef IN_RC
    /* Safety check, done after computing the flat address (in theory it could
       still cross a page boundary and fault there though). */
    Assert(   TRPMHasTrap(pVCpu)
           && (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW));
    EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
#endif
2344 PGMPAGEMAPLOCK Lock;
2345 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2346 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2347
2348    /* Try to emulate it with a one-shot #PF handler in place. (RC) */
2349 Log2(("%s %RGv imm%d=%RX64\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2350
2351 RTGCUINTREG32 eflags = 0;
2352 rc = pfnEmulate(pvParam1, ValPar2, pDis->Param2.cb, &eflags);
2353 PGMPhysReleasePageMappingLock(pVM, &Lock);
2354 if (RT_FAILURE(rc))
2355 {
2356 Log(("%s %RGv imm%d=%RX64-> emulation failed due to page fault!\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2357 return VERR_EM_INTERPRETER;
2358 }
2359
2360 /* Update guest's eflags and finish. */
2361 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2362 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2363
2364 *pcbSize = param2.size;
2365 return VINF_SUCCESS;
2366}
2367#endif /* !VBOX_COMPARE_IEM_AND_EM */
2368
2369
2370/**
2371 * ADD, ADC & SUB Emulation.
2372 */
2373static int emInterpretAddSub(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2374 PFNEMULATEPARAM3 pfnEmulate)
2375{
2376 NOREF(pvFault);
2377 DISQPVPARAMVAL param1, param2;
2378 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2379 if(RT_FAILURE(rc))
2380 return VERR_EM_INTERPRETER;
2381
2382 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2383 if(RT_FAILURE(rc))
2384 return VERR_EM_INTERPRETER;
2385
2386#ifdef IN_RC
2387 if (TRPMHasTrap(pVCpu))
2388 {
2389 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2390 {
2391#endif
2392 RTGCPTR pParam1;
2393 uint64_t valpar1, valpar2;
2394
2395 if (pDis->Param1.cb != pDis->Param2.cb)
2396 {
2397 if (pDis->Param1.cb < pDis->Param2.cb)
2398 {
2399 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2400 return VERR_EM_INTERPRETER;
2401 }
2402 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2403 pDis->Param2.cb = pDis->Param1.cb;
2404 param2.size = param1.size;
2405 }
2406
2407 /* The destination is always a virtual address */
2408 if (param1.type == DISQPV_TYPE_ADDRESS)
2409 {
2410 pParam1 = (RTGCPTR)param1.val.val64;
2411 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2412 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2413 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2414 if (RT_FAILURE(rc))
2415 {
2416 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2417 return VERR_EM_INTERPRETER;
2418 }
2419 }
2420 else
2421 {
2422#ifndef DEBUG_bird
2423 AssertFailed();
2424#endif
2425 return VERR_EM_INTERPRETER;
2426 }
2427
2428 /* Register or immediate data */
2429 switch(param2.type)
2430 {
2431 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2432 valpar2 = param2.val.val64;
2433 break;
2434
2435 default:
2436 AssertFailed();
2437 return VERR_EM_INTERPRETER;
2438 }
2439
2440 /* Data read, emulate instruction. */
2441 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2442
2443 /* Update guest's eflags and finish. */
2444 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2445 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2446
2447 /* And write it back */
2448 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2449 if (RT_SUCCESS(rc))
2450 {
2451 /* All done! */
2452 *pcbSize = param2.size;
2453 return VINF_SUCCESS;
2454 }
2455#ifdef IN_RC
2456 }
2457 }
2458#endif
2459 return VERR_EM_INTERPRETER;
2460}
2461
2462
2463/**
2464 * ADC Emulation.
2465 */
2466static int emInterpretAdc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2467{
2468 if (pRegFrame->eflags.Bits.u1CF)
2469 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdcWithCarrySet);
2470 else
2471 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdd);
2472}
2473
2474
2475/**
2476 * BTR/C/S Emulation.
2477 */
2478static int emInterpretBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2479 PFNEMULATEPARAM2UINT32 pfnEmulate)
2480{
2481 DISQPVPARAMVAL param1, param2;
2482 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2483 if(RT_FAILURE(rc))
2484 return VERR_EM_INTERPRETER;
2485
2486 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2487 if(RT_FAILURE(rc))
2488 return VERR_EM_INTERPRETER;
2489
2490#ifdef IN_RC
2491 if (TRPMHasTrap(pVCpu))
2492 {
2493 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2494 {
2495#endif
2496 RTGCPTR pParam1;
2497 uint64_t valpar1 = 0, valpar2;
2498 uint32_t eflags;
2499
2500 /* The destination is always a virtual address */
2501 if (param1.type != DISQPV_TYPE_ADDRESS)
2502 return VERR_EM_INTERPRETER;
2503
2504 pParam1 = (RTGCPTR)param1.val.val64;
2505 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2506
2507 /* Register or immediate data */
2508 switch(param2.type)
2509 {
2510 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2511 valpar2 = param2.val.val64;
2512 break;
2513
2514 default:
2515 AssertFailed();
2516 return VERR_EM_INTERPRETER;
2517 }
2518
2519 Log2(("emInterpret%s: pvFault=%RGv pParam1=%RGv val2=%x\n", emGetMnemonic(pDis), pvFault, pParam1, valpar2));
2520 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + valpar2/8);
2521 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)pParam1 & ~3) == pvFault, VERR_EM_INTERPRETER); NOREF(pvFault);
2522 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, 1);
2523 if (RT_FAILURE(rc))
2524 {
2525 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2526 return VERR_EM_INTERPRETER;
2527 }
2528
2529 Log2(("emInterpretBtx: val=%x\n", valpar1));
2530 /* Data read, emulate bit test instruction. */
2531 eflags = pfnEmulate(&valpar1, valpar2 & 0x7);
2532
2533 Log2(("emInterpretBtx: val=%x CF=%d\n", valpar1, !!(eflags & X86_EFL_CF)));
2534
2535 /* Update guest's eflags and finish. */
2536 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2537 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2538
2539 /* And write it back */
2540 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, 1);
2541 if (RT_SUCCESS(rc))
2542 {
2543 /* All done! */
2544 *pcbSize = 1;
2545 return VINF_SUCCESS;
2546 }
2547#ifdef IN_RC
2548 }
2549 }
2550#endif
2551 return VERR_EM_INTERPRETER;
2552}
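
/*
 * Note: BT/BTR/BTS/BTC with a memory operand is reduced above to a single
 *       byte access: the bit offset is split into a byte displacement
 *       (valpar2 / 8) and a bit number within that byte (valpar2 & 7), so
 *       exactly one guest byte is read, modified and written back.
 */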
2553
2554
2555#ifndef VBOX_COMPARE_IEM_AND_EM
2556/**
2557 * LOCK BTR/C/S Emulation.
2558 */
2559static int emInterpretLockBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2560 uint32_t *pcbSize, PFNEMULATELOCKPARAM2 pfnEmulate)
2561{
2562 void *pvParam1;
2563
2564 DISQPVPARAMVAL param1, param2;
2565 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2566 if(RT_FAILURE(rc))
2567 return VERR_EM_INTERPRETER;
2568
2569 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2570 if(RT_FAILURE(rc))
2571 return VERR_EM_INTERPRETER;
2572
2573 /* The destination is always a virtual address */
2574 if (param1.type != DISQPV_TYPE_ADDRESS)
2575 return VERR_EM_INTERPRETER;
2576
2577 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2578 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2579 uint64_t ValPar2 = param2.val.val64;
2580
2581 /* Adjust the parameters so what we're dealing with is a bit within the byte pointed to. */
2582 RTGCPTR GCPtrPar1 = param1.val.val64;
2583 GCPtrPar1 = (GCPtrPar1 + ValPar2 / 8);
2584 ValPar2 &= 7;
2585
2586 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2587#ifdef IN_RC
2588 Assert(TRPMHasTrap(pVCpu));
2589 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)GCPtrPar1 & ~(RTGCUINTPTR)3) == pvFault, VERR_EM_INTERPRETER);
2590#endif
2591
2592 PGMPAGEMAPLOCK Lock;
2593 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2594 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2595
2596 Log2(("emInterpretLockBitTest %s: pvFault=%RGv GCPtrPar1=%RGv imm=%RX64\n", emGetMnemonic(pDis), pvFault, GCPtrPar1, ValPar2));
2597 NOREF(pvFault);
2598
2599    /* Try to emulate it with a one-shot #PF handler in place. (RC) */
2600 RTGCUINTREG32 eflags = 0;
2601 rc = pfnEmulate(pvParam1, ValPar2, &eflags);
2602 PGMPhysReleasePageMappingLock(pVM, &Lock);
2603 if (RT_FAILURE(rc))
2604 {
2605 Log(("emInterpretLockBitTest %s: %RGv imm%d=%RX64 -> emulation failed due to page fault!\n",
2606 emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2607 return VERR_EM_INTERPRETER;
2608 }
2609
2610 Log2(("emInterpretLockBitTest %s: GCPtrPar1=%RGv imm=%RX64 CF=%d\n", emGetMnemonic(pDis), GCPtrPar1, ValPar2, !!(eflags & X86_EFL_CF)));
2611
2612 /* Update guest's eflags and finish. */
2613 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2614 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2615
2616 *pcbSize = 1;
2617 return VINF_SUCCESS;
2618}
2619#endif /* !VBOX_COMPARE_IEM_AND_EM */
2620
2621
2622/**
2623 * MOV emulation.
2624 */
2625static int emInterpretMov(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2626{
2627 NOREF(pvFault);
2628 DISQPVPARAMVAL param1, param2;
2629 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2630 if(RT_FAILURE(rc))
2631 return VERR_EM_INTERPRETER;
2632
2633 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2634 if(RT_FAILURE(rc))
2635 return VERR_EM_INTERPRETER;
2636
2637 /* If destination is a segment register, punt. We can't handle it here.
2638 * NB: Source can be a register and still trigger a #PF!
2639 */
2640 if (RT_UNLIKELY(pDis->Param1.fUse == DISUSE_REG_SEG))
2641 return VERR_EM_INTERPRETER;
2642
2643 if (param1.type == DISQPV_TYPE_ADDRESS)
2644 {
2645 RTGCPTR pDest;
2646 uint64_t val64;
2647
2648 switch(param1.type)
2649 {
2650 case DISQPV_TYPE_IMMEDIATE:
2651 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2652 return VERR_EM_INTERPRETER;
2653 RT_FALL_THRU();
2654
2655 case DISQPV_TYPE_ADDRESS:
2656 pDest = (RTGCPTR)param1.val.val64;
2657 pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
2658 break;
2659
2660 default:
2661 AssertFailed();
2662 return VERR_EM_INTERPRETER;
2663 }
2664
2665 switch(param2.type)
2666 {
2667 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
2668 val64 = param2.val.val64;
2669 break;
2670
2671 default:
2672 Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
2673 return VERR_EM_INTERPRETER;
2674 }
2675#ifdef LOG_ENABLED
2676 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2677 LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
2678 else
2679 LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
2680#endif
2681
2682 Assert(param2.size <= 8 && param2.size > 0);
2683 EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
2684 rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
2685 if (RT_FAILURE(rc))
2686 return VERR_EM_INTERPRETER;
2687
2688 *pcbSize = param2.size;
2689 }
2690#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1)
2691 /* mov xx, cs instruction is dangerous in raw mode and replaced by an 'int3' by csam/patm. */
2692 else if ( param1.type == DISQPV_TYPE_REGISTER
2693 && param2.type == DISQPV_TYPE_REGISTER)
2694 {
2695 AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER);
2696 AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER);
2697 AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER);
2698
2699 uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
2700 uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl;
2701
2702 Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS));
2703 switch (param1.size)
2704 {
2705 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) uValCS); break;
2706 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break;
2707 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break;
2708 default:
2709 AssertFailed();
2710 return VERR_EM_INTERPRETER;
2711 }
2712 AssertRCReturn(rc, rc);
2713 }
2714#endif
2715 else
2716 { /* read fault */
2717 RTGCPTR pSrc;
2718 uint64_t val64;
2719
2720 /* Source */
2721 switch(param2.type)
2722 {
2723 case DISQPV_TYPE_IMMEDIATE:
2724 if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2725 return VERR_EM_INTERPRETER;
2726 RT_FALL_THRU();
2727
2728 case DISQPV_TYPE_ADDRESS:
2729 pSrc = (RTGCPTR)param2.val.val64;
2730 pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
2731 break;
2732
2733 default:
2734 return VERR_EM_INTERPRETER;
2735 }
2736
2737 Assert(param1.size <= 8 && param1.size > 0);
2738 EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
2739 rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
2740 if (RT_FAILURE(rc))
2741 return VERR_EM_INTERPRETER;
2742
2743 /* Destination */
2744 switch(param1.type)
2745 {
2746 case DISQPV_TYPE_REGISTER:
2747 switch(param1.size)
2748 {
2749 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break;
2750 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
2751 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
2752 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
2753 default:
2754 return VERR_EM_INTERPRETER;
2755 }
2756 if (RT_FAILURE(rc))
2757 return rc;
2758 break;
2759
2760 default:
2761 return VERR_EM_INTERPRETER;
2762 }
2763#ifdef LOG_ENABLED
2764 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2765 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
2766 else
2767 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
2768#endif
2769 }
2770 return VINF_SUCCESS;
2771}
2772
2773
2774#ifndef IN_RC
2775/**
2776 * [REP] STOSWD emulation
2777 */
2778static int emInterpretStosWD(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2779{
2780 int rc;
2781 RTGCPTR GCDest, GCOffset;
2782 uint32_t cbSize;
2783 uint64_t cTransfers;
2784 int offIncrement;
2785 NOREF(pvFault);
2786
2787    /* Don't support any prefix bytes but these. */
2788 if ((pDis->fPrefix & ~(DISPREFIX_ADDRSIZE|DISPREFIX_OPSIZE|DISPREFIX_REP|DISPREFIX_REX)))
2789 return VERR_EM_INTERPRETER;
2790
2791 switch (pDis->uAddrMode)
2792 {
2793 case DISCPUMODE_16BIT:
2794 GCOffset = pRegFrame->di;
2795 cTransfers = pRegFrame->cx;
2796 break;
2797 case DISCPUMODE_32BIT:
2798 GCOffset = pRegFrame->edi;
2799 cTransfers = pRegFrame->ecx;
2800 break;
2801 case DISCPUMODE_64BIT:
2802 GCOffset = pRegFrame->rdi;
2803 cTransfers = pRegFrame->rcx;
2804 break;
2805 default:
2806 AssertFailed();
2807 return VERR_EM_INTERPRETER;
2808 }
2809
2810 GCDest = SELMToFlat(pVM, DISSELREG_ES, pRegFrame, GCOffset);
2811 switch (pDis->uOpMode)
2812 {
2813 case DISCPUMODE_16BIT:
2814 cbSize = 2;
2815 break;
2816 case DISCPUMODE_32BIT:
2817 cbSize = 4;
2818 break;
2819 case DISCPUMODE_64BIT:
2820 cbSize = 8;
2821 break;
2822 default:
2823 AssertFailed();
2824 return VERR_EM_INTERPRETER;
2825 }
2826
2827 offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cbSize : (signed)cbSize;
2828
2829 if (!(pDis->fPrefix & DISPREFIX_REP))
2830 {
2831 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize));
2832
2833 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2834 if (RT_FAILURE(rc))
2835 return VERR_EM_INTERPRETER;
2836 Assert(rc == VINF_SUCCESS);
2837
2838 /* Update (e/r)di. */
2839 switch (pDis->uAddrMode)
2840 {
2841 case DISCPUMODE_16BIT:
2842 pRegFrame->di += offIncrement;
2843 break;
2844 case DISCPUMODE_32BIT:
2845 pRegFrame->edi += offIncrement;
2846 break;
2847 case DISCPUMODE_64BIT:
2848 pRegFrame->rdi += offIncrement;
2849 break;
2850 default:
2851 AssertFailed();
2852 return VERR_EM_INTERPRETER;
2853 }
2854
2855 }
2856 else
2857 {
2858 if (!cTransfers)
2859 return VINF_SUCCESS;
2860
2861 /*
2862         * Do *not* try to emulate cross-page stuff here because we don't know what
2863         * might be waiting for us on the subsequent pages. The caller has only asked
2864         * us to ignore access handlers for the current page.
2865 * This also fends off big stores which would quickly kill PGMR0DynMap.
2866 */
2867 if ( cbSize > PAGE_SIZE
2868 || cTransfers > PAGE_SIZE
2869 || (GCDest >> PAGE_SHIFT) != ((GCDest + offIncrement * cTransfers) >> PAGE_SHIFT))
2870 {
2871            Log(("STOSWD crosses pages, chickening out to the recompiler; GCDest=%RGv cbSize=%#x offIncrement=%d cTransfers=%#x\n",
2872 GCDest, cbSize, offIncrement, cTransfers));
2873 return VERR_EM_INTERPRETER;
2874 }
2875
2876 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d cTransfers=%x DF=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize, cTransfers, pRegFrame->eflags.Bits.u1DF));
2877 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2878 rc = PGMVerifyAccess(pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
2879 cTransfers * cbSize,
2880 X86_PTE_RW | (CPUMGetGuestCPL(pVCpu) == 3 ? X86_PTE_US : 0));
2881 if (rc != VINF_SUCCESS)
2882 {
2883 Log(("STOSWD will generate a trap -> recompiler, rc=%d\n", rc));
2884 return VERR_EM_INTERPRETER;
2885 }
2886
2887 /* REP case */
2888 while (cTransfers)
2889 {
2890 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2891 if (RT_FAILURE(rc))
2892 {
2893 rc = VERR_EM_INTERPRETER;
2894 break;
2895 }
2896
2897 Assert(rc == VINF_SUCCESS);
2898 GCOffset += offIncrement;
2899 GCDest += offIncrement;
2900 cTransfers--;
2901 }
2902
2903 /* Update the registers. */
2904 switch (pDis->uAddrMode)
2905 {
2906 case DISCPUMODE_16BIT:
2907 pRegFrame->di = GCOffset;
2908 pRegFrame->cx = cTransfers;
2909 break;
2910 case DISCPUMODE_32BIT:
2911 pRegFrame->edi = GCOffset;
2912 pRegFrame->ecx = cTransfers;
2913 break;
2914 case DISCPUMODE_64BIT:
2915 pRegFrame->rdi = GCOffset;
2916 pRegFrame->rcx = cTransfers;
2917 break;
2918 default:
2919 AssertFailed();
2920 return VERR_EM_INTERPRETER;
2921 }
2922 }
2923
2924 *pcbSize = cbSize;
2925 return rc;
2926}
2927#endif /* !IN_RC */
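
/*
 * Note on the REP STOS path above: the whole transfer must stay within a
 * single page and is checked with PGMVerifyAccess up front, because the
 * write loop cannot recover cleanly from a fault in the middle of the
 * string operation; anything bigger or page-crossing goes to the recompiler.
 */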
2928
2929
2930/**
2931 * [LOCK] CMPXCHG emulation.
2932 */
2933static int emInterpretCmpXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2934{
2935 DISQPVPARAMVAL param1, param2;
2936 NOREF(pvFault);
2937
2938#if HC_ARCH_BITS == 32
2939 Assert(pDis->Param1.cb <= 4);
2940#endif
2941
2942 /* Source to make DISQueryParamVal read the register value - ugly hack */
2943 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2944 if(RT_FAILURE(rc))
2945 return VERR_EM_INTERPRETER;
2946
2947 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2948 if(RT_FAILURE(rc))
2949 return VERR_EM_INTERPRETER;
2950
2951 uint64_t valpar;
2952 switch(param2.type)
2953 {
2954 case DISQPV_TYPE_IMMEDIATE: /* register actually */
2955 valpar = param2.val.val64;
2956 break;
2957
2958 default:
2959 return VERR_EM_INTERPRETER;
2960 }
2961
2962 PGMPAGEMAPLOCK Lock;
2963 RTGCPTR GCPtrPar1;
2964 void *pvParam1;
2965 uint64_t eflags;
2966
2967 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
2968 switch(param1.type)
2969 {
2970 case DISQPV_TYPE_ADDRESS:
2971 GCPtrPar1 = param1.val.val64;
2972 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2973
2974 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2975 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2976 break;
2977
2978 default:
2979 return VERR_EM_INTERPRETER;
2980 }
2981
2982 LogFlow(("%s %RGv rax=%RX64 %RX64\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar));
2983
2984#ifndef VBOX_COMPARE_IEM_AND_EM
2985 if (pDis->fPrefix & DISPREFIX_LOCK)
2986 eflags = EMEmulateLockCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
2987 else
2988 eflags = EMEmulateCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
2989#else /* VBOX_COMPARE_IEM_AND_EM */
2990 uint64_t u64;
2991 switch (pDis->Param2.cb)
2992 {
2993 case 1: u64 = *(uint8_t *)pvParam1; break;
2994 case 2: u64 = *(uint16_t *)pvParam1; break;
2995 case 4: u64 = *(uint32_t *)pvParam1; break;
2996 default:
2997 case 8: u64 = *(uint64_t *)pvParam1; break;
2998 }
2999 eflags = EMEmulateCmpXchg(&u64, &pRegFrame->rax, valpar, pDis->Param2.cb);
3000 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3001#endif /* VBOX_COMPARE_IEM_AND_EM */
3002
3003 LogFlow(("%s %RGv rax=%RX64 %RX64 ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar, !!(eflags & X86_EFL_ZF)));
3004
3005 /* Update guest's eflags and finish. */
3006 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3007 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3008
3009 *pcbSize = param2.size;
3010 PGMPhysReleasePageMappingLock(pVM, &Lock);
3011 return VINF_SUCCESS;
3012}
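
/*
 * For reference, a sketch of the standard CMPXCHG semantics implemented by
 * the EMEmulate[Lock]CmpXchg helpers (operand size ignored for brevity):
 *
 *     if (*puDst == pRegFrame->rax) { *puDst = uSrc; ZF = 1; }
 *     else                          { pRegFrame->rax = *puDst; ZF = 0; }
 *
 * The other arithmetic flags are set as for a CMP of the two values, and
 * only the flags in the mask above are merged back into the guest EFLAGS.
 */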
3013
3014
3015/**
3016 * [LOCK] CMPXCHG8B emulation.
3017 */
3018static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3019{
3020 DISQPVPARAMVAL param1;
3021 NOREF(pvFault);
3022
3023 /* Source to make DISQueryParamVal read the register value - ugly hack */
3024 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3025 if(RT_FAILURE(rc))
3026 return VERR_EM_INTERPRETER;
3027
3028 RTGCPTR GCPtrPar1;
3029 void *pvParam1;
3030 uint64_t eflags;
3031 PGMPAGEMAPLOCK Lock;
3032
3033 AssertReturn(pDis->Param1.cb == 8, VERR_EM_INTERPRETER);
3034 switch(param1.type)
3035 {
3036 case DISQPV_TYPE_ADDRESS:
3037 GCPtrPar1 = param1.val.val64;
3038 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
3039
3040 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3041 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3042 break;
3043
3044 default:
3045 return VERR_EM_INTERPRETER;
3046 }
3047
3048 LogFlow(("%s %RGv=%p eax=%08x\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax));
3049
3050#ifndef VBOX_COMPARE_IEM_AND_EM
3051 if (pDis->fPrefix & DISPREFIX_LOCK)
3052 eflags = EMEmulateLockCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3053 else
3054 eflags = EMEmulateCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3055#else /* VBOX_COMPARE_IEM_AND_EM */
3056 uint64_t u64 = *(uint64_t *)pvParam1;
3057 eflags = EMEmulateCmpXchg8b(&u64, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3058 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, sizeof(u64)); AssertRCSuccess(rc2);
3059#endif /* VBOX_COMPARE_IEM_AND_EM */
3060
3061 LogFlow(("%s %RGv=%p eax=%08x ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax, !!(eflags & X86_EFL_ZF)));
3062
3063 /* Update guest's eflags and finish; note that *only* ZF is affected. */
3064 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_ZF))
3065 | (eflags & (X86_EFL_ZF));
3066
3067 *pcbSize = 8;
3068 PGMPhysReleasePageMappingLock(pVM, &Lock);
3069 return VINF_SUCCESS;
3070}
3071
3072
3073#ifdef IN_RC /** @todo test+enable for HM as well. */
3074/**
3075 * [LOCK] XADD emulation.
3076 */
3077static int emInterpretXAdd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3078{
3079 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
3080 DISQPVPARAMVAL param1;
3081 void *pvParamReg2;
3082 size_t cbParamReg2;
3083 NOREF(pvFault);
3084
3085 /* Source to make DISQueryParamVal read the register value - ugly hack */
3086 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3087 if(RT_FAILURE(rc))
3088 return VERR_EM_INTERPRETER;
3089
3090 rc = DISQueryParamRegPtr(pRegFrame, pDis, &pDis->Param2, &pvParamReg2, &cbParamReg2);
3091 Assert(cbParamReg2 <= 4);
3092 if(RT_FAILURE(rc))
3093 return VERR_EM_INTERPRETER;
3094
3095#ifdef IN_RC
3096 if (TRPMHasTrap(pVCpu))
3097 {
3098 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
3099 {
3100#endif
3101 RTGCPTR GCPtrPar1;
3102 void *pvParam1;
3103 uint32_t eflags;
3104 PGMPAGEMAPLOCK Lock;
3105
3106 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
3107 switch(param1.type)
3108 {
3109 case DISQPV_TYPE_ADDRESS:
3110 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, (RTRCUINTPTR)param1.val.val64);
3111#ifdef IN_RC
3112 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
3113#endif
3114
3115 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3116 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3117 break;
3118
3119 default:
3120 return VERR_EM_INTERPRETER;
3121 }
3122
3123 LogFlow(("XAdd %RGv=%p reg=%08llx\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2));
3124
3125#ifndef VBOX_COMPARE_IEM_AND_EM
3126 if (pDis->fPrefix & DISPREFIX_LOCK)
3127 eflags = EMEmulateLockXAdd(pvParam1, pvParamReg2, cbParamReg2);
3128 else
3129 eflags = EMEmulateXAdd(pvParam1, pvParamReg2, cbParamReg2);
3130#else /* VBOX_COMPARE_IEM_AND_EM */
3131 uint64_t u64;
3132 switch (cbParamReg2)
3133 {
3134 case 1: u64 = *(uint8_t *)pvParam1; break;
3135 case 2: u64 = *(uint16_t *)pvParam1; break;
3136 case 4: u64 = *(uint32_t *)pvParam1; break;
3137 default:
3138 case 8: u64 = *(uint64_t *)pvParam1; break;
3139 }
3140 eflags = EMEmulateXAdd(&u64, pvParamReg2, cbParamReg2);
3141 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3142#endif /* VBOX_COMPARE_IEM_AND_EM */
3143
3144 LogFlow(("XAdd %RGv=%p reg=%08llx ZF=%d\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2, !!(eflags & X86_EFL_ZF) ));
3145
3146 /* Update guest's eflags and finish. */
3147 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3148 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3149
3150 *pcbSize = cbParamReg2;
3151 PGMPhysReleasePageMappingLock(pVM, &Lock);
3152 return VINF_SUCCESS;
3153#ifdef IN_RC
3154 }
3155 }
3156
3157 return VERR_EM_INTERPRETER;
3158#endif
3159}
3160#endif /* IN_RC */
3161
3162
3163/**
3164 * WBINVD Emulation.
3165 */
3166static int emInterpretWbInvd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3167{
3168 /* Nothing to do. */
3169 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3170 return VINF_SUCCESS;
3171}
3172
3173
3174/**
3175 * INVLPG Emulation.
3176 */
3177static VBOXSTRICTRC emInterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3178{
3179 DISQPVPARAMVAL param1;
3180 RTGCPTR addr;
3181 NOREF(pvFault); NOREF(pVM); NOREF(pcbSize);
3182
3183 VBOXSTRICTRC rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3184 if(RT_FAILURE(rc))
3185 return VERR_EM_INTERPRETER;
3186
3187 switch(param1.type)
3188 {
3189 case DISQPV_TYPE_IMMEDIATE:
3190 case DISQPV_TYPE_ADDRESS:
3191 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
3192 return VERR_EM_INTERPRETER;
3193 addr = (RTGCPTR)param1.val.val64;
3194 break;
3195
3196 default:
3197 return VERR_EM_INTERPRETER;
3198 }
3199
3200    /** @todo Is the address always a flat linear address, or is it DS-based
3201     *        (in the absence of segment override prefixes)?
3202 */
3203#ifdef IN_RC
3204 LogFlow(("RC: EMULATE: invlpg %RGv\n", addr));
3205#endif
3206 rc = PGMInvalidatePage(pVCpu, addr);
3207 if ( rc == VINF_SUCCESS
3208 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
3209 return VINF_SUCCESS;
3210 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
3211 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), addr),
3212 VERR_EM_INTERPRETER);
3213 return rc;
3214}
3215
3216/** @todo change all these EMInterpretXXX methods to VBOXSTRICTRC. */
3217
3218/**
3219 * CPUID Emulation.
3220 */
3221static int emInterpretCpuId(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3222{
3223 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3224 int rc = EMInterpretCpuId(pVM, pVCpu, pRegFrame);
3225 return rc;
3226}
3227
3228
3229/**
3230 * CLTS Emulation.
3231 */
3232static int emInterpretClts(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3233{
3234 NOREF(pVM); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3235
3236 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3237 if (!(cr0 & X86_CR0_TS))
3238 return VINF_SUCCESS;
3239 return CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_TS);
3240}
3241
3242
3243/**
3244 * Update CRx.
3245 *
3246 * @returns VBox status code.
3247 * @param pVM The cross context VM structure.
3248 * @param pVCpu The cross context virtual CPU structure.
3249 * @param pRegFrame The register frame.
3250 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3251 * @param val New CRx value
3252 *
3253 */
3254static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
3255{
3256 uint64_t oldval;
3257 uint64_t msrEFER;
3258 uint32_t fValid;
3259 int rc, rc2;
3260 NOREF(pVM);
3261
3262 /** @todo Clean up this mess. */
3263 LogFlow(("emInterpretCRxWrite at %RGv CR%d <- %RX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
3264 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3265 switch (DestRegCrx)
3266 {
3267 case DISCREG_CR0:
3268 oldval = CPUMGetGuestCR0(pVCpu);
3269#ifdef IN_RC
3270 /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
3271 if ( (val & (X86_CR0_WP | X86_CR0_AM))
3272 != (oldval & (X86_CR0_WP | X86_CR0_AM)))
3273 return VERR_EM_INTERPRETER;
3274#endif
3275 rc = VINF_SUCCESS;
3276#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
3277 CPUMSetGuestCR0(pVCpu, val);
3278#else
3279 CPUMQueryGuestCtxPtr(pVCpu)->cr0 = val | X86_CR0_ET;
3280#endif
3281 val = CPUMGetGuestCR0(pVCpu);
3282 if ( (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3283 != (val & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
3284 {
3285 /* global flush */
3286 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3287 AssertRCReturn(rc, rc);
3288 }
3289
3290 /* Deal with long mode enabling/disabling. */
3291 msrEFER = CPUMGetGuestEFER(pVCpu);
3292 if (msrEFER & MSR_K6_EFER_LME)
3293 {
3294 if ( !(oldval & X86_CR0_PG)
3295 && (val & X86_CR0_PG))
3296 {
3297 /* Illegal to have an active 64-bit CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3298 if (pRegFrame->cs.Attr.n.u1Long)
3299 {
3300 AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
3301 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3302 }
3303
3304 /* Illegal to switch to long mode without enabling PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3305 if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE))
3306 {
3307 AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
3308 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3309 }
3310 msrEFER |= MSR_K6_EFER_LMA;
3311 }
3312 else
3313 if ( (oldval & X86_CR0_PG)
3314 && !(val & X86_CR0_PG))
3315 {
3316 msrEFER &= ~MSR_K6_EFER_LMA;
3317 /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
3318 }
3319 CPUMSetGuestEFER(pVCpu, msrEFER);
3320 }
3321 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3322 return rc2 == VINF_SUCCESS ? rc : rc2;
3323
3324 case DISCREG_CR2:
3325 rc = CPUMSetGuestCR2(pVCpu, val); AssertRC(rc);
3326 return VINF_SUCCESS;
3327
3328 case DISCREG_CR3:
3329 /* Reloading the current CR3 means the guest just wants to flush the TLBs */
3330 rc = CPUMSetGuestCR3(pVCpu, val); AssertRC(rc);
3331 if (CPUMGetGuestCR0(pVCpu) & X86_CR0_PG)
3332 {
3333 /* flush */
3334 rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
3335 AssertRC(rc);
3336 }
3337 return rc;
3338
3339 case DISCREG_CR4:
3340 oldval = CPUMGetGuestCR4(pVCpu);
3341 rc = CPUMSetGuestCR4(pVCpu, val); AssertRC(rc);
3342 val = CPUMGetGuestCR4(pVCpu);
3343
3344 /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3345 msrEFER = CPUMGetGuestEFER(pVCpu);
3346 if ( (msrEFER & MSR_K6_EFER_LMA)
3347 && (oldval & X86_CR4_PAE)
3348 && !(val & X86_CR4_PAE))
3349 {
3350 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3351 }
3352
3353 /* From IEM iemCImpl_load_CrX. */
3354 /** @todo Check guest CPUID bits for determining corresponding valid bits. */
3355 fValid = X86_CR4_VME | X86_CR4_PVI
3356 | X86_CR4_TSD | X86_CR4_DE
3357 | X86_CR4_PSE | X86_CR4_PAE
3358 | X86_CR4_MCE | X86_CR4_PGE
3359 | X86_CR4_PCE | X86_CR4_OSFXSR
3360 | X86_CR4_OSXMMEEXCPT;
3361 //if (xxx)
3362 // fValid |= X86_CR4_VMXE;
3363 //if (xxx)
3364 // fValid |= X86_CR4_OSXSAVE;
3365 if (val & ~(uint64_t)fValid)
3366 {
3367 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", val, val & ~(uint64_t)fValid));
3368 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3369 }
3370
3371 rc = VINF_SUCCESS;
3372 if ( (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
3373 != (val & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
3374 {
3375 /* global flush */
3376 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3377 AssertRCReturn(rc, rc);
3378 }
3379
3380 /* Feeling extremely lazy. */
3381# ifdef IN_RC
3382 if ( (oldval & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
3383 != (val & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
3384 {
3385 Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
3386 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
3387 }
3388# endif
3389# ifdef VBOX_WITH_RAW_MODE
3390 if (((val ^ oldval) & X86_CR4_VME) && VM_IS_RAW_MODE_ENABLED(pVM))
3391 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3392# endif
3393
3394 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3395 return rc2 == VINF_SUCCESS ? rc : rc2;
3396
3397 case DISCREG_CR8:
3398 return APICSetTpr(pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
3399
3400 default:
3401 AssertFailed();
3402 case DISCREG_CR1: /* illegal op */
3403 break;
3404 }
3405 return VERR_EM_INTERPRETER;
3406}
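/**
 * For reference only (hypothetical snippet): the DISCREG_CR8 case above maps
 * CR8[3:0] onto TPR[7:4] before handing the value to the APIC. Given a
 * guest-written value uCr8, the conversion in both directions is a 4-bit shift:
 * @code
 *     uint8_t uTpr     = (uint8_t)((uCr8 & 0xf) << 4);   // CR8 write -> APIC TPR (APICSetTpr input)
 *     uint8_t uCr8Read = (uTpr >> 4) & 0xf;              // APIC TPR  -> value seen on a CR8 read
 * @endcode
 */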
3407
3408
3409/**
3410 * LMSW Emulation.
3411 */
3412static int emInterpretLmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3413{
3414 DISQPVPARAMVAL param1;
3415 uint32_t val;
3416 NOREF(pvFault); NOREF(pcbSize);
3417 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3418
3419 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3420 if(RT_FAILURE(rc))
3421 return VERR_EM_INTERPRETER;
3422
3423 switch(param1.type)
3424 {
3425 case DISQPV_TYPE_IMMEDIATE:
3426 case DISQPV_TYPE_ADDRESS:
3427 if(!(param1.flags & DISQPV_FLAG_16))
3428 return VERR_EM_INTERPRETER;
3429 val = param1.val.val32;
3430 break;
3431
3432 default:
3433 return VERR_EM_INTERPRETER;
3434 }
3435
3436 LogFlow(("emInterpretLmsw %x\n", val));
3437 uint64_t OldCr0 = CPUMGetGuestCR0(pVCpu);
3438
3439 /* Only PE, MP, EM and TS can be changed; note that PE can't be cleared by this instruction. */
3440 uint64_t NewCr0 = ( OldCr0 & ~( X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
3441 | (val & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
3442
3443 return emUpdateCRx(pVM, pVCpu, pRegFrame, DISCREG_CR0, NewCr0);
3444
3445}
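/**
 * Worked example with hypothetical values (for reference only) of the LMSW
 * masking above: with CR0 = 0x8005003b and an LMSW operand of 0x0008, only MP,
 * EM and TS are replaced from the operand, while PE cannot be cleared:
 * @code
 *     uint64_t OldCr0 = UINT64_C(0x8005003b);
 *     uint16_t uMsw   = 0x0008;
 *     uint64_t NewCr0 = (OldCr0 & ~(uint64_t)(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
 *                     | (uMsw   &  (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
 *     // NewCr0 == 0x80050039: MP is dropped, TS stays set, and PE plus the
 *     // untouched bits (PG, AM, WP, NE, ET) are preserved.
 * @endcode
 */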
3446
3447#ifdef EM_EMULATE_SMSW
3448/**
3449 * SMSW Emulation.
3450 */
3451static int emInterpretSmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3452{
3453 NOREF(pvFault); NOREF(pcbSize);
3454 DISQPVPARAMVAL param1;
3455 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3456
3457 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3458 if(RT_FAILURE(rc))
3459 return VERR_EM_INTERPRETER;
3460
3461 switch(param1.type)
3462 {
3463 case DISQPV_TYPE_IMMEDIATE:
3464 if(param1.size != sizeof(uint16_t))
3465 return VERR_EM_INTERPRETER;
3466 LogFlow(("emInterpretSmsw %d <- cr0 (%x)\n", pDis->Param1.Base.idxGenReg, cr0));
3467 rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, cr0);
3468 break;
3469
3470 case DISQPV_TYPE_ADDRESS:
3471 {
3472 RTGCPTR pParam1;
3473
3474 /* Actually forced to 16 bits regardless of the operand size. */
3475 if(param1.size != sizeof(uint16_t))
3476 return VERR_EM_INTERPRETER;
3477
3478 pParam1 = (RTGCPTR)param1.val.val64;
3479 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
3480 LogFlow(("emInterpretSmsw %RGv <- cr0 (%x)\n", pParam1, cr0));
3481
3482 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &cr0, sizeof(uint16_t));
3483 if (RT_FAILURE(rc))
3484 {
3485 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
3486 return VERR_EM_INTERPRETER;
3487 }
3488 break;
3489 }
3490
3491 default:
3492 return VERR_EM_INTERPRETER;
3493 }
3494
3495 LogFlow(("emInterpretSmsw %x\n", cr0));
3496 return rc;
3497}
3498#endif
3499
3500
3501/**
3502 * Interpret CRx read.
3503 *
3504 * @returns VBox status code.
3505 * @param pVM The cross context VM structure.
3506 * @param pVCpu The cross context virtual CPU structure.
3507 * @param pRegFrame The register frame.
3508 * @param DestRegGen General purpose register index (USE_REG_E**)
3509 * @param SrcRegCrx CRx register index (DISUSE_REG_CR*)
3510 *
3511 */
3512static int emInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx)
3513{
3514 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3515 uint64_t val64;
3516 int rc = CPUMGetGuestCRx(pVCpu, SrcRegCrx, &val64);
3517 AssertMsgRCReturn(rc, ("CPUMGetGuestCRx %d failed\n", SrcRegCrx), VERR_EM_INTERPRETER);
3518 NOREF(pVM);
3519
3520 if (CPUMIsGuestIn64BitCode(pVCpu))
3521 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
3522 else
3523 rc = DISWriteReg32(pRegFrame, DestRegGen, val64);
3524
3525 if (RT_SUCCESS(rc))
3526 {
3527 LogFlow(("MOV_CR: gen32=%d CR=%d val=%RX64\n", DestRegGen, SrcRegCrx, val64));
3528 return VINF_SUCCESS;
3529 }
3530 return VERR_EM_INTERPRETER;
3531}
3532
3533
3534/**
3535 * Interpret CRx write.
3536 *
3537 * @returns VBox status code.
3538 * @param pVM The cross context VM structure.
3539 * @param pVCpu The cross context virtual CPU structure.
3540 * @param pRegFrame The register frame.
3541 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3542 * @param SrcRegGen General purpose register index (USE_REG_E**)
3543 *
3544 */
3545static int emInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
3546{
3547 uint64_t val;
3548 int rc;
3549 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3550
3551 if (CPUMIsGuestIn64BitCode(pVCpu))
3552 rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
3553 else
3554 {
3555 uint32_t val32;
3556 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
3557 val = val32;
3558 }
3559
3560 if (RT_SUCCESS(rc))
3561 return emUpdateCRx(pVM, pVCpu, pRegFrame, DestRegCrx, val);
3562
3563 return VERR_EM_INTERPRETER;
3564}
3565
3566
3567/**
3568 * MOV CRx
3569 */
3570static int emInterpretMovCRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3571{
3572 NOREF(pvFault); NOREF(pcbSize);
3573 if ((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_CR)
3574 return emInterpretCRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxCtrlReg);
3575
3576 if (pDis->Param1.fUse == DISUSE_REG_CR && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3577 return emInterpretCRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxCtrlReg, pDis->Param2.Base.idxGenReg);
3578
3579 AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
3580}
3581
3582
3583/**
3584 * MOV DRx
3585 */
3586static int emInterpretMovDRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3587{
3588 int rc = VERR_EM_INTERPRETER;
3589 NOREF(pvFault); NOREF(pcbSize);
3590
3591 if((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_DBG)
3592 {
3593 rc = EMInterpretDRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxDbgReg);
3594 }
3595 else
3596 if(pDis->Param1.fUse == DISUSE_REG_DBG && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3597 {
3598 rc = EMInterpretDRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxDbgReg, pDis->Param2.Base.idxGenReg);
3599 }
3600 else
3601 AssertMsgFailed(("Unexpected debug register move\n"));
3602
3603 return rc;
3604}
3605
3606
3607/**
3608 * LLDT Emulation.
3609 */
3610static int emInterpretLLdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3611{
3612 DISQPVPARAMVAL param1;
3613 RTSEL sel;
3614 NOREF(pVM); NOREF(pvFault); NOREF(pcbSize);
3615
3616 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3617 if(RT_FAILURE(rc))
3618 return VERR_EM_INTERPRETER;
3619
3620 switch(param1.type)
3621 {
3622 case DISQPV_TYPE_ADDRESS:
3623 return VERR_EM_INTERPRETER; //feeling lazy right now
3624
3625 case DISQPV_TYPE_IMMEDIATE:
3626 if(!(param1.flags & DISQPV_FLAG_16))
3627 return VERR_EM_INTERPRETER;
3628 sel = (RTSEL)param1.val.val16;
3629 break;
3630
3631 default:
3632 return VERR_EM_INTERPRETER;
3633 }
3634
3635#ifdef IN_RING0
3636 /* Only for the VT-x real-mode emulation case. */
3637 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3638 CPUMSetGuestLDTR(pVCpu, sel);
3639 return VINF_SUCCESS;
3640#else
3641 if (sel == 0)
3642 {
3643 if (CPUMGetHyperLDTR(pVCpu) == 0)
3644 {
3645 // this simple case is most frequent in Windows 2000 (31k - boot & shutdown)
3646 return VINF_SUCCESS;
3647 }
3648 }
3649 //still feeling lazy
3650 return VERR_EM_INTERPRETER;
3651#endif
3652}
3653
3654#ifdef IN_RING0
3655/**
3656 * LIDT/LGDT Emulation.
3657 */
3658static int emInterpretLIGdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3659{
3660 DISQPVPARAMVAL param1;
3661 RTGCPTR pParam1;
3662 X86XDTR32 dtr32;
3663 NOREF(pvFault); NOREF(pcbSize);
3664
3665 Log(("Emulate %s at %RGv\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip));
3666
3667 /* Only for the VT-x real-mode emulation case. */
3668 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3669
3670 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3671 if(RT_FAILURE(rc))
3672 return VERR_EM_INTERPRETER;
3673
3674 switch(param1.type)
3675 {
3676 case DISQPV_TYPE_ADDRESS:
3677 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, param1.val.val16);
3678 break;
3679
3680 default:
3681 return VERR_EM_INTERPRETER;
3682 }
3683
3684 rc = emRamRead(pVM, pVCpu, pRegFrame, &dtr32, pParam1, sizeof(dtr32));
3685 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3686
3687 if (!(pDis->fPrefix & DISPREFIX_OPSIZE))
3688 dtr32.uAddr &= 0xffffff; /* 16-bit operand size */
3689
3690 if (pDis->pCurInstr->uOpcode == OP_LIDT)
3691 CPUMSetGuestIDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3692 else
3693 CPUMSetGuestGDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3694
3695 return VINF_SUCCESS;
3696}
3697#endif
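/**
 * Illustrative sketch with hypothetical values (for reference only): the
 * LIDT/LGDT worker above reads a 6-byte pseudo descriptor (16-bit limit
 * followed by a 32-bit base) and, without an operand-size prefix, keeps only
 * the low 24 bits of the base, as the 16-bit forms of these instructions
 * require:
 * @code
 *     X86XDTR32 dtr32;
 *     dtr32.cb    = 0x03ff;              // limit as read from guest memory
 *     dtr32.uAddr = 0x12345678;          // base as read from guest memory
 *     if (!(pDis->fPrefix & DISPREFIX_OPSIZE))
 *         dtr32.uAddr &= 0xffffff;       // 16-bit operand size -> 0x00345678
 * @endcode
 */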
3698
3699
3700#ifdef IN_RC
3701/**
3702 * STI Emulation.
3703 *
3704 * @remark The instruction following STI is guaranteed to be executed before any interrupts are dispatched.
3705 */
3706static int emInterpretSti(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3707{
3708 NOREF(pcbSize);
3709 PPATMGCSTATE pGCState = PATMGetGCState(pVM);
3710
3711 if(!pGCState)
3712 {
3713 Assert(pGCState);
3714 return VERR_EM_INTERPRETER;
3715 }
3716 pGCState->uVMFlags |= X86_EFL_IF;
3717
3718 Assert(pRegFrame->eflags.u32 & X86_EFL_IF);
3719 Assert(pvFault == SELMToFlat(pVM, DISSELREG_CS, pRegFrame, (RTGCPTR)pRegFrame->rip));
3720
3721 pVCpu->em.s.GCPtrInhibitInterrupts = pRegFrame->eip + pDis->cbInstr;
3722 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3723
3724 return VINF_SUCCESS;
3725}
3726#endif /* IN_RC */
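/**
 * Rough sketch (hypothetical dispatcher code, for reference only): the STI
 * emulation above records the address of the next instruction and raises
 * VMCPU_FF_INHIBIT_INTERRUPTS. A dispatcher honouring that shadow would hold
 * off injection while RIP still points at the recorded address, roughly like
 * this:
 * @code
 *     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
 *         && pRegFrame->rip == pVCpu->em.s.GCPtrInhibitInterrupts)
 *         return VINF_SUCCESS;            // still in the one-instruction shadow, don't inject yet
 *     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
 * @endcode
 */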
3727
3728
3729/**
3730 * HLT Emulation.
3731 */
3732static VBOXSTRICTRC
3733emInterpretHlt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3734{
3735 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3736 return VINF_EM_HALT;
3737}
3738
3739
3740/**
3741 * RDTSC Emulation.
3742 */
3743static int emInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3744{
3745 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3746 return EMInterpretRdtsc(pVM, pVCpu, pRegFrame);
3747}
3748
3749/**
3750 * RDPMC Emulation.
3751 */
3752static int emInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3753{
3754 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3755 return EMInterpretRdpmc(pVM, pVCpu, pRegFrame);
3756}
3757
3758
3759static int emInterpretMonitor(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3760{
3761 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3762 return EMInterpretMonitor(pVM, pVCpu, pRegFrame);
3763}
3764
3765
3766static VBOXSTRICTRC emInterpretMWait(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3767{
3768 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3769 return EMInterpretMWait(pVM, pVCpu, pRegFrame);
3770}
3771
3772
3773/**
3774 * RDMSR Emulation.
3775 */
3776static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3777{
3778 /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
3779 different, so we play it safe by completely disassembling the instruction. */
3780 Assert(!(pDis->fPrefix & DISPREFIX_REX));
3781 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3782 return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
3783}
3784
3785
3786/**
3787 * WRMSR Emulation.
3788 */
3789static int emInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3790{
3791 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3792 return EMInterpretWrmsr(pVM, pVCpu, pRegFrame);
3793}
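/**
 * For reference only (hypothetical snippet): RDMSR and WRMSR take the MSR
 * index in ECX and the 64-bit value in EDX:EAX, which is why the two workers
 * above only need the register frame:
 * @code
 *     uint32_t idMsr  = pRegFrame->ecx;
 *     uint64_t uValue = ((uint64_t)pRegFrame->edx << 32) | pRegFrame->eax;   // WRMSR input
 *     pRegFrame->rax  = (uint32_t)uValue;                                    // RDMSR output, low half
 *     pRegFrame->rdx  = (uint32_t)(uValue >> 32);                            // RDMSR output, high half
 * @endcode
 */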
3794
3795
3796/**
3797 * Internal worker.
3798 * @copydoc emInterpretInstructionCPUOuter
3799 * @param pVM The cross context VM structure.
3800 */
3801DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
3802 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
3803{
3804 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3805 Assert(enmCodeType == EMCODETYPE_SUPERVISOR || enmCodeType == EMCODETYPE_ALL);
3806 Assert(pcbSize);
3807 *pcbSize = 0;
3808
3809 if (enmCodeType == EMCODETYPE_SUPERVISOR)
3810 {
3811 /*
3812 * Only supervisor guest code!!
3813 * And no complicated prefixes.
3814 */
3815 /* Get the current privilege level. */
3816 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3817#ifdef VBOX_WITH_RAW_RING1
3818 if ( !EMIsRawRing1Enabled(pVM)
3819 || cpl > 1
3820 || pRegFrame->eflags.Bits.u2IOPL > cpl
3821 )
3822#endif
3823 {
3824 if ( cpl != 0
3825 && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */
3826 {
3827 Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
3828 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
3829 return VERR_EM_INTERPRETER;
3830 }
3831 }
3832 }
3833 else
3834 Log2(("emInterpretInstructionCPU allowed to interpret user-level code!!\n"));
3835
3836#ifdef IN_RC
3837 if ( (pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP))
3838 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3839 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3840 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3841 && pDis->pCurInstr->uOpcode != OP_XADD
3842 && pDis->pCurInstr->uOpcode != OP_OR
3843 && pDis->pCurInstr->uOpcode != OP_AND
3844 && pDis->pCurInstr->uOpcode != OP_XOR
3845 && pDis->pCurInstr->uOpcode != OP_BTR
3846 )
3847 )
3848#else
3849 if ( (pDis->fPrefix & DISPREFIX_REPNE)
3850 || ( (pDis->fPrefix & DISPREFIX_REP)
3851 && pDis->pCurInstr->uOpcode != OP_STOSWD
3852 )
3853 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3854 && pDis->pCurInstr->uOpcode != OP_OR
3855 && pDis->pCurInstr->uOpcode != OP_AND
3856 && pDis->pCurInstr->uOpcode != OP_XOR
3857 && pDis->pCurInstr->uOpcode != OP_BTR
3858 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3859 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3860 )
3861 )
3862#endif
3863 {
3864 //Log(("EMInterpretInstruction: wrong prefix!!\n"));
3865 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedPrefix));
3866 Log4(("EM: Refuse %u on REP/REPNE/LOCK prefix grounds\n", pDis->pCurInstr->uOpcode));
3867 return VERR_EM_INTERPRETER;
3868 }
3869
3870#if HC_ARCH_BITS == 32
3871 /*
3872 * Unable to emulate most accesses wider than 4 bytes on a 32-bit host.
3873 * Whitelisted instructions are safe.
3874 */
3875 if ( pDis->Param1.cb > 4
3876 && CPUMIsGuestIn64BitCode(pVCpu))
3877 {
3878 uint32_t uOpCode = pDis->pCurInstr->uOpcode;
3879 if ( uOpCode != OP_STOSWD
3880 && uOpCode != OP_MOV
3881 && uOpCode != OP_CMPXCHG8B
3882 && uOpCode != OP_XCHG
3883 && uOpCode != OP_BTS
3884 && uOpCode != OP_BTR
3885 && uOpCode != OP_BTC
3886 )
3887 {
3888# ifdef VBOX_WITH_STATISTICS
3889 switch (pDis->pCurInstr->uOpcode)
3890 {
3891# define INTERPRET_FAILED_CASE(opcode, Instr) \
3892 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); break;
3893 INTERPRET_FAILED_CASE(OP_XCHG,Xchg);
3894 INTERPRET_FAILED_CASE(OP_DEC,Dec);
3895 INTERPRET_FAILED_CASE(OP_INC,Inc);
3896 INTERPRET_FAILED_CASE(OP_POP,Pop);
3897 INTERPRET_FAILED_CASE(OP_OR, Or);
3898 INTERPRET_FAILED_CASE(OP_XOR,Xor);
3899 INTERPRET_FAILED_CASE(OP_AND,And);
3900 INTERPRET_FAILED_CASE(OP_MOV,Mov);
3901 INTERPRET_FAILED_CASE(OP_STOSWD,StosWD);
3902 INTERPRET_FAILED_CASE(OP_INVLPG,InvlPg);
3903 INTERPRET_FAILED_CASE(OP_CPUID,CpuId);
3904 INTERPRET_FAILED_CASE(OP_MOV_CR,MovCRx);
3905 INTERPRET_FAILED_CASE(OP_MOV_DR,MovDRx);
3906 INTERPRET_FAILED_CASE(OP_LLDT,LLdt);
3907 INTERPRET_FAILED_CASE(OP_LIDT,LIdt);
3908 INTERPRET_FAILED_CASE(OP_LGDT,LGdt);
3909 INTERPRET_FAILED_CASE(OP_LMSW,Lmsw);
3910 INTERPRET_FAILED_CASE(OP_CLTS,Clts);
3911 INTERPRET_FAILED_CASE(OP_MONITOR,Monitor);
3912 INTERPRET_FAILED_CASE(OP_MWAIT,MWait);
3913 INTERPRET_FAILED_CASE(OP_RDMSR,Rdmsr);
3914 INTERPRET_FAILED_CASE(OP_WRMSR,Wrmsr);
3915 INTERPRET_FAILED_CASE(OP_ADD,Add);
3916 INTERPRET_FAILED_CASE(OP_SUB,Sub);
3917 INTERPRET_FAILED_CASE(OP_ADC,Adc);
3918 INTERPRET_FAILED_CASE(OP_BTR,Btr);
3919 INTERPRET_FAILED_CASE(OP_BTS,Bts);
3920 INTERPRET_FAILED_CASE(OP_BTC,Btc);
3921 INTERPRET_FAILED_CASE(OP_RDTSC,Rdtsc);
3922 INTERPRET_FAILED_CASE(OP_CMPXCHG, CmpXchg);
3923 INTERPRET_FAILED_CASE(OP_STI, Sti);
3924 INTERPRET_FAILED_CASE(OP_XADD,XAdd);
3925 INTERPRET_FAILED_CASE(OP_CMPXCHG8B,CmpXchg8b);
3926 INTERPRET_FAILED_CASE(OP_HLT, Hlt);
3927 INTERPRET_FAILED_CASE(OP_IRET,Iret);
3928 INTERPRET_FAILED_CASE(OP_WBINVD,WbInvd);
3929 INTERPRET_FAILED_CASE(OP_MOVNTPS,MovNTPS);
3930# undef INTERPRET_FAILED_CASE
3931 default:
3932 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
3933 break;
3934 }
3935# endif /* VBOX_WITH_STATISTICS */
3936 Log4(("EM: Refuse %u on grounds of accessing %u bytes\n", pDis->pCurInstr->uOpcode, pDis->Param1.cb));
3937 return VERR_EM_INTERPRETER;
3938 }
3939 }
3940#endif
3941
3942 VBOXSTRICTRC rc;
3943#if (defined(VBOX_STRICT) || defined(LOG_ENABLED))
3944 LogFlow(("emInterpretInstructionCPU %s\n", emGetMnemonic(pDis)));
3945#endif
3946 switch (pDis->pCurInstr->uOpcode)
3947 {
3948 /*
3949 * Macros for generating the right case statements.
3950 */
3951# ifndef VBOX_COMPARE_IEM_AND_EM
3952# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3953 case opcode:\
3954 if (pDis->fPrefix & DISPREFIX_LOCK) \
3955 rc = emInterpretLock##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulateLock); \
3956 else \
3957 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3958 if (RT_SUCCESS(rc)) \
3959 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3960 else \
3961 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3962 return rc
3963# else /* VBOX_COMPARE_IEM_AND_EM */
3964# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3965 case opcode:\
3966 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3967 if (RT_SUCCESS(rc)) \
3968 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3969 else \
3970 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3971 return rc
3972# endif /* VBOX_COMPARE_IEM_AND_EM */
3973
3974#define INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate) \
3975 case opcode:\
3976 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3977 if (RT_SUCCESS(rc)) \
3978 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3979 else \
3980 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3981 return rc
3982
3983#define INTERPRET_CASE_EX_PARAM2(opcode, Instr, InstrFn, pfnEmulate) \
3984 INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate)
3985#define INTERPRET_CASE_EX_LOCK_PARAM2(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3986 INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock)
3987
3988#define INTERPRET_CASE(opcode, Instr) \
3989 case opcode:\
3990 rc = emInterpret##Instr(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
3991 if (RT_SUCCESS(rc)) \
3992 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3993 else \
3994 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3995 return rc
3996
3997#define INTERPRET_CASE_EX_DUAL_PARAM2(opcode, Instr, InstrFn) \
3998 case opcode:\
3999 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
4000 if (RT_SUCCESS(rc)) \
4001 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4002 else \
4003 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4004 return rc
4005
4006#define INTERPRET_STAT_CASE(opcode, Instr) \
4007 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); return VERR_EM_INTERPRETER;
4008
4009 /*
4010 * The actual case statements.
4011 */
4012 INTERPRET_CASE(OP_XCHG,Xchg);
4013 INTERPRET_CASE_EX_PARAM2(OP_DEC,Dec, IncDec, EMEmulateDec);
4014 INTERPRET_CASE_EX_PARAM2(OP_INC,Inc, IncDec, EMEmulateInc);
4015 INTERPRET_CASE(OP_POP,Pop);
4016 INTERPRET_CASE_EX_LOCK_PARAM3(OP_OR, Or, OrXorAnd, EMEmulateOr, EMEmulateLockOr);
4017 INTERPRET_CASE_EX_LOCK_PARAM3(OP_XOR,Xor, OrXorAnd, EMEmulateXor, EMEmulateLockXor);
4018 INTERPRET_CASE_EX_LOCK_PARAM3(OP_AND,And, OrXorAnd, EMEmulateAnd, EMEmulateLockAnd);
4019 INTERPRET_CASE(OP_MOV,Mov);
4020#ifndef IN_RC
4021 INTERPRET_CASE(OP_STOSWD,StosWD);
4022#endif
4023 INTERPRET_CASE(OP_INVLPG,InvlPg);
4024 INTERPRET_CASE(OP_CPUID,CpuId);
4025 INTERPRET_CASE(OP_MOV_CR,MovCRx);
4026 INTERPRET_CASE(OP_MOV_DR,MovDRx);
4027#ifdef IN_RING0
4028 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LIDT, LIdt, LIGdt);
4029 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LGDT, LGdt, LIGdt);
4030#endif
4031 INTERPRET_CASE(OP_LLDT,LLdt);
4032 INTERPRET_CASE(OP_LMSW,Lmsw);
4033#ifdef EM_EMULATE_SMSW
4034 INTERPRET_CASE(OP_SMSW,Smsw);
4035#endif
4036 INTERPRET_CASE(OP_CLTS,Clts);
4037 INTERPRET_CASE(OP_MONITOR, Monitor);
4038 INTERPRET_CASE(OP_MWAIT, MWait);
4039 INTERPRET_CASE(OP_RDMSR, Rdmsr);
4040 INTERPRET_CASE(OP_WRMSR, Wrmsr);
4041 INTERPRET_CASE_EX_PARAM3(OP_ADD,Add, AddSub, EMEmulateAdd);
4042 INTERPRET_CASE_EX_PARAM3(OP_SUB,Sub, AddSub, EMEmulateSub);
4043 INTERPRET_CASE(OP_ADC,Adc);
4044 INTERPRET_CASE_EX_LOCK_PARAM2(OP_BTR,Btr, BitTest, EMEmulateBtr, EMEmulateLockBtr);
4045 INTERPRET_CASE_EX_PARAM2(OP_BTS,Bts, BitTest, EMEmulateBts);
4046 INTERPRET_CASE_EX_PARAM2(OP_BTC,Btc, BitTest, EMEmulateBtc);
4047 INTERPRET_CASE(OP_RDPMC,Rdpmc);
4048 INTERPRET_CASE(OP_RDTSC,Rdtsc);
4049 INTERPRET_CASE(OP_CMPXCHG, CmpXchg);
4050#ifdef IN_RC
4051 INTERPRET_CASE(OP_STI,Sti);
4052 INTERPRET_CASE(OP_XADD, XAdd);
4053 INTERPRET_CASE(OP_IRET,Iret);
4054#endif
4055 INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b);
4056 INTERPRET_CASE(OP_HLT,Hlt);
4057 INTERPRET_CASE(OP_WBINVD,WbInvd);
4058#ifdef VBOX_WITH_STATISTICS
4059# ifndef IN_RC
4060 INTERPRET_STAT_CASE(OP_XADD, XAdd);
4061# endif
4062 INTERPRET_STAT_CASE(OP_MOVNTPS,MovNTPS);
4063#endif
4064
4065 default:
4066 Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->uOpcode));
4067 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
4068 return VERR_EM_INTERPRETER;
4069
4070#undef INTERPRET_CASE_EX_PARAM2
4071#undef INTERPRET_STAT_CASE
4072#undef INTERPRET_CASE_EX
4073#undef INTERPRET_CASE
4074 } /* switch (opcode) */
4075 /* not reached */
4076}
4077
4078/**
4079 * Interprets the current instruction using the supplied DISCPUSTATE structure.
4080 *
4081 * EIP is *NOT* updated!
4082 *
4083 * @returns VBox strict status code.
4084 * @retval VINF_* Scheduling instructions. When these are returned, it
4085 * starts to get a bit tricky to know whether code was
4086 * executed or not... We'll address this when it becomes a problem.
4087 * @retval VERR_EM_INTERPRETER Something we can't cope with.
4088 * @retval VERR_* Fatal errors.
4089 *
4090 * @param pVCpu The cross context virtual CPU structure.
4091 * @param pDis The disassembler cpu state for the instruction to be
4092 * interpreted.
4093 * @param pRegFrame The register frame. EIP is *NOT* changed!
4094 * @param pvFault The fault address (CR2).
4095 * @param pcbSize Size of the write (if applicable).
4096 * @param enmCodeType Code type (user/supervisor)
4097 *
4098 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
4099 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
4100 * to worry about e.g. invalid modrm combinations (!)
4101 *
4102 * @todo At this time we do NOT check if the instruction overwrites vital information.
4103 * Make sure this can't happen!! (will add some assertions/checks later)
4104 */
4105DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
4106 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
4107{
4108 STAM_PROFILE_START(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4109 VBOXSTRICTRC rc = emInterpretInstructionCPU(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, pRegFrame, pvFault, enmCodeType, pcbSize);
4110 STAM_PROFILE_STOP(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4111 if (RT_SUCCESS(rc))
4112 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretSucceeded));
4113 else
4114 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretFailed));
4115 return rc;
4116}
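/**
 * Illustrative sketch of a hypothetical caller (for reference only): how a
 * trap handler could invoke the interpreter, assuming pDis has already been
 * filled in by the disassembler for the faulting instruction:
 * @code
 *     uint32_t     cbSize   = 0;
 *     VBOXSTRICTRC rcStrict = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault,
 *                                                            EMCODETYPE_SUPERVISOR, &cbSize);
 *     if (rcStrict == VERR_EM_INTERPRETER)
 *     {
 *         // Fall back to IEM or the recompiler; EIP has not been advanced.
 *     }
 * @endcode
 */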
4117
4118
4119#endif /* !VBOX_WITH_IEM */