VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@72565

Last change on this file was in revision 72565, checked in by vboxsync, 7 years ago

EM,TRPM: Record TSC with exit history in raw-mode. Added the exit history counter to the statistic (/PROF/CPUx/EM/RecordedExits). bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
1/* $Id: EMAll.cpp 72565 2018-06-15 13:30:01Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_WITH_IEM
23#define LOG_GROUP LOG_GROUP_EM
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/patm.h>
28#include <VBox/vmm/csam.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_IEM
31# include <VBox/vmm/iem.h>
32#endif
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/stam.h>
35#include "EMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/log.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50#ifdef VBOX_WITH_IEM
51//# define VBOX_COMPARE_IEM_AND_EM /* debugging... */
52//# define VBOX_SAME_AS_EM
53//# define VBOX_COMPARE_IEM_LAST
54#endif
55
56#ifdef VBOX_WITH_RAW_RING1
57# define EM_EMULATE_SMSW
58#endif
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64/** @def EM_ASSERT_FAULT_RETURN
65 * Safety check.
66 *
67 * Could in theory misfire on a cross page boundary access...
68 *
69 * Currently disabled because the CSAM (+ PATM) patch monitoring occasionally
70 * turns up an alias page instead of the original faulting one, annoying the
71 * heck out of anyone running a debug build. See @bugref{2609} and @bugref{1931}.
72 */
73#if 0
74# define EM_ASSERT_FAULT_RETURN(expr, rc) AssertReturn(expr, rc)
75#else
76# define EM_ASSERT_FAULT_RETURN(expr, rc) do { } while (0)
77#endif
78
79
80/*********************************************************************************************************************************
81* Internal Functions *
82*********************************************************************************************************************************/
83#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
84DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
85 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize);
86#endif
87
88
89/*********************************************************************************************************************************
90* Global Variables *
91*********************************************************************************************************************************/
92#ifdef VBOX_COMPARE_IEM_AND_EM
93static const uint32_t g_fInterestingFFs = VMCPU_FF_TO_R3
94 | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_INHIBIT_INTERRUPTS
95 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
96 | VMCPU_FF_TLB_FLUSH | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL;
97static uint32_t g_fIncomingFFs;
98static CPUMCTX g_IncomingCtx;
99static bool g_fIgnoreRaxRdx = false;
100
101static uint32_t g_fEmFFs;
102static CPUMCTX g_EmCtx;
103static uint8_t g_abEmWrote[256];
104static size_t g_cbEmWrote;
105
106static uint32_t g_fIemFFs;
107static CPUMCTX g_IemCtx;
108extern uint8_t g_abIemWrote[256];
109#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
110extern size_t g_cbIemWrote;
111#else
112static size_t g_cbIemWrote;
113#endif
114#endif
115
116
117/**
118 * Get the current execution manager status.
119 *
120 * @returns Current status.
121 * @param pVCpu The cross context virtual CPU structure.
122 */
123VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
124{
125 return pVCpu->em.s.enmState;
126}
127
128
129/**
130 * Sets the current execution manager status. (use only when you know what you're doing!)
131 *
132 * @param pVCpu The cross context virtual CPU structure.
133 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
134 */
135VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
136{
137 /* Only allowed combination: */
138 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
139 pVCpu->em.s.enmState = enmNewState;
140}
141
142
143/**
144 * Sets the PC for which interrupts should be inhibited.
145 *
146 * @param pVCpu The cross context virtual CPU structure.
147 * @param PC The PC.
148 */
149VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
150{
151 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
152 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
153}
154
155
156/**
157 * Gets the PC for which interrupts should be inhibited.
158 *
159 * There are a few instructions which inhibit or delay interrupts
160 * for the instruction following them. These instructions are:
161 * - STI
162 * - MOV SS, r/m16
163 * - POP SS
164 *
165 * @returns The PC for which interrupts should be inhibited.
166 * @param pVCpu The cross context virtual CPU structure.
167 *
168 */
169VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
170{
171 return pVCpu->em.s.GCPtrInhibitInterrupts;
172}
173
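/*
 * Editorial usage sketch (not part of the original source): before injecting an
 * external interrupt, a dispatcher typically honours the inhibition only while
 * the guest is still at the shadowed instruction.  Assumes the VMCPU_FF_CLEAR
 * macro from VBox/vmm/vm.h and a guest context pointer pCtx for the same vCPU.
 *
 *     if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
 *         && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
 *         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
 */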
174
175/**
176 * Enables / disables hypercall instructions.
177 *
178 * This interface is used by GIM to tell the execution monitors whether the
179 * hypercall instructions (VMMCALL & VMCALL) are allowed or should raise \#UD.
180 *
181 * @param pVCpu The cross context virtual CPU structure this applies to.
182 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
183 */
184VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
185{
186 pVCpu->em.s.fHypercallEnabled = fEnabled;
187}
188
189
190/**
191 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
192 *
193 * @returns true if enabled, false if not.
194 * @param pVCpu The cross context virtual CPU structure.
195 *
196 * @note If this call becomes a performance factor, we can make the data
197 * field available thru a read-only view in VMCPU. See VM::cpum.ro.
198 */
199VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
200{
201 return pVCpu->em.s.fHypercallEnabled;
202}
203
204
205/**
206 * Prepare an MWAIT - essentials of the MONITOR instruction.
207 *
208 * @returns VINF_SUCCESS
209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
210 * @param rax The content of RAX.
211 * @param rcx The content of RCX.
212 * @param rdx The content of RDX.
213 * @param GCPhys The physical address corresponding to rax.
214 */
215VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
216{
217 pVCpu->em.s.MWait.uMonitorRAX = rax;
218 pVCpu->em.s.MWait.uMonitorRCX = rcx;
219 pVCpu->em.s.MWait.uMonitorRDX = rdx;
220 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
221 /** @todo Make use of GCPhys. */
222 NOREF(GCPhys);
223 /** @todo Complete MONITOR implementation. */
224 return VINF_SUCCESS;
225}
226
227
228/**
229 * Checks if the monitor hardware is armed / active.
230 *
231 * @returns true if armed, false otherwise.
232 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
233 */
234VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
235{
236 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
237}
238
239
240/**
241 * Performs an MWAIT.
242 *
243 * @returns VINF_SUCCESS
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 * @param rax The content of RAX.
246 * @param rcx The content of RCX.
247 */
248VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
249{
250 pVCpu->em.s.MWait.uMWaitRAX = rax;
251 pVCpu->em.s.MWait.uMWaitRCX = rcx;
252 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
253 if (rcx)
254 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
255 else
256 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
257 /** @todo not completely correct?? */
258 return VINF_EM_HALT;
259}
260
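/*
 * Editorial usage sketch (not part of the original source): a MONITOR intercept
 * records the armed address and a later MWAIT intercept is turned into a halt
 * request; the outer loop then consults EMMonitorWaitShouldContinue() when
 * deciding whether to resume.  GCPhysMonitor is a hypothetical physical address
 * the caller derived from RAX, and pCtx is the guest context of the calling EMT.
 *
 *     EMMonitorWaitPrepare(pVCpu, pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMonitor);
 *     ...
 *     int rc = EMMonitorWaitPerform(pVCpu, pCtx->rax, pCtx->rcx);
 *     Assert(rc == VINF_EM_HALT);
 */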
261
262
263/**
264 * Determine if we should continue execution in HM after encountering an mwait
265 * instruction.
266 *
267 * Clears MWAIT flags if returning @c true.
268 *
269 * @returns true if we should continue, false if we should halt.
270 * @param pVCpu The cross context virtual CPU structure.
271 * @param pCtx Current CPU context.
272 */
273VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
274{
275 if ( pCtx->eflags.Bits.u1IF
276 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
277 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
278 {
279 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
280 {
281 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
282 return true;
283 }
284 }
285
286 return false;
287}
288
289
290/**
291 * Determine if we should continue execution in HM after encountering a hlt
292 * instruction.
293 *
294 * @returns true if we should continue, false if we should halt.
295 * @param pVCpu The cross context virtual CPU structure.
296 * @param pCtx Current CPU context.
297 */
298VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
299{
300 /** @todo Shouldn't we be checking GIF here? */
301 if (pCtx->eflags.Bits.u1IF)
302 return VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
303 return false;
304}
305
306
307/**
308 * Unhalts and wakes up the given CPU.
309 *
310 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
311 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up. If
312 * the CPU isn't currently in a halt, the next HLT instruction it executes will
313 * be affected.
314 *
315 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
316 * @param pVM The cross context VM structure.
317 * @param pVCpuDst The cross context virtual CPU structure of the
318 * CPU to unhalt and wake up. This is usually not the
319 * same as the caller.
320 * @thread EMT
321 */
322VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst)
323{
324 /*
325 * Flag the current(/next) HLT to unhalt immediately.
326 */
327 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);
328
329 /*
330 * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
331 * just do it here for now).
332 */
333#ifdef IN_RING0
334 /* We might be here with preemption disabled or enabled (i.e. depending on
335 thread-context hooks being used), so don't try obtaining the GVMMR0 used
336 lock here. See @bugref{7270#c148}. */
337 int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
338 AssertRC(rc);
339
340#elif defined(IN_RING3)
341 int rc = SUPR3CallVMMR0(pVM->pVMR0, pVCpuDst->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
342 AssertRC(rc);
343
344#else
345 /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
346 Assert(pVM->cCpus == 1); NOREF(pVM);
347 int rc = VINF_SUCCESS;
348#endif
349 return rc;
350}
351
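/*
 * Editorial usage sketch (not part of the original source): a KVM KICK_CPU style
 * hypercall handler resolves the target vCPU from the hypercall arguments and
 * pokes it.  idTargetCpu is a hypothetical, already validated CPU index.
 *
 *     PVMCPU pVCpuDst = &pVM->aCpus[idTargetCpu];
 *     int rc = EMUnhaltAndWakeUp(pVM, pVCpuDst);
 *     AssertRC(rc);
 */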
352#ifndef IN_RING3
353
354/**
355 * Makes an I/O port write pending for ring-3 processing.
356 *
357 * @returns VINF_EM_PENDING_R3_IOPORT_WRITE
358 * @param pVCpu The cross context virtual CPU structure.
359 * @param uPort The I/O port.
360 * @param cbInstr The instruction length (for RIP updating).
361 * @param cbValue The write size.
362 * @param uValue The value being written.
363 * @sa emR3ExecutePendingIoPortWrite
364 *
365 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
366 */
367VMMRZ_INT_DECL(VBOXSTRICTRC)
368EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
369{
370 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
371 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
372 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
373 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
374 pVCpu->em.s.PendingIoPortAccess.uValue = uValue;
375 return VINF_EM_PENDING_R3_IOPORT_WRITE;
376}
377
378
379/**
380 * Makes an I/O port read pending for ring-3 processing.
381 *
382 * @returns VINF_EM_PENDING_R3_IOPORT_READ
383 * @param pVCpu The cross context virtual CPU structure.
384 * @param uPort The I/O port.
385 * @param cbInstr The instruction length (for RIP updating).
386 * @param cbValue The read size.
387 * @sa emR3ExecutePendingIoPortRead
388 *
389 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
390 */
391VMMRZ_INT_DECL(VBOXSTRICTRC)
392EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
393{
394 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
395 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
396 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
397 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
398 pVCpu->em.s.PendingIoPortAccess.uValue = UINT32_C(0x52454144); /* 'READ' */
399 return VINF_EM_PENDING_R3_IOPORT_READ;
400}
401
402#endif /* !IN_RING3 */
403
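/*
 * Editorial usage sketch (not part of the original source): a ring-0 OUT handler
 * that cannot complete the access in that context defers it to ring-3.  uPort,
 * cbInstr and uValue are assumed to come from the exit information; the write
 * shown is a single byte.
 *
 *     return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, 1, uValue);
 */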
404
405/**
406 * Adds an exit to the history for this CPU.
407 *
408 * @returns Suggested action to take.
409 * @param pVCpu The cross context virtual CPU structure.
410 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
411 * @param uFlatPC The flattened program counter (RIP). UINT64_MAX if not available.
412 * @param uTimestamp The TSC value for the exit, 0 if not available.
413 * @thread EMT(pVCpu)
414 */
415VMM_INT_DECL(EMEXITACTION) EMHistoryAddExit(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
416{
417 VMCPU_ASSERT_EMT(pVCpu);
418
419 /*
420 * Add the exit history entry.
421 */
422 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
423 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
424 pHistEntry->uFlatPC = uFlatPC;
425 pHistEntry->uTimestamp = uTimestamp;
426 pHistEntry->uFlagsAndType = uFlagsAndType;
427 pHistEntry->idxSlot = UINT32_MAX;
428
429 /*
430 * If common exit type, we will insert/update the exit into the shared hash table.
431 */
432 if ((uFlagsAndType & EMEXIT_F_KIND_MASK) == EMEXIT_F_KIND_EM)
433 {
434 /** @todo later */
435 }
436
437 return EMEXITACTION_NORMAL;
438}
439
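/*
 * Editorial usage sketch (not part of the original source): an execution engine
 * records each exit so frequently hit PCs can later be optimized.  The value
 * uFlagsAndType is assumed to have been built with the EMEXIT_MAKE_FLAGS_AND_TYPE
 * macro referenced above, and a flat CS is assumed so RIP doubles as the flat PC.
 *
 *     EMEXITACTION enmAction = EMHistoryAddExit(pVCpu, uFlagsAndType,
 *                                               CPUMQueryGuestCtxPtr(pVCpu)->rip,
 *                                               TMCpuTickGet(pVCpu));
 *     Assert(enmAction == EMEXITACTION_NORMAL); NOREF(enmAction);
 */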
440
441#ifdef IN_RC
442/**
443 * Special raw-mode interface for adding an exit to the history.
444 *
445 * Currently this is only for recording, not optimizing, so no return value. If
446 * we start seriously caring about raw-mode again, we may extend it.
447 *
448 * @param pVCpu The cross context virtual CPU structure.
449 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
450 * @param uCs The CS.
451 * @param uEip The EIP.
452 * @param uTimestamp The TSC value for the exit, 0 if not available.
453 * @thread EMT(0)
454 */
455VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
456{
457 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
458 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
459 pHistEntry->uFlatPC = ((uint64_t)uCs << 32) | uEip;
460 pHistEntry->uTimestamp = uTimestamp;
461 pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
462 pHistEntry->idxSlot = UINT32_MAX;
463}
464#endif
465
466
467#ifdef IN_RING0
468/**
469 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
470 *
471 * @param pVCpu The cross context virtual CPU structure.
472 * @param uFlatPC The flattened program counter (RIP).
473 * @param fFlattened Set if RIP was subjected to CS.BASE, clear if not.
474 */
475VMMR0_INT_DECL(void) EMR0HistoryUpdatePC(PVMCPU pVCpu, uint64_t uFlatPC, bool fFlattened)
476{
477 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
478 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[((uintptr_t)pVCpu->em.s.iNextExit - 1) & 0xff];
479 pHistEntry->uFlatPC = uFlatPC;
480 if (fFlattened)
481 pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
482 else
483 pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
484}
485#endif
486
487
488/**
489 * Interface for converting an engine-specific exit to a generic one and getting guidance.
490 *
491 * @param pVCpu The cross context virtual CPU structure.
492 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
493 * @thread EMT(pVCpu)
494 */
495VMM_INT_DECL(EMEXITACTION) EMHistoryUpdateFlagsAndType(PVMCPU pVCpu, uint32_t uFlagsAndType)
496{
497 VMCPU_ASSERT_EMT(pVCpu);
498
499 /*
500 * Do the updating.
501 */
502 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
503 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[((uintptr_t)pVCpu->em.s.iNextExit - 1) & 0xff];
504 pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));
505
506 /*
507 * If common exit type, we will insert/update the exit into the shared hash table.
508 */
509 if ((uFlagsAndType & EMEXIT_F_KIND_MASK) == EMEXIT_F_KIND_EM)
510 {
511 /** @todo later */
512 }
513
514 return EMEXITACTION_NORMAL;
515}
516
517
518/**
519 * Interface for converting an engine-specific exit to a generic one and getting
520 * guidance, supplying the flattened PC too.
521 *
522 * @param pVCpu The cross context virtual CPU structure.
523 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
524 * @param uFlatPC The flattened program counter (RIP).
525 * @thread EMT(pVCpu)
526 */
527VMM_INT_DECL(EMEXITACTION) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
528{
529 VMCPU_ASSERT_EMT(pVCpu);
530
531 /*
532 * Do the updating.
533 */
534 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
535 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[((uintptr_t)pVCpu->em.s.iNextExit - 1) & 0xff];
536 pHistEntry->uFlagsAndType = uFlagsAndType;
537 pHistEntry->uFlatPC = uFlatPC;
538
539 /*
540 * If common exit type, we will insert/update the exit into the shared hash table.
541 */
542 if ((uFlagsAndType & EMEXIT_F_KIND_MASK) == EMEXIT_F_KIND_EM)
543 {
544 /** @todo later */
545 }
546
547 return EMEXITACTION_NORMAL;
548}
549
550
551/**
552 * Locks REM execution to a single VCPU.
553 *
554 * @param pVM The cross context VM structure.
555 */
556VMMDECL(void) EMRemLock(PVM pVM)
557{
558#ifdef VBOX_WITH_REM
559 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
560 return; /* early init */
561
562 Assert(!PGMIsLockOwner(pVM));
563 Assert(!IOMIsLockWriteOwner(pVM));
564 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
565 AssertRCSuccess(rc);
566#else
567 RT_NOREF(pVM);
568#endif
569}
570
571
572/**
573 * Unlocks REM execution.
574 *
575 * @param pVM The cross context VM structure.
576 */
577VMMDECL(void) EMRemUnlock(PVM pVM)
578{
579#ifdef VBOX_WITH_REM
580 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
581 return; /* early init */
582
583 PDMCritSectLeave(&pVM->em.s.CritSectREM);
584#else
585 RT_NOREF(pVM);
586#endif
587}
588
589
590/**
591 * Check if this VCPU currently owns the REM lock.
592 *
593 * @returns bool owner/not owner
594 * @param pVM The cross context VM structure.
595 */
596VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
597{
598#ifdef VBOX_WITH_REM
599 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
600 return true; /* early init */
601
602 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
603#else
604 RT_NOREF(pVM);
605 return true;
606#endif
607}
608
609
610/**
611 * Try to acquire the REM lock.
612 *
613 * @returns VBox status code
614 * @param pVM The cross context VM structure.
615 */
616VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
617{
618#ifdef VBOX_WITH_REM
619 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
620 return VINF_SUCCESS; /* early init */
621
622 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
623#else
624 RT_NOREF(pVM);
625 return VINF_SUCCESS;
626#endif
627}
628
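/*
 * Editorial usage sketch (not part of the original source): callers bracket any
 * access to shared recompiler state with the lock, typically like this:
 *
 *     EMRemLock(pVM);
 *     Assert(EMRemIsLockOwner(pVM));
 *     ... touch REM state ...
 *     EMRemUnlock(pVM);
 */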
629
630/**
631 * @callback_method_impl{FNDISREADBYTES}
632 */
633static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
634{
635 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
636#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
637 PVM pVM = pVCpu->CTX_SUFF(pVM);
638#endif
639 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
640 int rc;
641
642 /*
643 * Figure how much we can or must read.
644 */
645 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
646 if (cbToRead > cbMaxRead)
647 cbToRead = cbMaxRead;
648 else if (cbToRead < cbMinRead)
649 cbToRead = cbMinRead;
650
651#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
652 /*
653 * We might be called upon to interpret an instruction in a patch.
654 */
655 if (PATMIsPatchGCAddr(pVM, uSrcAddr))
656 {
657# ifdef IN_RC
658 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
659# else
660 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVM, uSrcAddr), cbToRead);
661# endif
662 rc = VINF_SUCCESS;
663 }
664 else
665#endif
666 {
667# ifdef IN_RC
668 /*
669 * Try to access it thru the shadow page tables first. Fall back on the
670 * slower PGM method if it fails because the TLB or page table was
671 * modified recently.
672 */
673 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
674 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
675 {
676 cbToRead = cbMinRead;
677 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
678 }
679 if (rc == VERR_ACCESS_DENIED)
680#endif
681 {
682 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
683 if (RT_FAILURE(rc))
684 {
685 if (cbToRead > cbMinRead)
686 {
687 cbToRead = cbMinRead;
688 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
689 }
690 if (RT_FAILURE(rc))
691 {
692#ifndef IN_RC
693 /*
694 * If we fail to find the page via the guest's page tables
695 * we invalidate the page in the host TLB (pertaining to
696 * the guest in the NestedPaging case). See @bugref{6043}.
697 */
698 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
699 {
700 HMInvalidatePage(pVCpu, uSrcAddr);
701 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
702 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
703 }
704#endif
705 }
706 }
707 }
708 }
709
710 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
711 return rc;
712}
713
714
715#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
716DECLINLINE(int) emDisCoreOne(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
717{
718 NOREF(pVM);
719 return DISInstrWithReader(InstrGC, (DISCPUMODE)pDis->uCpuMode, emReadBytes, pVCpu, pDis, pOpsize);
720}
721#endif
722
723
724/**
725 * Disassembles the current instruction.
726 *
727 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
728 * details.
729 *
730 * @param pVM The cross context VM structure.
731 * @param pVCpu The cross context virtual CPU structure.
732 * @param pDis Where to return the parsed instruction info.
733 * @param pcbInstr Where to return the instruction size. (optional)
734 */
735VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
736{
737 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
738 RTGCPTR GCPtrInstr;
739#if 0
740 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
741#else
742/** @todo Get the CPU mode as well while we're at it! */
743 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
744 pCtxCore->rip, &GCPtrInstr);
745#endif
746 if (RT_FAILURE(rc))
747 {
748 Log(("EMInterpretDisasCurrent: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
749 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
750 return rc;
751 }
752 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
753}
754
755
756/**
757 * Disassembles one instruction.
758 *
759 * This is used internally by the interpreter and by trap/access handlers.
760 *
761 * @returns VBox status code.
762 *
763 * @param pVM The cross context VM structure.
764 * @param pVCpu The cross context virtual CPU structure.
765 * @param GCPtrInstr The flat address of the instruction.
766 * @param pCtxCore The context core (used to determine the cpu mode).
767 * @param pDis Where to return the parsed instruction info.
768 * @param pcbInstr Where to return the instruction size. (optional)
769 */
770VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
771 PDISCPUSTATE pDis, unsigned *pcbInstr)
772{
773 NOREF(pVM);
774 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
775 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
776 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
777 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
778 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
779 if (RT_SUCCESS(rc))
780 return VINF_SUCCESS;
781 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
782 return rc;
783}
784
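/*
 * Editorial usage sketch (not part of the original source): a trap handler that
 * needs to know which instruction faulted can use the wrappers above and inspect
 * the returned DISCPUSTATE.  The OP_OUT check is illustrative only.
 *
 *     DISCPUSTATE Dis;
 *     unsigned    cbInstr = 0;
 *     int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
 *     if (RT_SUCCESS(rc) && Dis.pCurInstr->uOpcode == OP_OUT)
 *         ... handle the I/O port write ...
 */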
785
786#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
787static void emCompareWithIem(PVMCPU pVCpu, PCCPUMCTX pEmCtx, PCCPUMCTX pIemCtx,
788 VBOXSTRICTRC rcEm, VBOXSTRICTRC rcIem,
789 uint32_t cbEm, uint32_t cbIem)
790{
791 /* Quick compare. */
792 if ( rcEm == rcIem
793 && cbEm == cbIem
794 && g_cbEmWrote == g_cbIemWrote
795 && memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote) == 0
796 && memcmp(pIemCtx, pEmCtx, sizeof(*pIemCtx)) == 0
797 && (g_fEmFFs & g_fInterestingFFs) == (g_fIemFFs & g_fInterestingFFs)
798 )
799 return;
800
801 /* Report exact differences. */
802 RTLogPrintf("! EM and IEM differs at %04x:%08RGv !\n", g_IncomingCtx.cs.Sel, g_IncomingCtx.rip);
803 if (rcEm != rcIem)
804 RTLogPrintf(" * rcIem=%Rrc rcEm=%Rrc\n", VBOXSTRICTRC_VAL(rcIem), VBOXSTRICTRC_VAL(rcEm));
805 else if (cbEm != cbIem)
806 RTLogPrintf(" * cbIem=%#x cbEm=%#x\n", cbIem, cbEm);
807
808 if (RT_SUCCESS(rcEm) && RT_SUCCESS(rcIem))
809 {
810 if (g_cbIemWrote != g_cbEmWrote)
811 RTLogPrintf("!! g_cbIemWrote=%#x g_cbEmWrote=%#x\n", g_cbIemWrote, g_cbEmWrote);
812 else if (memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote))
813 {
814 RTLogPrintf("!! IemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
815 RTLogPrintf("!! EmWrote  %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbEmWrote), 64), g_abEmWrote);
816 }
817
818 if ((g_fEmFFs & g_fInterestingFFs) != (g_fIemFFs & g_fInterestingFFs))
819 RTLogPrintf("!! g_fIemFFs=%#x g_fEmFFs=%#x (diff=%#x)\n", g_fIemFFs & g_fInterestingFFs,
820 g_fEmFFs & g_fInterestingFFs, (g_fIemFFs ^ g_fEmFFs) & g_fInterestingFFs);
821
822# define CHECK_FIELD(a_Field) \
823 do \
824 { \
825 if (pEmCtx->a_Field != pIemCtx->a_Field) \
826 { \
827 switch (sizeof(pEmCtx->a_Field)) \
828 { \
829 case 1: RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
830 case 2: RTLogPrintf("!! %8s differs - iem=%04x - em=%04x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
831 case 4: RTLogPrintf("!! %8s differs - iem=%08x - em=%08x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
832 case 8: RTLogPrintf("!! %8s differs - iem=%016llx - em=%016llx\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
833 default: RTLogPrintf("!! %8s differs\n", #a_Field); break; \
834 } \
835 cDiffs++; \
836 } \
837 } while (0)
838
839# define CHECK_BIT_FIELD(a_Field) \
840 do \
841 { \
842 if (pEmCtx->a_Field != pIemCtx->a_Field) \
843 { \
844 RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); \
845 cDiffs++; \
846 } \
847 } while (0)
848
849# define CHECK_SEL(a_Sel) \
850 do \
851 { \
852 CHECK_FIELD(a_Sel.Sel); \
853 CHECK_FIELD(a_Sel.Attr.u); \
854 CHECK_FIELD(a_Sel.u64Base); \
855 CHECK_FIELD(a_Sel.u32Limit); \
856 CHECK_FIELD(a_Sel.fFlags); \
857 } while (0)
858
859 unsigned cDiffs = 0;
860 if (memcmp(&pEmCtx->fpu, &pIemCtx->fpu, sizeof(pIemCtx->fpu)))
861 {
862 RTLogPrintf(" the FPU state differs\n");
863 cDiffs++;
864 CHECK_FIELD(fpu.FCW);
865 CHECK_FIELD(fpu.FSW);
866 CHECK_FIELD(fpu.FTW);
867 CHECK_FIELD(fpu.FOP);
868 CHECK_FIELD(fpu.FPUIP);
869 CHECK_FIELD(fpu.CS);
870 CHECK_FIELD(fpu.Rsrvd1);
871 CHECK_FIELD(fpu.FPUDP);
872 CHECK_FIELD(fpu.DS);
873 CHECK_FIELD(fpu.Rsrvd2);
874 CHECK_FIELD(fpu.MXCSR);
875 CHECK_FIELD(fpu.MXCSR_MASK);
876 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
877 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
878 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
879 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
880 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
881 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
882 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
883 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
884 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
885 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
886 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
887 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
888 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
889 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
890 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
891 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
892 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
893 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
894 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
895 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
896 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
897 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
898 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
899 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
900 for (unsigned i = 0; i < RT_ELEMENTS(pEmCtx->fpu.au32RsrvdRest); i++)
901 CHECK_FIELD(fpu.au32RsrvdRest[i]);
902 }
903 CHECK_FIELD(rip);
904 if (pEmCtx->rflags.u != pIemCtx->rflags.u)
905 {
906 RTLogPrintf("!! rflags differs - iem=%08llx em=%08llx\n", pIemCtx->rflags.u, pEmCtx->rflags.u);
907 CHECK_BIT_FIELD(rflags.Bits.u1CF);
908 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
909 CHECK_BIT_FIELD(rflags.Bits.u1PF);
910 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
911 CHECK_BIT_FIELD(rflags.Bits.u1AF);
912 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
913 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
914 CHECK_BIT_FIELD(rflags.Bits.u1SF);
915 CHECK_BIT_FIELD(rflags.Bits.u1TF);
916 CHECK_BIT_FIELD(rflags.Bits.u1IF);
917 CHECK_BIT_FIELD(rflags.Bits.u1DF);
918 CHECK_BIT_FIELD(rflags.Bits.u1OF);
919 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
920 CHECK_BIT_FIELD(rflags.Bits.u1NT);
921 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
922 CHECK_BIT_FIELD(rflags.Bits.u1RF);
923 CHECK_BIT_FIELD(rflags.Bits.u1VM);
924 CHECK_BIT_FIELD(rflags.Bits.u1AC);
925 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
926 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
927 CHECK_BIT_FIELD(rflags.Bits.u1ID);
928 }
929
930 if (!g_fIgnoreRaxRdx)
931 CHECK_FIELD(rax);
932 CHECK_FIELD(rcx);
933 if (!g_fIgnoreRaxRdx)
934 CHECK_FIELD(rdx);
935 CHECK_FIELD(rbx);
936 CHECK_FIELD(rsp);
937 CHECK_FIELD(rbp);
938 CHECK_FIELD(rsi);
939 CHECK_FIELD(rdi);
940 CHECK_FIELD(r8);
941 CHECK_FIELD(r9);
942 CHECK_FIELD(r10);
943 CHECK_FIELD(r11);
944 CHECK_FIELD(r12);
945 CHECK_FIELD(r13); CHECK_FIELD(r14); CHECK_FIELD(r15);
946 CHECK_SEL(cs);
947 CHECK_SEL(ss);
948 CHECK_SEL(ds);
949 CHECK_SEL(es);
950 CHECK_SEL(fs);
951 CHECK_SEL(gs);
952 CHECK_FIELD(cr0);
953 CHECK_FIELD(cr2);
954 CHECK_FIELD(cr3);
955 CHECK_FIELD(cr4);
956 CHECK_FIELD(dr[0]);
957 CHECK_FIELD(dr[1]);
958 CHECK_FIELD(dr[2]);
959 CHECK_FIELD(dr[3]);
960 CHECK_FIELD(dr[6]);
961 CHECK_FIELD(dr[7]);
962 CHECK_FIELD(gdtr.cbGdt);
963 CHECK_FIELD(gdtr.pGdt);
964 CHECK_FIELD(idtr.cbIdt);
965 CHECK_FIELD(idtr.pIdt);
966 CHECK_SEL(ldtr);
967 CHECK_SEL(tr);
968 CHECK_FIELD(SysEnter.cs);
969 CHECK_FIELD(SysEnter.eip);
970 CHECK_FIELD(SysEnter.esp);
971 CHECK_FIELD(msrEFER);
972 CHECK_FIELD(msrSTAR);
973 CHECK_FIELD(msrPAT);
974 CHECK_FIELD(msrLSTAR);
975 CHECK_FIELD(msrCSTAR);
976 CHECK_FIELD(msrSFMASK);
977 CHECK_FIELD(msrKERNELGSBASE);
978
979# undef CHECK_FIELD
980# undef CHECK_BIT_FIELD
981 }
982}
983#endif /* VBOX_COMPARE_IEM_AND_EM */
984
985
986/**
987 * Interprets the current instruction.
988 *
989 * @returns VBox status code.
990 * @retval VINF_* Scheduling instructions.
991 * @retval VERR_EM_INTERPRETER Something we can't cope with.
992 * @retval VERR_* Fatal errors.
993 *
994 * @param pVCpu The cross context virtual CPU structure.
995 * @param pRegFrame The register frame.
996 * Updates the EIP if an instruction was executed successfully.
997 * @param pvFault The fault address (CR2).
998 *
999 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1000 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1001 * to worry about e.g. invalid modrm combinations (!)
1002 */
1003VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1004{
1005 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1006 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1007#ifdef VBOX_WITH_IEM
1008 NOREF(pvFault);
1009
1010# ifdef VBOX_COMPARE_IEM_AND_EM
1011 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1012 g_IncomingCtx = *pCtx;
1013 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1014 g_cbEmWrote = g_cbIemWrote = 0;
1015
1016# ifdef VBOX_COMPARE_IEM_FIRST
1017 /* IEM */
1018 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1019 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1020 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1021 rcIem = VERR_EM_INTERPRETER;
1022 g_IemCtx = *pCtx;
1023 g_fIemFFs = pVCpu->fLocalForcedActions;
1024 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1025 *pCtx = g_IncomingCtx;
1026# endif
1027
1028 /* EM */
1029 RTGCPTR pbCode;
1030 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1031 if (RT_SUCCESS(rcEm))
1032 {
1033 uint32_t cbOp;
1034 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1035 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1036 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1037 if (RT_SUCCESS(rcEm))
1038 {
1039 Assert(cbOp == pDis->cbInstr);
1040 uint32_t cbIgnored;
1041 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
1042 if (RT_SUCCESS(rcEm))
1043 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1044
1045 }
1046 else rcEm = VERR_EM_INTERPRETER;
1047 }
1048 else
1049 rcEm = VERR_EM_INTERPRETER;
1050# ifdef VBOX_SAME_AS_EM
1051 if (rcEm == VERR_EM_INTERPRETER)
1052 {
1053 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1054 return rcEm;
1055 }
1056# endif
1057 g_EmCtx = *pCtx;
1058 g_fEmFFs = pVCpu->fLocalForcedActions;
1059 VBOXSTRICTRC rc = rcEm;
1060
1061# ifdef VBOX_COMPARE_IEM_LAST
1062 /* IEM */
1063 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1064 *pCtx = g_IncomingCtx;
1065 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1066 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1067 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1068 rcIem = VERR_EM_INTERPRETER;
1069 g_IemCtx = *pCtx;
1070 g_fIemFFs = pVCpu->fLocalForcedActions;
1071 rc = rcIem;
1072# endif
1073
1074# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1075 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1076# endif
1077
1078# else
1079 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1080 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1081 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1082 rc = VERR_EM_INTERPRETER;
1083# endif
1084 if (rc != VINF_SUCCESS)
1085 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1086
1087 return rc;
1088#else
1089 RTGCPTR pbCode;
1090 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1091 if (RT_SUCCESS(rc))
1092 {
1093 uint32_t cbOp;
1094 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1095 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1096 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1097 if (RT_SUCCESS(rc))
1098 {
1099 Assert(cbOp == pDis->cbInstr);
1100 uint32_t cbIgnored;
1101 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
1102 if (RT_SUCCESS(rc))
1103 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1104
1105 return rc;
1106 }
1107 }
1108 return VERR_EM_INTERPRETER;
1109#endif
1110}
1111
1112
1113/**
1114 * Interprets the current instruction.
1115 *
1116 * @returns VBox status code.
1117 * @retval VINF_* Scheduling instructions.
1118 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1119 * @retval VERR_* Fatal errors.
1120 *
1121 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1122 * @param pRegFrame The register frame.
1123 * Updates the EIP if an instruction was executed successfully.
1124 * @param pvFault The fault address (CR2).
1125 * @param pcbWritten Size of the write (if applicable).
1126 *
1127 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1128 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1129 * to worry about e.g. invalid modrm combinations (!)
1130 */
1131VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
1132{
1133 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1134 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1135#ifdef VBOX_WITH_IEM
1136 NOREF(pvFault);
1137
1138# ifdef VBOX_COMPARE_IEM_AND_EM
1139 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1140 g_IncomingCtx = *pCtx;
1141 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1142 g_cbEmWrote = g_cbIemWrote = 0;
1143
1144# ifdef VBOX_COMPARE_IEM_FIRST
1145 /* IEM */
1146 uint32_t cbIemWritten = 0;
1147 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
1148 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1149 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1150 rcIem = VERR_EM_INTERPRETER;
1151 g_IemCtx = *pCtx;
1152 g_fIemFFs = pVCpu->fLocalForcedActions;
1153 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1154 *pCtx = g_IncomingCtx;
1155# endif
1156
1157 /* EM */
1158 uint32_t cbEmWritten = 0;
1159 RTGCPTR pbCode;
1160 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1161 if (RT_SUCCESS(rcEm))
1162 {
1163 uint32_t cbOp;
1164 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1165 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1166 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1167 if (RT_SUCCESS(rcEm))
1168 {
1169 Assert(cbOp == pDis->cbInstr);
1170 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbEmWritten);
1171 if (RT_SUCCESS(rcEm))
1172 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1173
1174 }
1175 else
1176 rcEm = VERR_EM_INTERPRETER;
1177 }
1178 else
1179 rcEm = VERR_EM_INTERPRETER;
1180# ifdef VBOX_SAME_AS_EM
1181 if (rcEm == VERR_EM_INTERPRETER)
1182 {
1183 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1184 return rcEm;
1185 }
1186# endif
1187 g_EmCtx = *pCtx;
1188 g_fEmFFs = pVCpu->fLocalForcedActions;
1189 *pcbWritten = cbEmWritten;
1190 VBOXSTRICTRC rc = rcEm;
1191
1192# ifdef VBOX_COMPARE_IEM_LAST
1193 /* IEM */
1194 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1195 *pCtx = g_IncomingCtx;
1196 uint32_t cbIemWritten = 0;
1197 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
1198 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1199 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1200 rcIem = VERR_EM_INTERPRETER;
1201 g_IemCtx = *pCtx;
1202 g_fIemFFs = pVCpu->fLocalForcedActions;
1203 *pcbWritten = cbIemWritten;
1204 rc = rcIem;
1205# endif
1206
1207# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1208 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, cbEmWritten, cbIemWritten);
1209# endif
1210
1211# else
1212 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
1213 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1214 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1215 rc = VERR_EM_INTERPRETER;
1216# endif
1217 if (rc != VINF_SUCCESS)
1218 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1219
1220 return rc;
1221#else
1222 RTGCPTR pbCode;
1223 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1224 if (RT_SUCCESS(rc))
1225 {
1226 uint32_t cbOp;
1227 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1228 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1229 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1230 if (RT_SUCCESS(rc))
1231 {
1232 Assert(cbOp == pDis->cbInstr);
1233 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, pcbWritten);
1234 if (RT_SUCCESS(rc))
1235 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1236
1237 return rc;
1238 }
1239 }
1240 return VERR_EM_INTERPRETER;
1241#endif
1242}
1243
1244
1245/**
1246 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1247 *
1248 * IP/EIP/RIP *IS* updated!
1249 *
1250 * @returns VBox strict status code.
1251 * @retval VINF_* Scheduling instructions. When these are returned, it
1252 * starts to get a bit tricky to know whether code was
1253 * executed or not... We'll address this when it becomes a problem.
1254 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1255 * @retval VERR_* Fatal errors.
1256 *
1257 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1258 * @param pDis The disassembler cpu state for the instruction to be
1259 * interpreted.
1260 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1261 * @param pvFault The fault address (CR2).
1262 * @param enmCodeType Code type (user/supervisor)
1263 *
1264 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1265 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1266 * to worry about e.g. invalid modrm combinations (!)
1267 *
1268 * @todo At this time we do NOT check if the instruction overwrites vital information.
1269 * Make sure this can't happen!! (will add some assertions/checks later)
1270 */
1271VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
1272 RTGCPTR pvFault, EMCODETYPE enmCodeType)
1273{
1274 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1275 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1276#ifdef VBOX_WITH_IEM
1277 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
1278
1279# ifdef VBOX_COMPARE_IEM_AND_EM
1280 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1281 g_IncomingCtx = *pCtx;
1282 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1283 g_cbEmWrote = g_cbIemWrote = 0;
1284
1285# ifdef VBOX_COMPARE_IEM_FIRST
1286 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1287 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1288 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1289 rcIem = VERR_EM_INTERPRETER;
1290 g_IemCtx = *pCtx;
1291 g_fIemFFs = pVCpu->fLocalForcedActions;
1292 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1293 *pCtx = g_IncomingCtx;
1294# endif
1295
1296 /* EM */
1297 uint32_t cbIgnored;
1298 VBOXSTRICTRC rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1299 if (RT_SUCCESS(rcEm))
1300 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1301# ifdef VBOX_SAME_AS_EM
1302 if (rcEm == VERR_EM_INTERPRETER)
1303 {
1304 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1305 return rcEm;
1306 }
1307# endif
1308 g_EmCtx = *pCtx;
1309 g_fEmFFs = pVCpu->fLocalForcedActions;
1310 VBOXSTRICTRC rc = rcEm;
1311
1312# ifdef VBOX_COMPARE_IEM_LAST
1313 /* IEM */
1314 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1315 *pCtx = g_IncomingCtx;
1316 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1317 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1318 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1319 rcIem = VERR_EM_INTERPRETER;
1320 g_IemCtx = *pCtx;
1321 g_fIemFFs = pVCpu->fLocalForcedActions;
1322 rc = rcIem;
1323# endif
1324
1325# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1326 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1327# endif
1328
1329# else
1330 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1331 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1332 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1333 rc = VERR_EM_INTERPRETER;
1334# endif
1335
1336 if (rc != VINF_SUCCESS)
1337 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1338
1339 return rc;
1340#else
1341 uint32_t cbIgnored;
1342 VBOXSTRICTRC rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1343 if (RT_SUCCESS(rc))
1344 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1345 return rc;
1346#endif
1347}
1348
1349#ifdef IN_RC
1350
1351DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1352{
1353 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1354 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1355 return rc;
1356 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1357}
1358
1359
1360/**
1361 * Interpret IRET (currently only to V86 code) - PATM only.
1362 *
1363 * @returns VBox status code.
1364 * @param pVM The cross context VM structure.
1365 * @param pVCpu The cross context virtual CPU structure.
1366 * @param pRegFrame The register frame.
1367 *
1368 */
1369VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1370{
1371 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1372 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1373 int rc;
1374
1375 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1376 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1377 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1378 * this function. Fear that it may guru on us, thus not converted to
1379 * IEM. */
1380
1381 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1382 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1383 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1384 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1385 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1386
1387 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1388 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1389 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1390 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1391 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1392 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1393 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1394
1395 pRegFrame->eip = eip & 0xffff;
1396 pRegFrame->cs.Sel = cs;
1397
1398 /* Mask away all reserved bits */
1399 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1400 eflags &= uMask;
1401
1402 CPUMRawSetEFlags(pVCpu, eflags);
1403 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1404
1405 pRegFrame->esp = esp;
1406 pRegFrame->ss.Sel = ss;
1407 pRegFrame->ds.Sel = ds;
1408 pRegFrame->es.Sel = es;
1409 pRegFrame->fs.Sel = fs;
1410 pRegFrame->gs.Sel = gs;
1411
1412 return VINF_SUCCESS;
1413}
1414
1415# ifndef VBOX_WITH_IEM
1416/**
1417 * IRET Emulation.
1418 */
1419static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1420{
1421#ifdef VBOX_WITH_RAW_RING1
1422 NOREF(pvFault); NOREF(pcbSize); NOREF(pDis);
1423 if (EMIsRawRing1Enabled(pVM))
1424 {
1425 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1426 RTGCUINTPTR eip, cs, esp, ss, eflags, uMask;
1427 int rc;
1428 uint32_t cpl, rpl;
1429
1430 /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */
1431 /** @todo we don't verify all the edge cases that generate #GP faults */
1432
1433 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1434 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1435 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1436 * this function. Fear that it may guru on us, thus not converted to
1437 * IEM. */
1438
1439 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1440 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1441 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1442 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1443 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1444
1445 /* Deal with V86 above. */
1446 if (eflags & X86_EFL_VM)
1447 return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
1448
1449 cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
1450 rpl = cs & X86_SEL_RPL;
1451
1452 Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags));
1453 if (rpl != cpl)
1454 {
1455 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1456 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1457 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1458 Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl));
1459 Log(("emInterpretIret: SS:ESP=%04x:%08x\n", ss, esp));
1460 pRegFrame->ss.Sel = ss;
1461 pRegFrame->esp = esp;
1462 }
1463 pRegFrame->cs.Sel = cs;
1464 pRegFrame->eip = eip;
1465
1466 /* Adjust CS & SS as required. */
1467 CPUMRCRecheckRawState(pVCpu, pRegFrame);
1468
1469 /* Mask away all reserved bits */
1470 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1471 eflags &= uMask;
1472
1473 CPUMRawSetEFlags(pVCpu, eflags);
1474 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1475 return VINF_SUCCESS;
1476 }
1477#else
1478 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
1479#endif
1480 return VERR_EM_INTERPRETER;
1481}
1482# endif /* !VBOX_WITH_IEM */
1483
1484#endif /* IN_RC */
1485
1486
1487
1488/*
1489 *
1490 * Old interpreter primitives used by HM, move/eliminate later.
1491 * Old interpreter primitives used by HM, move/eliminate later.
1492 * Old interpreter primitives used by HM, move/eliminate later.
1493 * Old interpreter primitives used by HM, move/eliminate later.
1494 * Old interpreter primitives used by HM, move/eliminate later.
1495 *
1496 */
1497
1498
1499/**
1500 * Interpret CPUID given the parameters in the CPU context.
1501 *
1502 * @returns VBox status code.
1503 * @param pVM The cross context VM structure.
1504 * @param pVCpu The cross context virtual CPU structure.
1505 * @param pRegFrame The register frame.
1506 *
1507 */
1508VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1509{
1510 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1511 uint32_t iLeaf = pRegFrame->eax;
1512 uint32_t iSubLeaf = pRegFrame->ecx;
1513 NOREF(pVM);
1514
1515 /* cpuid clears the high dwords of the affected 64 bits registers. */
1516 pRegFrame->rax = 0;
1517 pRegFrame->rbx = 0;
1518 pRegFrame->rcx = 0;
1519 pRegFrame->rdx = 0;
1520
1521 /* Note: operates the same in 64 and non-64 bits mode. */
1522 CPUMGetGuestCpuId(pVCpu, iLeaf, iSubLeaf, &pRegFrame->eax, &pRegFrame->ebx, &pRegFrame->ecx, &pRegFrame->edx);
1523 Log(("Emulate: CPUID %x/%x -> %08x %08x %08x %08x\n", iLeaf, iSubLeaf, pRegFrame->eax, pRegFrame->ebx, pRegFrame->ecx, pRegFrame->edx));
1524 return VINF_SUCCESS;
1525}
1526
1527
1528/**
1529 * Interpret RDTSC.
1530 *
1531 * @returns VBox status code.
1532 * @param pVM The cross context VM structure.
1533 * @param pVCpu The cross context virtual CPU structure.
1534 * @param pRegFrame The register frame.
1535 *
1536 */
1537VMM_INT_DECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1538{
1539 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1540 unsigned uCR4 = CPUMGetGuestCR4(pVCpu);
1541
1542 if (uCR4 & X86_CR4_TSD)
1543 return VERR_EM_INTERPRETER; /* genuine #GP */
1544
1545 uint64_t uTicks = TMCpuTickGet(pVCpu);
1546#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1547 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
1548#endif
1549
1550 /* Same behaviour in 32 & 64 bits mode */
1551 pRegFrame->rax = RT_LO_U32(uTicks);
1552 pRegFrame->rdx = RT_HI_U32(uTicks);
1553#ifdef VBOX_COMPARE_IEM_AND_EM
1554 g_fIgnoreRaxRdx = true;
1555#endif
1556
1557 NOREF(pVM);
1558 return VINF_SUCCESS;
1559}
1560
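/*
 * Editorial usage sketch (not part of the original source): an RDTSC intercept
 * handler calls the helper and, on success, advances RIP past the two opcode
 * bytes (0F 31).  pCtx is the guest context of the calling EMT.
 *
 *     int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
 *     if (rc == VINF_SUCCESS)
 *         pCtx->rip += 2;
 */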
1561/**
1562 * Interpret RDTSCP.
1563 *
1564 * @returns VBox status code.
1565 * @param pVM The cross context VM structure.
1566 * @param pVCpu The cross context virtual CPU structure.
1567 * @param pCtx The CPU context.
1568 *
1569 */
1570VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1571{
1572 Assert(pCtx == CPUMQueryGuestCtxPtr(pVCpu));
1573 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1574
1575 if (!pVM->cpum.ro.GuestFeatures.fRdTscP)
1576 {
1577 AssertFailed();
1578 return VERR_EM_INTERPRETER; /* genuine #UD */
1579 }
1580
1581 if (uCR4 & X86_CR4_TSD)
1582 return VERR_EM_INTERPRETER; /* genuine #GP */
1583
1584 uint64_t uTicks = TMCpuTickGet(pVCpu);
1585#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1586 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
1587#endif
1588
1589 /* Same behaviour in 32-bit and 64-bit mode. */
1590 pCtx->rax = RT_LO_U32(uTicks);
1591 pCtx->rdx = RT_HI_U32(uTicks);
1592#ifdef VBOX_COMPARE_IEM_AND_EM
1593 g_fIgnoreRaxRdx = true;
1594#endif
1595 /* Low dword of the TSC_AUX msr only. */
1596 VBOXSTRICTRC rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx); Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1597 pCtx->rcx &= UINT32_C(0xffffffff);
1598
1599 return VINF_SUCCESS;
1600}
1601
1602/**
1603 * Interpret RDPMC.
1604 *
1605 * @returns VBox status code.
1606 * @param pVM The cross context VM structure.
1607 * @param pVCpu The cross context virtual CPU structure.
1608 * @param pRegFrame The register frame.
1609 *
1610 */
1611VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1612{
1613 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1614 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1615
1616 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1617 if ( !(uCR4 & X86_CR4_PCE)
1618 && CPUMGetGuestCPL(pVCpu) != 0)
1619 {
1620 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1621 return VERR_EM_INTERPRETER; /* genuine #GP */
1622 }
1623
1624 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1625 pRegFrame->rax = 0;
1626 pRegFrame->rdx = 0;
1627 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1628 * ecx but see @bugref{3472}! */
1629
1630 NOREF(pVM);
1631 return VINF_SUCCESS;
1632}
1633
1634
1635/**
1636 * MWAIT Emulation.
1637 */
1638VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1639{
1640 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1641 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1642 NOREF(pVM);
1643
1644 /* Get the current privilege level. */
1645 cpl = CPUMGetGuestCPL(pVCpu);
1646 if (cpl != 0)
1647 return VERR_EM_INTERPRETER; /* supervisor only */
1648
1649 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1650 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1651 return VERR_EM_INTERPRETER; /* not supported */
1652
1653 /*
1654 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1655 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1656 */
1657 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1658 if (pRegFrame->ecx > 1)
1659 {
1660 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1661 return VERR_EM_INTERPRETER; /* illegal value. */
1662 }
1663
1664 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1665 {
1666 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1667 return VERR_EM_INTERPRETER; /* illegal value. */
1668 }
1669
1670 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1671}
1672
1673
1674/**
1675 * MONITOR Emulation.
1676 */
1677VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1678{
1679 uint32_t u32Dummy, u32ExtFeatures, cpl;
1680 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1681 NOREF(pVM);
1682
1683 if (pRegFrame->ecx != 0)
1684 {
1685 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1686 return VERR_EM_INTERPRETER; /* illegal value. */
1687 }
1688
1689 /* Get the current privilege level. */
1690 cpl = CPUMGetGuestCPL(pVCpu);
1691 if (cpl != 0)
1692 return VERR_EM_INTERPRETER; /* supervisor only */
1693
1694 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1695 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1696 return VERR_EM_INTERPRETER; /* not supported */
1697
1698 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1699 return VINF_SUCCESS;
1700}
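
/*
 * A hypothetical exit-handler flow for the MONITOR/MWAIT pair above (the real
 * VT-x/AMD-V callers add their own RIP advancing and error handling, so this
 * only shows the calling shape):
 *
 *      rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));     // arm the monitor on rAX
 *      ...
 *      rcStrict = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx)); // may halt this vCPU
 *
 * EMMonitorWaitPrepare records the armed address and EMMonitorWaitPerform does
 * the actual waiting; MWAIT's ECX bit 0 asks for interrupts to break the wait
 * even when IF=0, which is why it is cross-checked against CPUID leaf 5 above.
 */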
1701
1702
1703/* VT-x only: */
1704
1705/**
1706 * Interpret INVLPG.
1707 *
1708 * @returns VBox status code.
1709 * @param pVM The cross context VM structure.
1710 * @param pVCpu The cross context virtual CPU structure.
1711 * @param pRegFrame The register frame.
1712 * @param pAddrGC Operand address.
1713 *
1714 */
1715VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC)
1716{
1717 /** @todo Is the address always a flat linear address, or is it DS-based
1718 * (in the absence of segment override prefixes)?
1719 */
1720 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1721 NOREF(pVM); NOREF(pRegFrame);
1722#ifdef IN_RC
1723 LogFlow(("RC: EMULATE: invlpg %RGv\n", pAddrGC));
1724#endif
1725 VBOXSTRICTRC rc = PGMInvalidatePage(pVCpu, pAddrGC);
1726 if ( rc == VINF_SUCCESS
1727 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
1728 return VINF_SUCCESS;
1729 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
1730 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), pAddrGC),
1731 VERR_EM_INTERPRETER);
1732 return rc;
1733}
1734
1735
1736#ifdef LOG_ENABLED
1737static const char *emMSRtoString(uint32_t uMsr)
1738{
1739 switch (uMsr)
1740 {
1741 case MSR_IA32_APICBASE: return "MSR_IA32_APICBASE";
1742 case MSR_IA32_CR_PAT: return "MSR_IA32_CR_PAT";
1743 case MSR_IA32_SYSENTER_CS: return "MSR_IA32_SYSENTER_CS";
1744 case MSR_IA32_SYSENTER_EIP: return "MSR_IA32_SYSENTER_EIP";
1745 case MSR_IA32_SYSENTER_ESP: return "MSR_IA32_SYSENTER_ESP";
1746 case MSR_K6_EFER: return "MSR_K6_EFER";
1747 case MSR_K8_SF_MASK: return "MSR_K8_SF_MASK";
1748 case MSR_K6_STAR: return "MSR_K6_STAR";
1749 case MSR_K8_LSTAR: return "MSR_K8_LSTAR";
1750 case MSR_K8_CSTAR: return "MSR_K8_CSTAR";
1751 case MSR_K8_FS_BASE: return "MSR_K8_FS_BASE";
1752 case MSR_K8_GS_BASE: return "MSR_K8_GS_BASE";
1753 case MSR_K8_KERNEL_GS_BASE: return "MSR_K8_KERNEL_GS_BASE";
1754 case MSR_K8_TSC_AUX: return "MSR_K8_TSC_AUX";
1755 case MSR_IA32_BIOS_SIGN_ID: return "Unsupported MSR_IA32_BIOS_SIGN_ID";
1756 case MSR_IA32_PLATFORM_ID: return "Unsupported MSR_IA32_PLATFORM_ID";
1757 case MSR_IA32_BIOS_UPDT_TRIG: return "Unsupported MSR_IA32_BIOS_UPDT_TRIG";
1758 case MSR_IA32_TSC: return "MSR_IA32_TSC";
1759 case MSR_IA32_MISC_ENABLE: return "MSR_IA32_MISC_ENABLE";
1760 case MSR_IA32_MTRR_CAP: return "MSR_IA32_MTRR_CAP";
1761 case MSR_IA32_MCG_CAP: return "Unsupported MSR_IA32_MCG_CAP";
1762 case MSR_IA32_MCG_STATUS: return "Unsupported MSR_IA32_MCG_STATUS";
1763 case MSR_IA32_MCG_CTRL: return "Unsupported MSR_IA32_MCG_CTRL";
1764 case MSR_IA32_MTRR_DEF_TYPE: return "MSR_IA32_MTRR_DEF_TYPE";
1765 case MSR_K7_EVNTSEL0: return "Unsupported MSR_K7_EVNTSEL0";
1766 case MSR_K7_EVNTSEL1: return "Unsupported MSR_K7_EVNTSEL1";
1767 case MSR_K7_EVNTSEL2: return "Unsupported MSR_K7_EVNTSEL2";
1768 case MSR_K7_EVNTSEL3: return "Unsupported MSR_K7_EVNTSEL3";
1769 case MSR_IA32_MC0_CTL: return "Unsupported MSR_IA32_MC0_CTL";
1770 case MSR_IA32_MC0_STATUS: return "Unsupported MSR_IA32_MC0_STATUS";
1771 case MSR_IA32_PERFEVTSEL0: return "Unsupported MSR_IA32_PERFEVTSEL0";
1772 case MSR_IA32_PERFEVTSEL1: return "Unsupported MSR_IA32_PERFEVTSEL1";
1773 case MSR_IA32_PERF_STATUS: return "MSR_IA32_PERF_STATUS";
1774 case MSR_IA32_PLATFORM_INFO: return "MSR_IA32_PLATFORM_INFO";
1775 case MSR_IA32_PERF_CTL: return "Unsupported MSR_IA32_PERF_CTL";
1776 case MSR_K7_PERFCTR0: return "Unsupported MSR_K7_PERFCTR0";
1777 case MSR_K7_PERFCTR1: return "Unsupported MSR_K7_PERFCTR1";
1778 case MSR_K7_PERFCTR2: return "Unsupported MSR_K7_PERFCTR2";
1779 case MSR_K7_PERFCTR3: return "Unsupported MSR_K7_PERFCTR3";
1780 case MSR_IA32_PMC0: return "Unsupported MSR_IA32_PMC0";
1781 case MSR_IA32_PMC1: return "Unsupported MSR_IA32_PMC1";
1782 case MSR_IA32_PMC2: return "Unsupported MSR_IA32_PMC2";
1783 case MSR_IA32_PMC3: return "Unsupported MSR_IA32_PMC3";
1784 }
1785 return "Unknown MSR";
1786}
1787#endif /* LOG_ENABLED */
1788
1789
1790/**
1791 * Interpret RDMSR
1792 *
1793 * @returns VBox status code.
1794 * @param pVM The cross context VM structure.
1795 * @param pVCpu The cross context virtual CPU structure.
1796 * @param pRegFrame The register frame.
1797 */
1798VMM_INT_DECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1799{
1800 NOREF(pVM);
1801
1802 /* Get the current privilege level. */
1803 if (CPUMGetGuestCPL(pVCpu) != 0)
1804 {
1805 Log4(("EM: Refuse RDMSR: CPL != 0\n"));
1806 return VERR_EM_INTERPRETER; /* supervisor only */
1807 }
1808
1809 uint64_t uValue;
1810 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
1811 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1812 {
1813 Log4(("EM: Refuse RDMSR: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1814 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_READ);
1815 return VERR_EM_INTERPRETER;
1816 }
1817 pRegFrame->rax = RT_LO_U32(uValue);
1818 pRegFrame->rdx = RT_HI_U32(uValue);
1819 LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
1820 return VINF_SUCCESS;
1821}
1822
1823
1824/**
1825 * Interpret WRMSR
1826 *
1827 * @returns VBox status code.
1828 * @param pVM The cross context VM structure.
1829 * @param pVCpu The cross context virtual CPU structure.
1830 * @param pRegFrame The register frame.
1831 */
1832VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1833{
1834 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1835
1836 /* Check the current privilege level, this instruction is supervisor only. */
1837 if (CPUMGetGuestCPL(pVCpu) != 0)
1838 {
1839 Log4(("EM: Refuse WRMSR: CPL != 0\n"));
1840 return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
1841 }
1842
1843 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
1844 if (rcStrict != VINF_SUCCESS)
1845 {
1846 Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1847 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
1848 return VERR_EM_INTERPRETER;
1849 }
1850 LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
1851 RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
1852 NOREF(pVM);
1853 return VINF_SUCCESS;
1854}
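
/*
 * Both MSR helpers above use the architectural EDX:EAX convention with the
 * MSR index in ECX; the RT_MAKE_U64/RT_LO_U32/RT_HI_U32 plumbing is simply:
 *
 *      uint64_t const uValue = RT_MAKE_U64(uEax, uEdx);   // low dword first
 *      Assert(RT_LO_U32(uValue) == uEax && RT_HI_U32(uValue) == uEdx);
 */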
1855
1856
1857/**
1858 * Interpret DRx write.
1859 *
1860 * @returns VBox status code.
1861 * @param pVM The cross context VM structure.
1862 * @param pVCpu The cross context virtual CPU structure.
1863 * @param pRegFrame The register frame.
1864 * @param DestRegDrx DRx register index (USE_REG_DR*)
1865 * @param SrcRegGen General purpose register index (USE_REG_E**)
1866 *
1867 */
1868VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1869{
1870 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1871 uint64_t uNewDrX;
1872 int rc;
1873 NOREF(pVM);
1874
1875 if (CPUMIsGuestIn64BitCode(pVCpu))
1876 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1877 else
1878 {
1879 uint32_t val32;
1880 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1881 uNewDrX = val32;
1882 }
1883
1884 if (RT_SUCCESS(rc))
1885 {
1886 if (DestRegDrx == 6)
1887 {
1888 uNewDrX |= X86_DR6_RA1_MASK;
1889 uNewDrX &= ~X86_DR6_RAZ_MASK;
1890 }
1891 else if (DestRegDrx == 7)
1892 {
1893 uNewDrX |= X86_DR7_RA1_MASK;
1894 uNewDrX &= ~X86_DR7_RAZ_MASK;
1895 }
1896
1897 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1898 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1899 if (RT_SUCCESS(rc))
1900 return rc;
1901 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1902 }
1903 return VERR_EM_INTERPRETER;
1904}
1905
1906
1907/**
1908 * Interpret DRx read.
1909 *
1910 * @returns VBox status code.
1911 * @param pVM The cross context VM structure.
1912 * @param pVCpu The cross context virtual CPU structure.
1913 * @param pRegFrame The register frame.
1914 * @param DestRegGen General purpose register index (USE_REG_E**)
1915 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1916 */
1917VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1918{
1919 uint64_t val64;
1920 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1921 NOREF(pVM);
1922
1923 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1924 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1925 if (CPUMIsGuestIn64BitCode(pVCpu))
1926 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1927 else
1928 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1929
1930 if (RT_SUCCESS(rc))
1931 return VINF_SUCCESS;
1932
1933 return VERR_EM_INTERPRETER;
1934}
1935
1936
1937#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
1938
1939
1940
1941
1942
1943
1944/*
1945 *
1946 * The old interpreter.
1947 * The old interpreter.
1948 * The old interpreter.
1949 * The old interpreter.
1950 * The old interpreter.
1951 *
1952 */
1953
1954DECLINLINE(int) emRamRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1955{
1956#ifdef IN_RC
1957 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1958 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1959 return rc;
1960 /*
1961 * The page pool cache may end up here in some cases because it
1962 * flushed one of the shadow mappings used by the trapping
1963 * instruction and it either flushed the TLB or the CPU reused it.
1964 */
1965#else
1966 NOREF(pVM);
1967#endif
1968 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1969}
1970
1971
1972DECLINLINE(int) emRamWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, uint32_t cb)
1973{
1974 /* Don't use MMGCRamWrite here as it does not respect zero pages, shared
1975 pages or write monitored pages. */
1976 NOREF(pVM);
1977#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
1978 int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, /*fMayTrap*/ false);
1979#else
1980 int rc = VINF_SUCCESS;
1981#endif
1982#ifdef VBOX_COMPARE_IEM_AND_EM
1983 Log(("EM Wrote: %RGv %.*Rhxs rc=%Rrc\n", GCPtrDst, RT_MAX(RT_MIN(cb, 64), 1), pvSrc, rc));
1984 g_cbEmWrote = cb;
1985 memcpy(g_abEmWrote, pvSrc, RT_MIN(cb, sizeof(g_abEmWrote)));
1986#endif
1987 return rc;
1988}
1989
1990
1991/** Convert sel:addr to a flat GC address. */
1992DECLINLINE(RTGCPTR) emConvertToFlatAddr(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, PDISOPPARAM pParam, RTGCPTR pvAddr)
1993{
1994 DISSELREG enmPrefixSeg = DISDetectSegReg(pDis, pParam);
1995 return SELMToFlat(pVM, enmPrefixSeg, pRegFrame, pvAddr);
1996}
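
/*
 * Conceptually, for a normal protected-mode data segment the conversion done
 * by SELMToFlat boils down to adding the segment base (sketch only, pSReg
 * standing in for the effective CPUMSELREG):
 *
 *      RTGCPTR GCPtrFlat = pSReg->u64Base + GCPtrOffset;
 *
 * The real function also copes with real mode, V86 mode and long mode (where
 * most bases are forced to zero); DISDetectSegReg merely picks the effective
 * segment, honouring any override prefix on the instruction.
 */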
1997
1998
1999#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
2000/**
2001 * Get the mnemonic for the disassembled instruction.
2002 *
2003 * GC/R0 doesn't include the strings in the DIS tables because
2004 * of limited space.
2005 */
2006static const char *emGetMnemonic(PDISCPUSTATE pDis)
2007{
2008 switch (pDis->pCurInstr->uOpcode)
2009 {
2010 case OP_XCHG: return "Xchg";
2011 case OP_DEC: return "Dec";
2012 case OP_INC: return "Inc";
2013 case OP_POP: return "Pop";
2014 case OP_OR: return "Or";
2015 case OP_AND: return "And";
2016 case OP_MOV: return "Mov";
2017 case OP_INVLPG: return "InvlPg";
2018 case OP_CPUID: return "CpuId";
2019 case OP_MOV_CR: return "MovCRx";
2020 case OP_MOV_DR: return "MovDRx";
2021 case OP_LLDT: return "LLdt";
2022 case OP_LGDT: return "LGdt";
2023 case OP_LIDT: return "LIdt";
2024 case OP_CLTS: return "Clts";
2025 case OP_MONITOR: return "Monitor";
2026 case OP_MWAIT: return "MWait";
2027 case OP_RDMSR: return "Rdmsr";
2028 case OP_WRMSR: return "Wrmsr";
2029 case OP_ADD: return "Add";
2030 case OP_ADC: return "Adc";
2031 case OP_SUB: return "Sub";
2032 case OP_SBB: return "Sbb";
2033 case OP_RDTSC: return "Rdtsc";
2034 case OP_STI: return "Sti";
2035 case OP_CLI: return "Cli";
2036 case OP_XADD: return "XAdd";
2037 case OP_HLT: return "Hlt";
2038 case OP_IRET: return "Iret";
2039 case OP_MOVNTPS: return "MovNTPS";
2040 case OP_STOSWD: return "StosWD";
2041 case OP_WBINVD: return "WbInvd";
2042 case OP_XOR: return "Xor";
2043 case OP_BTR: return "Btr";
2044 case OP_BTS: return "Bts";
2045 case OP_BTC: return "Btc";
2046 case OP_LMSW: return "Lmsw";
2047 case OP_SMSW: return "Smsw";
2048 case OP_CMPXCHG: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg" : "CmpXchg";
2049 case OP_CMPXCHG8B: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg8b" : "CmpXchg8b";
2050
2051 default:
2052 Log(("Unknown opcode %d\n", pDis->pCurInstr->uOpcode));
2053 return "???";
2054 }
2055}
2056#endif /* VBOX_STRICT || LOG_ENABLED */
2057
2058
2059/**
2060 * XCHG instruction emulation.
2061 */
2062static int emInterpretXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2063{
2064 DISQPVPARAMVAL param1, param2;
2065 NOREF(pvFault);
2066
2067 /* Source to make DISQueryParamVal read the register value - ugly hack */
2068 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2069 if(RT_FAILURE(rc))
2070 return VERR_EM_INTERPRETER;
2071
2072 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2073 if(RT_FAILURE(rc))
2074 return VERR_EM_INTERPRETER;
2075
2076#ifdef IN_RC
2077 if (TRPMHasTrap(pVCpu))
2078 {
2079 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2080 {
2081#endif
2082 RTGCPTR pParam1 = 0, pParam2 = 0;
2083 uint64_t valpar1, valpar2;
2084
2085 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
2086 switch(param1.type)
2087 {
2088 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
2089 valpar1 = param1.val.val64;
2090 break;
2091
2092 case DISQPV_TYPE_ADDRESS:
2093 pParam1 = (RTGCPTR)param1.val.val64;
2094 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2095 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2096 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2097 if (RT_FAILURE(rc))
2098 {
2099 AssertMsgFailed(("MMGCRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2100 return VERR_EM_INTERPRETER;
2101 }
2102 break;
2103
2104 default:
2105 AssertFailed();
2106 return VERR_EM_INTERPRETER;
2107 }
2108
2109 switch(param2.type)
2110 {
2111 case DISQPV_TYPE_ADDRESS:
2112 pParam2 = (RTGCPTR)param2.val.val64;
2113 pParam2 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pParam2);
2114 EM_ASSERT_FAULT_RETURN(pParam2 == pvFault, VERR_EM_INTERPRETER);
2115 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar2, pParam2, param2.size);
2116 if (RT_FAILURE(rc))
2117 {
2118 AssertMsgFailedReturn(("MMGCRamRead %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc), VERR_EM_INTERPRETER);
2119 }
2120 break;
2121
2122 case DISQPV_TYPE_IMMEDIATE:
2123 valpar2 = param2.val.val64;
2124 break;
2125
2126 default:
2127 AssertFailed();
2128 return VERR_EM_INTERPRETER;
2129 }
2130
2131 /* Write value of parameter 2 to parameter 1 (reg or memory address) */
2132 if (pParam1 == 0)
2133 {
2134 Assert(param1.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
2135 switch(param1.size)
2136 {
2137 case 1: //special case for AH etc
2138 rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t )valpar2); break;
2139 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)valpar2); break;
2140 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)valpar2); break;
2141 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, valpar2); break;
2142 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2143 }
2144 if (RT_FAILURE(rc))
2145 return VERR_EM_INTERPRETER;
2146 }
2147 else
2148 {
2149 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar2, param1.size);
2150 if (RT_FAILURE(rc))
2151 {
2152 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2153 return VERR_EM_INTERPRETER;
2154 }
2155 }
2156
2157 /* Write value of parameter 1 to parameter 2 (reg or memory address) */
2158 if (pParam2 == 0)
2159 {
2160 Assert(param2.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
2161 switch(param2.size)
2162 {
2163 case 1: //special case for AH etc
2164 rc = DISWriteReg8(pRegFrame, pDis->Param2.Base.idxGenReg, (uint8_t )valpar1); break;
2165 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param2.Base.idxGenReg, (uint16_t)valpar1); break;
2166 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param2.Base.idxGenReg, (uint32_t)valpar1); break;
2167 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param2.Base.idxGenReg, valpar1); break;
2168 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2169 }
2170 if (RT_FAILURE(rc))
2171 return VERR_EM_INTERPRETER;
2172 }
2173 else
2174 {
2175 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam2, &valpar1, param2.size);
2176 if (RT_FAILURE(rc))
2177 {
2178 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc));
2179 return VERR_EM_INTERPRETER;
2180 }
2181 }
2182
2183 *pcbSize = param2.size;
2184 return VINF_SUCCESS;
2185#ifdef IN_RC
2186 }
2187 }
2188 return VERR_EM_INTERPRETER;
2189#endif
2190}
2191
2192
2193/**
2194 * INC and DEC emulation.
2195 */
2196static int emInterpretIncDec(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2197 PFNEMULATEPARAM2 pfnEmulate)
2198{
2199 DISQPVPARAMVAL param1;
2200 NOREF(pvFault);
2201
2202 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2203 if(RT_FAILURE(rc))
2204 return VERR_EM_INTERPRETER;
2205
2206#ifdef IN_RC
2207 if (TRPMHasTrap(pVCpu))
2208 {
2209 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2210 {
2211#endif
2212 RTGCPTR pParam1 = 0;
2213 uint64_t valpar1;
2214
2215 if (param1.type == DISQPV_TYPE_ADDRESS)
2216 {
2217 pParam1 = (RTGCPTR)param1.val.val64;
2218 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2219#ifdef IN_RC
2220 /* Safety check (in theory it could cross a page boundary and fault there though) */
2221 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2222#endif
2223 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2224 if (RT_FAILURE(rc))
2225 {
2226 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2227 return VERR_EM_INTERPRETER;
2228 }
2229 }
2230 else
2231 {
2232 AssertFailed();
2233 return VERR_EM_INTERPRETER;
2234 }
2235
2236 uint32_t eflags;
2237
2238 eflags = pfnEmulate(&valpar1, param1.size);
2239
2240 /* Write result back */
2241 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2242 if (RT_FAILURE(rc))
2243 {
2244 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2245 return VERR_EM_INTERPRETER;
2246 }
2247
2248 /* Update guest's eflags and finish. */
2249 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2250 | (eflags & (X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2251
2252 /* All done! */
2253 *pcbSize = param1.size;
2254 return VINF_SUCCESS;
2255#ifdef IN_RC
2256 }
2257 }
2258 return VERR_EM_INTERPRETER;
2259#endif
2260}
2261
2262
2263/**
2264 * POP Emulation.
2265 */
2266static int emInterpretPop(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2267{
2268 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
2269 DISQPVPARAMVAL param1;
2270 NOREF(pvFault);
2271
2272 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2273 if(RT_FAILURE(rc))
2274 return VERR_EM_INTERPRETER;
2275
2276#ifdef IN_RC
2277 if (TRPMHasTrap(pVCpu))
2278 {
2279 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2280 {
2281#endif
2282 RTGCPTR pParam1 = 0;
2283 uint32_t valpar1;
2284 RTGCPTR pStackVal;
2285
2286 /* Read stack value first */
2287 if (CPUMGetGuestCodeBits(pVCpu) == 16)
2288 return VERR_EM_INTERPRETER; /* No legacy 16 bits stuff here, please. */
2289
2290 /* Convert address; don't bother checking limits etc, as we only read here */
2291 pStackVal = SELMToFlat(pVM, DISSELREG_SS, pRegFrame, (RTGCPTR)pRegFrame->esp);
2292 if (pStackVal == 0)
2293 return VERR_EM_INTERPRETER;
2294
2295 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pStackVal, param1.size);
2296 if (RT_FAILURE(rc))
2297 {
2298 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pStackVal, param1.size, rc));
2299 return VERR_EM_INTERPRETER;
2300 }
2301
2302 if (param1.type == DISQPV_TYPE_ADDRESS)
2303 {
2304 pParam1 = (RTGCPTR)param1.val.val64;
2305
2306 /* pop [esp+xx] uses esp after the actual pop! */
2307 AssertCompile(DISGREG_ESP == DISGREG_SP);
2308 if ( (pDis->Param1.fUse & DISUSE_BASE)
2309 && (pDis->Param1.fUse & (DISUSE_REG_GEN16|DISUSE_REG_GEN32))
2310 && pDis->Param1.Base.idxGenReg == DISGREG_ESP
2311 )
2312 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + param1.size);
2313
2314 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2315 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault || (RTGCPTR)pRegFrame->esp == pvFault, VERR_EM_INTERPRETER);
2316 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2317 if (RT_FAILURE(rc))
2318 {
2319 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2320 return VERR_EM_INTERPRETER;
2321 }
2322
2323 /* Update ESP as the last step */
2324 pRegFrame->esp += param1.size;
2325 }
2326 else
2327 {
2328#ifndef DEBUG_bird // annoying assertion.
2329 AssertFailed();
2330#endif
2331 return VERR_EM_INTERPRETER;
2332 }
2333
2334 /* All done! */
2335 *pcbSize = param1.size;
2336 return VINF_SUCCESS;
2337#ifdef IN_RC
2338 }
2339 }
2340 return VERR_EM_INTERPRETER;
2341#endif
2342}
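
/*
 * The memory-destination POP handled above is essentially the following
 * sequence (sketched for a 32-bit stack, flat addresses assumed):
 *
 *      uValue = *(uint32_t *)uFlatEsp;      // fetch the stack slot
 *      *(uint32_t *)uFlatDst = uValue;      // store to the destination operand
 *      pRegFrame->esp += cbOp;              // pop the stack last
 *
 * with the quirk that a destination of the form [esp+xx] is addressed using
 * the post-pop ESP value, which is what the DISGREG_ESP adjustment handles.
 */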
2343
2344
2345/**
2346 * XOR/OR/AND Emulation.
2347 */
2348static int emInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2349 PFNEMULATEPARAM3 pfnEmulate)
2350{
2351 DISQPVPARAMVAL param1, param2;
2352 NOREF(pvFault);
2353
2354 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2355 if(RT_FAILURE(rc))
2356 return VERR_EM_INTERPRETER;
2357
2358 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2359 if(RT_FAILURE(rc))
2360 return VERR_EM_INTERPRETER;
2361
2362#ifdef IN_RC
2363 if (TRPMHasTrap(pVCpu))
2364 {
2365 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2366 {
2367#endif
2368 RTGCPTR pParam1;
2369 uint64_t valpar1, valpar2;
2370
2371 if (pDis->Param1.cb != pDis->Param2.cb)
2372 {
2373 if (pDis->Param1.cb < pDis->Param2.cb)
2374 {
2375 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2376 return VERR_EM_INTERPRETER;
2377 }
2378 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2379 pDis->Param2.cb = pDis->Param1.cb;
2380 param2.size = param1.size;
2381 }
2382
2383 /* The destination is always a virtual address */
2384 if (param1.type == DISQPV_TYPE_ADDRESS)
2385 {
2386 pParam1 = (RTGCPTR)param1.val.val64;
2387 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2388 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2389 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2390 if (RT_FAILURE(rc))
2391 {
2392 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2393 return VERR_EM_INTERPRETER;
2394 }
2395 }
2396 else
2397 {
2398 AssertFailed();
2399 return VERR_EM_INTERPRETER;
2400 }
2401
2402 /* Register or immediate data */
2403 switch(param2.type)
2404 {
2405 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2406 valpar2 = param2.val.val64;
2407 break;
2408
2409 default:
2410 AssertFailed();
2411 return VERR_EM_INTERPRETER;
2412 }
2413
2414 LogFlow(("emInterpretOrXorAnd %s %RGv %RX64 - %RX64 size %d (%d)\n", emGetMnemonic(pDis), pParam1, valpar1, valpar2, param2.size, param1.size));
2415
2416 /* Data read, emulate instruction. */
2417 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2418
2419 LogFlow(("emInterpretOrXorAnd %s result %RX64\n", emGetMnemonic(pDis), valpar1));
2420
2421 /* Update guest's eflags and finish. */
2422 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2423 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2424
2425 /* And write it back */
2426 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2427 if (RT_SUCCESS(rc))
2428 {
2429 /* All done! */
2430 *pcbSize = param2.size;
2431 return VINF_SUCCESS;
2432 }
2433#ifdef IN_RC
2434 }
2435 }
2436#endif
2437 return VERR_EM_INTERPRETER;
2438}
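
/*
 * The flag handling used here (and in most helpers in this file) is a plain
 * merge of the status flags produced by the EMEmulateOr/Xor/And worker
 * (pfnEmulate) into the guest EFLAGS, leaving everything else untouched:
 *
 *      uint32_t const fStatusFlags = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF
 *                                  | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF;
 *      pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~fStatusFlags)
 *                            | (eflags & fStatusFlags);
 *
 * The exact mask varies with the instruction: INC/DEC leave CF alone and
 * CMPXCHG8B only updates ZF, as can be seen further down.
 */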
2439
2440
2441#ifndef VBOX_COMPARE_IEM_AND_EM
2442/**
2443 * LOCK XOR/OR/AND Emulation.
2444 */
2445static int emInterpretLockOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2446 uint32_t *pcbSize, PFNEMULATELOCKPARAM3 pfnEmulate)
2447{
2448 void *pvParam1;
2449 DISQPVPARAMVAL param1, param2;
2450 NOREF(pvFault);
2451
2452#if HC_ARCH_BITS == 32
2453 Assert(pDis->Param1.cb <= 4);
2454#endif
2455
2456 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2457 if(RT_FAILURE(rc))
2458 return VERR_EM_INTERPRETER;
2459
2460 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2461 if(RT_FAILURE(rc))
2462 return VERR_EM_INTERPRETER;
2463
2464 if (pDis->Param1.cb != pDis->Param2.cb)
2465 {
2466 AssertMsgReturn(pDis->Param1.cb >= pDis->Param2.cb, /* should never happen! */
2467 ("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb),
2468 VERR_EM_INTERPRETER);
2469
2470 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2471 pDis->Param2.cb = pDis->Param1.cb;
2472 param2.size = param1.size;
2473 }
2474
2475#ifdef IN_RC
2476 /* Safety check (in theory it could cross a page boundary and fault there though) */
2477 Assert( TRPMHasTrap(pVCpu)
2478 && (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW));
2479 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
2480#endif
2481
2482 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2483 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2484 RTGCUINTREG ValPar2 = param2.val.val64;
2485
2486 /* The destination is always a virtual address */
2487 AssertReturn(param1.type == DISQPV_TYPE_ADDRESS, VERR_EM_INTERPRETER);
2488
2489 RTGCPTR GCPtrPar1 = param1.val.val64;
2490 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2491 PGMPAGEMAPLOCK Lock;
2492 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2493 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2494
2495 /* Try to emulate it with a one-shot #PF handler in place. (RC) */
2496 Log2(("%s %RGv imm%d=%RX64\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2497
2498 RTGCUINTREG32 eflags = 0;
2499 rc = pfnEmulate(pvParam1, ValPar2, pDis->Param2.cb, &eflags);
2500 PGMPhysReleasePageMappingLock(pVM, &Lock);
2501 if (RT_FAILURE(rc))
2502 {
2503 Log(("%s %RGv imm%d=%RX64-> emulation failed due to page fault!\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2504 return VERR_EM_INTERPRETER;
2505 }
2506
2507 /* Update guest's eflags and finish. */
2508 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2509 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2510
2511 *pcbSize = param2.size;
2512 return VINF_SUCCESS;
2513}
2514#endif /* !VBOX_COMPARE_IEM_AND_EM */
2515
2516
2517/**
2518 * ADD, ADC & SUB Emulation.
2519 */
2520static int emInterpretAddSub(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2521 PFNEMULATEPARAM3 pfnEmulate)
2522{
2523 NOREF(pvFault);
2524 DISQPVPARAMVAL param1, param2;
2525 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2526 if(RT_FAILURE(rc))
2527 return VERR_EM_INTERPRETER;
2528
2529 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2530 if(RT_FAILURE(rc))
2531 return VERR_EM_INTERPRETER;
2532
2533#ifdef IN_RC
2534 if (TRPMHasTrap(pVCpu))
2535 {
2536 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2537 {
2538#endif
2539 RTGCPTR pParam1;
2540 uint64_t valpar1, valpar2;
2541
2542 if (pDis->Param1.cb != pDis->Param2.cb)
2543 {
2544 if (pDis->Param1.cb < pDis->Param2.cb)
2545 {
2546 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2547 return VERR_EM_INTERPRETER;
2548 }
2549 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2550 pDis->Param2.cb = pDis->Param1.cb;
2551 param2.size = param1.size;
2552 }
2553
2554 /* The destination is always a virtual address */
2555 if (param1.type == DISQPV_TYPE_ADDRESS)
2556 {
2557 pParam1 = (RTGCPTR)param1.val.val64;
2558 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2559 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2560 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2561 if (RT_FAILURE(rc))
2562 {
2563 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2564 return VERR_EM_INTERPRETER;
2565 }
2566 }
2567 else
2568 {
2569#ifndef DEBUG_bird
2570 AssertFailed();
2571#endif
2572 return VERR_EM_INTERPRETER;
2573 }
2574
2575 /* Register or immediate data */
2576 switch(param2.type)
2577 {
2578 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2579 valpar2 = param2.val.val64;
2580 break;
2581
2582 default:
2583 AssertFailed();
2584 return VERR_EM_INTERPRETER;
2585 }
2586
2587 /* Data read, emulate instruction. */
2588 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2589
2590 /* Update guest's eflags and finish. */
2591 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2592 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2593
2594 /* And write it back */
2595 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2596 if (RT_SUCCESS(rc))
2597 {
2598 /* All done! */
2599 *pcbSize = param2.size;
2600 return VINF_SUCCESS;
2601 }
2602#ifdef IN_RC
2603 }
2604 }
2605#endif
2606 return VERR_EM_INTERPRETER;
2607}
2608
2609
2610/**
2611 * ADC Emulation.
2612 */
2613static int emInterpretAdc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2614{
2615 if (pRegFrame->eflags.Bits.u1CF)
2616 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdcWithCarrySet);
2617 else
2618 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdd);
2619}
2620
2621
2622/**
2623 * BTR/C/S Emulation.
2624 */
2625static int emInterpretBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2626 PFNEMULATEPARAM2UINT32 pfnEmulate)
2627{
2628 DISQPVPARAMVAL param1, param2;
2629 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2630 if(RT_FAILURE(rc))
2631 return VERR_EM_INTERPRETER;
2632
2633 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2634 if(RT_FAILURE(rc))
2635 return VERR_EM_INTERPRETER;
2636
2637#ifdef IN_RC
2638 if (TRPMHasTrap(pVCpu))
2639 {
2640 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2641 {
2642#endif
2643 RTGCPTR pParam1;
2644 uint64_t valpar1 = 0, valpar2;
2645 uint32_t eflags;
2646
2647 /* The destination is always a virtual address */
2648 if (param1.type != DISQPV_TYPE_ADDRESS)
2649 return VERR_EM_INTERPRETER;
2650
2651 pParam1 = (RTGCPTR)param1.val.val64;
2652 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2653
2654 /* Register or immediate data */
2655 switch(param2.type)
2656 {
2657 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2658 valpar2 = param2.val.val64;
2659 break;
2660
2661 default:
2662 AssertFailed();
2663 return VERR_EM_INTERPRETER;
2664 }
2665
2666 Log2(("emInterpret%s: pvFault=%RGv pParam1=%RGv val2=%x\n", emGetMnemonic(pDis), pvFault, pParam1, valpar2));
2667 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + valpar2/8);
2668 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)pParam1 & ~3) == pvFault, VERR_EM_INTERPRETER); NOREF(pvFault);
2669 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, 1);
2670 if (RT_FAILURE(rc))
2671 {
2672 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2673 return VERR_EM_INTERPRETER;
2674 }
2675
2676 Log2(("emInterpretBtx: val=%x\n", valpar1));
2677 /* Data read, emulate bit test instruction. */
2678 eflags = pfnEmulate(&valpar1, valpar2 & 0x7);
2679
2680 Log2(("emInterpretBtx: val=%x CF=%d\n", valpar1, !!(eflags & X86_EFL_CF)));
2681
2682 /* Update guest's eflags and finish. */
2683 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2684 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2685
2686 /* And write it back */
2687 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, 1);
2688 if (RT_SUCCESS(rc))
2689 {
2690 /* All done! */
2691 *pcbSize = 1;
2692 return VINF_SUCCESS;
2693 }
2694#ifdef IN_RC
2695 }
2696 }
2697#endif
2698 return VERR_EM_INTERPRETER;
2699}
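
/*
 * The bit-test helpers only touch the single byte containing the target bit
 * (byte offset = bit/8, bit index = bit%8); the operation itself is roughly:
 *
 *      uint8_t const fMask = (uint8_t)(1 << (iBit & 7));
 *      bool const    fOld  = (bByte & fMask) != 0;    // old value -> X86_EFL_CF
 *      bByte |=  fMask;    // BTS
 *      bByte &= ~fMask;    // BTR
 *      bByte ^=  fMask;    // BTC
 *
 * (only one of the three updates applies, selected via pfnEmulate).
 */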
2700
2701
2702#ifndef VBOX_COMPARE_IEM_AND_EM
2703/**
2704 * LOCK BTR/C/S Emulation.
2705 */
2706static int emInterpretLockBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2707 uint32_t *pcbSize, PFNEMULATELOCKPARAM2 pfnEmulate)
2708{
2709 void *pvParam1;
2710
2711 DISQPVPARAMVAL param1, param2;
2712 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2713 if(RT_FAILURE(rc))
2714 return VERR_EM_INTERPRETER;
2715
2716 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2717 if(RT_FAILURE(rc))
2718 return VERR_EM_INTERPRETER;
2719
2720 /* The destination is always a virtual address */
2721 if (param1.type != DISQPV_TYPE_ADDRESS)
2722 return VERR_EM_INTERPRETER;
2723
2724 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2725 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2726 uint64_t ValPar2 = param2.val.val64;
2727
2728 /* Adjust the parameters so what we're dealing with is a bit within the byte pointed to. */
2729 RTGCPTR GCPtrPar1 = param1.val.val64;
2730 GCPtrPar1 = (GCPtrPar1 + ValPar2 / 8);
2731 ValPar2 &= 7;
2732
2733 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2734#ifdef IN_RC
2735 Assert(TRPMHasTrap(pVCpu));
2736 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)GCPtrPar1 & ~(RTGCUINTPTR)3) == pvFault, VERR_EM_INTERPRETER);
2737#endif
2738
2739 PGMPAGEMAPLOCK Lock;
2740 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2741 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2742
2743 Log2(("emInterpretLockBitTest %s: pvFault=%RGv GCPtrPar1=%RGv imm=%RX64\n", emGetMnemonic(pDis), pvFault, GCPtrPar1, ValPar2));
2744 NOREF(pvFault);
2745
2746 /* Try to emulate it with a one-shot #PF handler in place. (RC) */
2747 RTGCUINTREG32 eflags = 0;
2748 rc = pfnEmulate(pvParam1, ValPar2, &eflags);
2749 PGMPhysReleasePageMappingLock(pVM, &Lock);
2750 if (RT_FAILURE(rc))
2751 {
2752 Log(("emInterpretLockBitTest %s: %RGv imm%d=%RX64 -> emulation failed due to page fault!\n",
2753 emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2754 return VERR_EM_INTERPRETER;
2755 }
2756
2757 Log2(("emInterpretLockBitTest %s: GCPtrPar1=%RGv imm=%RX64 CF=%d\n", emGetMnemonic(pDis), GCPtrPar1, ValPar2, !!(eflags & X86_EFL_CF)));
2758
2759 /* Update guest's eflags and finish. */
2760 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2761 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2762
2763 *pcbSize = 1;
2764 return VINF_SUCCESS;
2765}
2766#endif /* !VBOX_COMPARE_IEM_AND_EM */
2767
2768
2769/**
2770 * MOV emulation.
2771 */
2772static int emInterpretMov(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2773{
2774 NOREF(pvFault);
2775 DISQPVPARAMVAL param1, param2;
2776 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2777 if(RT_FAILURE(rc))
2778 return VERR_EM_INTERPRETER;
2779
2780 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2781 if(RT_FAILURE(rc))
2782 return VERR_EM_INTERPRETER;
2783
2784 /* If destination is a segment register, punt. We can't handle it here.
2785 * NB: Source can be a register and still trigger a #PF!
2786 */
2787 if (RT_UNLIKELY(pDis->Param1.fUse == DISUSE_REG_SEG))
2788 return VERR_EM_INTERPRETER;
2789
2790 if (param1.type == DISQPV_TYPE_ADDRESS)
2791 {
2792 RTGCPTR pDest;
2793 uint64_t val64;
2794
2795 switch(param1.type)
2796 {
2797 case DISQPV_TYPE_IMMEDIATE:
2798 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2799 return VERR_EM_INTERPRETER;
2800 RT_FALL_THRU();
2801
2802 case DISQPV_TYPE_ADDRESS:
2803 pDest = (RTGCPTR)param1.val.val64;
2804 pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
2805 break;
2806
2807 default:
2808 AssertFailed();
2809 return VERR_EM_INTERPRETER;
2810 }
2811
2812 switch(param2.type)
2813 {
2814 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
2815 val64 = param2.val.val64;
2816 break;
2817
2818 default:
2819 Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
2820 return VERR_EM_INTERPRETER;
2821 }
2822#ifdef LOG_ENABLED
2823 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2824 LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
2825 else
2826 LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
2827#endif
2828
2829 Assert(param2.size <= 8 && param2.size > 0);
2830 EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
2831 rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
2832 if (RT_FAILURE(rc))
2833 return VERR_EM_INTERPRETER;
2834
2835 *pcbSize = param2.size;
2836 }
2837#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1)
2838 /* The mov xx, cs instruction is dangerous in raw mode and is replaced by an 'int3' by csam/patm. */
2839 else if ( param1.type == DISQPV_TYPE_REGISTER
2840 && param2.type == DISQPV_TYPE_REGISTER)
2841 {
2842 AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER);
2843 AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER);
2844 AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER);
2845
2846 uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
2847 uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl;
2848
2849 Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS));
2850 switch (param1.size)
2851 {
2852 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) uValCS); break;
2853 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break;
2854 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break;
2855 default:
2856 AssertFailed();
2857 return VERR_EM_INTERPRETER;
2858 }
2859 AssertRCReturn(rc, rc);
2860 }
2861#endif
2862 else
2863 { /* read fault */
2864 RTGCPTR pSrc;
2865 uint64_t val64;
2866
2867 /* Source */
2868 switch(param2.type)
2869 {
2870 case DISQPV_TYPE_IMMEDIATE:
2871 if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2872 return VERR_EM_INTERPRETER;
2873 RT_FALL_THRU();
2874
2875 case DISQPV_TYPE_ADDRESS:
2876 pSrc = (RTGCPTR)param2.val.val64;
2877 pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
2878 break;
2879
2880 default:
2881 return VERR_EM_INTERPRETER;
2882 }
2883
2884 Assert(param1.size <= 8 && param1.size > 0);
2885 EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
2886 rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
2887 if (RT_FAILURE(rc))
2888 return VERR_EM_INTERPRETER;
2889
2890 /* Destination */
2891 switch(param1.type)
2892 {
2893 case DISQPV_TYPE_REGISTER:
2894 switch(param1.size)
2895 {
2896 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break;
2897 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
2898 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
2899 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
2900 default:
2901 return VERR_EM_INTERPRETER;
2902 }
2903 if (RT_FAILURE(rc))
2904 return rc;
2905 break;
2906
2907 default:
2908 return VERR_EM_INTERPRETER;
2909 }
2910#ifdef LOG_ENABLED
2911 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2912 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
2913 else
2914 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
2915#endif
2916 }
2917 return VINF_SUCCESS;
2918}
2919
2920
2921#ifndef IN_RC
2922/**
2923 * [REP] STOSWD emulation
2924 */
2925static int emInterpretStosWD(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2926{
2927 int rc;
2928 RTGCPTR GCDest, GCOffset;
2929 uint32_t cbSize;
2930 uint64_t cTransfers;
2931 int offIncrement;
2932 NOREF(pvFault);
2933
2934 /* Only the address size, operand size, REP and REX prefixes are supported. */
2935 if ((pDis->fPrefix & ~(DISPREFIX_ADDRSIZE|DISPREFIX_OPSIZE|DISPREFIX_REP|DISPREFIX_REX)))
2936 return VERR_EM_INTERPRETER;
2937
2938 switch (pDis->uAddrMode)
2939 {
2940 case DISCPUMODE_16BIT:
2941 GCOffset = pRegFrame->di;
2942 cTransfers = pRegFrame->cx;
2943 break;
2944 case DISCPUMODE_32BIT:
2945 GCOffset = pRegFrame->edi;
2946 cTransfers = pRegFrame->ecx;
2947 break;
2948 case DISCPUMODE_64BIT:
2949 GCOffset = pRegFrame->rdi;
2950 cTransfers = pRegFrame->rcx;
2951 break;
2952 default:
2953 AssertFailed();
2954 return VERR_EM_INTERPRETER;
2955 }
2956
2957 GCDest = SELMToFlat(pVM, DISSELREG_ES, pRegFrame, GCOffset);
2958 switch (pDis->uOpMode)
2959 {
2960 case DISCPUMODE_16BIT:
2961 cbSize = 2;
2962 break;
2963 case DISCPUMODE_32BIT:
2964 cbSize = 4;
2965 break;
2966 case DISCPUMODE_64BIT:
2967 cbSize = 8;
2968 break;
2969 default:
2970 AssertFailed();
2971 return VERR_EM_INTERPRETER;
2972 }
2973
2974 offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cbSize : (signed)cbSize;
2975
2976 if (!(pDis->fPrefix & DISPREFIX_REP))
2977 {
2978 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize));
2979
2980 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2981 if (RT_FAILURE(rc))
2982 return VERR_EM_INTERPRETER;
2983 Assert(rc == VINF_SUCCESS);
2984
2985 /* Update (e/r)di. */
2986 switch (pDis->uAddrMode)
2987 {
2988 case DISCPUMODE_16BIT:
2989 pRegFrame->di += offIncrement;
2990 break;
2991 case DISCPUMODE_32BIT:
2992 pRegFrame->edi += offIncrement;
2993 break;
2994 case DISCPUMODE_64BIT:
2995 pRegFrame->rdi += offIncrement;
2996 break;
2997 default:
2998 AssertFailed();
2999 return VERR_EM_INTERPRETER;
3000 }
3001
3002 }
3003 else
3004 {
3005 if (!cTransfers)
3006 return VINF_SUCCESS;
3007
3008 /*
3009 * Do *not* try to emulate anything that crosses a page boundary here, because
3010 * we don't know what might be waiting for us on the subsequent pages. The
3011 * caller has only asked us to ignore access handlers for the current page.
3012 * This also fends off big stores which would quickly kill PGMR0DynMap.
3013 */
3014 if ( cbSize > PAGE_SIZE
3015 || cTransfers > PAGE_SIZE
3016 || (GCDest >> PAGE_SHIFT) != ((GCDest + offIncrement * cTransfers) >> PAGE_SHIFT))
3017 {
3018 Log(("STOSWD is crosses pages, chicken out to the recompiler; GCDest=%RGv cbSize=%#x offIncrement=%d cTransfers=%#x\n",
3019 GCDest, cbSize, offIncrement, cTransfers));
3020 return VERR_EM_INTERPRETER;
3021 }
3022
3023 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d cTransfers=%x DF=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize, cTransfers, pRegFrame->eflags.Bits.u1DF));
3024 /* Access verification first; we currently can't recover properly from traps inside this instruction */
3025 rc = PGMVerifyAccess(pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
3026 cTransfers * cbSize,
3027 X86_PTE_RW | (CPUMGetGuestCPL(pVCpu) == 3 ? X86_PTE_US : 0));
3028 if (rc != VINF_SUCCESS)
3029 {
3030 Log(("STOSWD will generate a trap -> recompiler, rc=%d\n", rc));
3031 return VERR_EM_INTERPRETER;
3032 }
3033
3034 /* REP case */
3035 while (cTransfers)
3036 {
3037 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
3038 if (RT_FAILURE(rc))
3039 {
3040 rc = VERR_EM_INTERPRETER;
3041 break;
3042 }
3043
3044 Assert(rc == VINF_SUCCESS);
3045 GCOffset += offIncrement;
3046 GCDest += offIncrement;
3047 cTransfers--;
3048 }
3049
3050 /* Update the registers. */
3051 switch (pDis->uAddrMode)
3052 {
3053 case DISCPUMODE_16BIT:
3054 pRegFrame->di = GCOffset;
3055 pRegFrame->cx = cTransfers;
3056 break;
3057 case DISCPUMODE_32BIT:
3058 pRegFrame->edi = GCOffset;
3059 pRegFrame->ecx = cTransfers;
3060 break;
3061 case DISCPUMODE_64BIT:
3062 pRegFrame->rdi = GCOffset;
3063 pRegFrame->rcx = cTransfers;
3064 break;
3065 default:
3066 AssertFailed();
3067 return VERR_EM_INTERPRETER;
3068 }
3069 }
3070
3071 *pcbSize = cbSize;
3072 return rc;
3073}
3074#endif /* !IN_RC */
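
/*
 * For reference, REP STOSW/D/Q stores rAX cTransfers times, stepping rDI by
 * the operand size in the direction given by EFLAGS.DF, i.e. per iteration:
 *
 *      GCDest += pRegFrame->eflags.Bits.u1DF ? -(int)cbSize : (int)cbSize;
 *
 * which is what offIncrement captures above; the PGMVerifyAccess pre-check
 * exists because the loop cannot recover from a fault halfway through.
 */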
3075
3076
3077/**
3078 * [LOCK] CMPXCHG emulation.
3079 */
3080static int emInterpretCmpXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3081{
3082 DISQPVPARAMVAL param1, param2;
3083 NOREF(pvFault);
3084
3085#if HC_ARCH_BITS == 32
3086 Assert(pDis->Param1.cb <= 4);
3087#endif
3088
3089 /* Source to make DISQueryParamVal read the register value - ugly hack */
3090 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3091 if(RT_FAILURE(rc))
3092 return VERR_EM_INTERPRETER;
3093
3094 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
3095 if(RT_FAILURE(rc))
3096 return VERR_EM_INTERPRETER;
3097
3098 uint64_t valpar;
3099 switch(param2.type)
3100 {
3101 case DISQPV_TYPE_IMMEDIATE: /* register actually */
3102 valpar = param2.val.val64;
3103 break;
3104
3105 default:
3106 return VERR_EM_INTERPRETER;
3107 }
3108
3109 PGMPAGEMAPLOCK Lock;
3110 RTGCPTR GCPtrPar1;
3111 void *pvParam1;
3112 uint64_t eflags;
3113
3114 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
3115 switch(param1.type)
3116 {
3117 case DISQPV_TYPE_ADDRESS:
3118 GCPtrPar1 = param1.val.val64;
3119 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
3120
3121 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3122 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3123 break;
3124
3125 default:
3126 return VERR_EM_INTERPRETER;
3127 }
3128
3129 LogFlow(("%s %RGv rax=%RX64 %RX64\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar));
3130
3131#ifndef VBOX_COMPARE_IEM_AND_EM
3132 if (pDis->fPrefix & DISPREFIX_LOCK)
3133 eflags = EMEmulateLockCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
3134 else
3135 eflags = EMEmulateCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
3136#else /* VBOX_COMPARE_IEM_AND_EM */
3137 uint64_t u64;
3138 switch (pDis->Param2.cb)
3139 {
3140 case 1: u64 = *(uint8_t *)pvParam1; break;
3141 case 2: u64 = *(uint16_t *)pvParam1; break;
3142 case 4: u64 = *(uint32_t *)pvParam1; break;
3143 default:
3144 case 8: u64 = *(uint64_t *)pvParam1; break;
3145 }
3146 eflags = EMEmulateCmpXchg(&u64, &pRegFrame->rax, valpar, pDis->Param2.cb);
3147 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3148#endif /* VBOX_COMPARE_IEM_AND_EM */
3149
3150 LogFlow(("%s %RGv rax=%RX64 %RX64 ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar, !!(eflags & X86_EFL_ZF)));
3151
3152 /* Update guest's eflags and finish. */
3153 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3154 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3155
3156 *pcbSize = param2.size;
3157 PGMPhysReleasePageMappingLock(pVM, &Lock);
3158 return VINF_SUCCESS;
3159}
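
/*
 * The operation being emulated above is the usual compare-and-exchange, with
 * ZF reporting the outcome:
 *
 *      if (*puDst == uRax)
 *      {
 *          *puDst = uSrc;      // equal: store the source operand
 *          fZF    = true;
 *      }
 *      else
 *      {
 *          uRax   = *puDst;    // not equal: load the destination into rAX
 *          fZF    = false;
 *      }
 *
 * The LOCK-prefixed form performs the same update atomically.
 */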
3160
3161
3162/**
3163 * [LOCK] CMPXCHG8B emulation.
3164 */
3165static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3166{
3167 DISQPVPARAMVAL param1;
3168 NOREF(pvFault);
3169
3170 /* Source to make DISQueryParamVal read the register value - ugly hack */
3171 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3172 if(RT_FAILURE(rc))
3173 return VERR_EM_INTERPRETER;
3174
3175 RTGCPTR GCPtrPar1;
3176 void *pvParam1;
3177 uint64_t eflags;
3178 PGMPAGEMAPLOCK Lock;
3179
3180 AssertReturn(pDis->Param1.cb == 8, VERR_EM_INTERPRETER);
3181 switch(param1.type)
3182 {
3183 case DISQPV_TYPE_ADDRESS:
3184 GCPtrPar1 = param1.val.val64;
3185 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
3186
3187 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3188 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3189 break;
3190
3191 default:
3192 return VERR_EM_INTERPRETER;
3193 }
3194
3195 LogFlow(("%s %RGv=%p eax=%08x\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax));
3196
3197#ifndef VBOX_COMPARE_IEM_AND_EM
3198 if (pDis->fPrefix & DISPREFIX_LOCK)
3199 eflags = EMEmulateLockCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3200 else
3201 eflags = EMEmulateCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3202#else /* VBOX_COMPARE_IEM_AND_EM */
3203 uint64_t u64 = *(uint64_t *)pvParam1;
3204 eflags = EMEmulateCmpXchg8b(&u64, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3205 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, sizeof(u64)); AssertRCSuccess(rc2);
3206#endif /* VBOX_COMPARE_IEM_AND_EM */
3207
3208 LogFlow(("%s %RGv=%p eax=%08x ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax, !!(eflags & X86_EFL_ZF)));
3209
3210 /* Update guest's eflags and finish; note that *only* ZF is affected. */
3211 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_ZF))
3212 | (eflags & (X86_EFL_ZF));
3213
3214 *pcbSize = 8;
3215 PGMPhysReleasePageMappingLock(pVM, &Lock);
3216 return VINF_SUCCESS;
3217}
3218
3219
3220#ifdef IN_RC /** @todo test+enable for HM as well. */
3221/**
3222 * [LOCK] XADD emulation.
3223 */
3224static int emInterpretXAdd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3225{
3226 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
3227 DISQPVPARAMVAL param1;
3228 void *pvParamReg2;
3229 size_t cbParamReg2;
3230 NOREF(pvFault);
3231
3232 /* Source to make DISQueryParamVal read the register value - ugly hack */
3233 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3234 if(RT_FAILURE(rc))
3235 return VERR_EM_INTERPRETER;
3236
3237 rc = DISQueryParamRegPtr(pRegFrame, pDis, &pDis->Param2, &pvParamReg2, &cbParamReg2);
3238 Assert(cbParamReg2 <= 4);
3239 if(RT_FAILURE(rc))
3240 return VERR_EM_INTERPRETER;
3241
3242#ifdef IN_RC
3243 if (TRPMHasTrap(pVCpu))
3244 {
3245 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
3246 {
3247#endif
3248 RTGCPTR GCPtrPar1;
3249 void *pvParam1;
3250 uint32_t eflags;
3251 PGMPAGEMAPLOCK Lock;
3252
3253 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
3254 switch(param1.type)
3255 {
3256 case DISQPV_TYPE_ADDRESS:
3257 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, (RTRCUINTPTR)param1.val.val64);
3258#ifdef IN_RC
3259 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
3260#endif
3261
3262 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3263 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3264 break;
3265
3266 default:
3267 return VERR_EM_INTERPRETER;
3268 }
3269
3270 LogFlow(("XAdd %RGv=%p reg=%08llx\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2));
3271
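    /* XADD stores the sum in the destination and returns the destination's original
       value in the register operand; the helper returns the resulting EFLAGS, so all
       six arithmetic flags are merged back below. */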
3272#ifndef VBOX_COMPARE_IEM_AND_EM
3273 if (pDis->fPrefix & DISPREFIX_LOCK)
3274 eflags = EMEmulateLockXAdd(pvParam1, pvParamReg2, cbParamReg2);
3275 else
3276 eflags = EMEmulateXAdd(pvParam1, pvParamReg2, cbParamReg2);
3277#else /* VBOX_COMPARE_IEM_AND_EM */
3278 uint64_t u64;
3279 switch (cbParamReg2)
3280 {
3281 case 1: u64 = *(uint8_t *)pvParam1; break;
3282 case 2: u64 = *(uint16_t *)pvParam1; break;
3283 case 4: u64 = *(uint32_t *)pvParam1; break;
3284 default:
3285 case 8: u64 = *(uint64_t *)pvParam1; break;
3286 }
3287 eflags = EMEmulateXAdd(&u64, pvParamReg2, cbParamReg2);
3288 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3289#endif /* VBOX_COMPARE_IEM_AND_EM */
3290
3291 LogFlow(("XAdd %RGv=%p reg=%08llx ZF=%d\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2, !!(eflags & X86_EFL_ZF) ));
3292
3293 /* Update guest's eflags and finish. */
3294 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3295 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3296
3297 *pcbSize = cbParamReg2;
3298 PGMPhysReleasePageMappingLock(pVM, &Lock);
3299 return VINF_SUCCESS;
3300#ifdef IN_RC
3301 }
3302 }
3303
3304 return VERR_EM_INTERPRETER;
3305#endif
3306}
3307#endif /* IN_RC */
3308
3309
3310/**
3311 * WBINVD Emulation.
3312 */
3313static int emInterpretWbInvd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3314{
3315 /* Nothing to do. */
3316 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3317 return VINF_SUCCESS;
3318}
3319
3320
3321/**
3322 * INVLPG Emulation.
3323 */
3324static VBOXSTRICTRC emInterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3325{
3326 DISQPVPARAMVAL param1;
3327 RTGCPTR addr;
3328 NOREF(pvFault); NOREF(pVM); NOREF(pcbSize);
3329
3330 VBOXSTRICTRC rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3331 if(RT_FAILURE(rc))
3332 return VERR_EM_INTERPRETER;
3333
3334 switch(param1.type)
3335 {
3336 case DISQPV_TYPE_IMMEDIATE:
3337 case DISQPV_TYPE_ADDRESS:
3338 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
3339 return VERR_EM_INTERPRETER;
3340 addr = (RTGCPTR)param1.val.val64;
3341 break;
3342
3343 default:
3344 return VERR_EM_INTERPRETER;
3345 }
3346
3347 /** @todo Is addr always a flat linear address, or is it DS-based
3348 * (in the absence of segment override prefixes)?
3349 */
3350#ifdef IN_RC
3351 LogFlow(("RC: EMULATE: invlpg %RGv\n", addr));
3352#endif
3353 rc = PGMInvalidatePage(pVCpu, addr);
3354 if ( rc == VINF_SUCCESS
3355 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
3356 return VINF_SUCCESS;
3357 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
3358 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), addr),
3359 VERR_EM_INTERPRETER);
3360 return rc;
3361}
3362
3363/** @todo change all these EMInterpretXXX methods to VBOXSTRICTRC. */
3364
3365/**
3366 * CPUID Emulation.
3367 */
3368static int emInterpretCpuId(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3369{
3370 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3371 int rc = EMInterpretCpuId(pVM, pVCpu, pRegFrame);
3372 return rc;
3373}
3374
3375
3376/**
3377 * CLTS Emulation.
3378 */
3379static int emInterpretClts(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3380{
3381 NOREF(pVM); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3382
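    /* CLTS only clears CR0.TS; if TS is already clear there is nothing to do. */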
3383 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3384 if (!(cr0 & X86_CR0_TS))
3385 return VINF_SUCCESS;
3386 return CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_TS);
3387}
3388
3389
3390/**
3391 * Update CRx.
3392 *
3393 * @returns VBox status code.
3394 * @param pVM The cross context VM structure.
3395 * @param pVCpu The cross context virtual CPU structure.
3396 * @param pRegFrame The register frame.
3397 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3398 * @param val New CRx value
3399 *
3400 */
3401static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
3402{
3403 uint64_t oldval;
3404 uint64_t msrEFER;
3405 uint32_t fValid;
3406 int rc, rc2;
3407 NOREF(pVM);
3408
3409 /** @todo Clean up this mess. */
3410 LogFlow(("emInterpretCRxWrite at %RGv CR%d <- %RX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
3411 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3412 switch (DestRegCrx)
3413 {
3414 case DISCREG_CR0:
3415 oldval = CPUMGetGuestCR0(pVCpu);
3416#ifdef IN_RC
3417 /* CR0.WP and CR0.AM changes require rescheduling to ring-3. */
3418 if ( (val & (X86_CR0_WP | X86_CR0_AM))
3419 != (oldval & (X86_CR0_WP | X86_CR0_AM)))
3420 return VERR_EM_INTERPRETER;
3421#endif
3422 rc = VINF_SUCCESS;
3423#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
3424 CPUMSetGuestCR0(pVCpu, val);
3425#else
3426 CPUMQueryGuestCtxPtr(pVCpu)->cr0 = val | X86_CR0_ET;
3427#endif
3428 val = CPUMGetGuestCR0(pVCpu);
3429 if ( (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3430 != (val & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
3431 {
3432 /* global flush */
3433 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3434 AssertRCReturn(rc, rc);
3435 }
3436
3437 /* Deal with long mode enabling/disabling. */
3438 msrEFER = CPUMGetGuestEFER(pVCpu);
3439 if (msrEFER & MSR_K6_EFER_LME)
3440 {
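            /* With EFER.LME set, enabling CR0.PG activates long mode (EFER.LMA) and
               clearing CR0.PG deactivates it; the checks below reject enabling paging
               while CS is a 64-bit selector or while CR4.PAE is still clear. */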
3441 if ( !(oldval & X86_CR0_PG)
3442 && (val & X86_CR0_PG))
3443 {
3444 /* Illegal to have an active 64-bit CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3445 if (pRegFrame->cs.Attr.n.u1Long)
3446 {
3447 AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
3448 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3449 }
3450
3451 /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3452 if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE))
3453 {
3454 AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
3455 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3456 }
3457 msrEFER |= MSR_K6_EFER_LMA;
3458 }
3459 else
3460 if ( (oldval & X86_CR0_PG)
3461 && !(val & X86_CR0_PG))
3462 {
3463 msrEFER &= ~MSR_K6_EFER_LMA;
3464 /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
3465 }
3466 CPUMSetGuestEFER(pVCpu, msrEFER);
3467 }
3468 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3469 return rc2 == VINF_SUCCESS ? rc : rc2;
3470
3471 case DISCREG_CR2:
3472 rc = CPUMSetGuestCR2(pVCpu, val); AssertRC(rc);
3473 return VINF_SUCCESS;
3474
3475 case DISCREG_CR3:
3476 /* Reloading the current CR3 means the guest just wants to flush the TLBs */
3477 rc = CPUMSetGuestCR3(pVCpu, val); AssertRC(rc);
3478 if (CPUMGetGuestCR0(pVCpu) & X86_CR0_PG)
3479 {
3480 /* Flush; the flush is global only when CR4.PGE is clear, since global pages survive a CR3 reload otherwise. */
3481 rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
3482 AssertRC(rc);
3483 }
3484 return rc;
3485
3486 case DISCREG_CR4:
3487 oldval = CPUMGetGuestCR4(pVCpu);
3488 rc = CPUMSetGuestCR4(pVCpu, val); AssertRC(rc);
3489 val = CPUMGetGuestCR4(pVCpu);
3490
3491 /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3492 msrEFER = CPUMGetGuestEFER(pVCpu);
3493 if ( (msrEFER & MSR_K6_EFER_LMA)
3494 && (oldval & X86_CR4_PAE)
3495 && !(val & X86_CR4_PAE))
3496 {
3497 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3498 }
3499
3500 /* From IEM iemCImpl_load_CrX. */
3501 /** @todo Check guest CPUID bits for determining corresponding valid bits. */
3502 fValid = X86_CR4_VME | X86_CR4_PVI
3503 | X86_CR4_TSD | X86_CR4_DE
3504 | X86_CR4_PSE | X86_CR4_PAE
3505 | X86_CR4_MCE | X86_CR4_PGE
3506 | X86_CR4_PCE | X86_CR4_OSFXSR
3507 | X86_CR4_OSXMMEEXCPT;
3508 //if (xxx)
3509 // fValid |= X86_CR4_VMXE;
3510 //if (xxx)
3511 // fValid |= X86_CR4_OSXSAVE;
3512 if (val & ~(uint64_t)fValid)
3513 {
3514 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", val, val & ~(uint64_t)fValid));
3515 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3516 }
3517
3518 rc = VINF_SUCCESS;
3519 if ( (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
3520 != (val & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
3521 {
3522 /* global flush */
3523 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3524 AssertRCReturn(rc, rc);
3525 }
3526
3527 /* Feeling extremely lazy. */
3528# ifdef IN_RC
3529 if ( (oldval & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
3530 != (val & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
3531 {
3532 Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
3533 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
3534 }
3535# endif
3536# ifdef VBOX_WITH_RAW_MODE
3537 if (((val ^ oldval) & X86_CR4_VME) && VM_IS_RAW_MODE_ENABLED(pVM))
3538 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3539# endif
3540
3541 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3542 return rc2 == VINF_SUCCESS ? rc : rc2;
3543
3544 case DISCREG_CR8:
3545 return APICSetTpr(pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
3546
3547 default:
3548 AssertFailed();
3549 case DISCREG_CR1: /* illegal op */
3550 break;
3551 }
3552 return VERR_EM_INTERPRETER;
3553}
3554
3555
3556/**
3557 * LMSW Emulation.
3558 */
3559static int emInterpretLmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3560{
3561 DISQPVPARAMVAL param1;
3562 uint32_t val;
3563 NOREF(pvFault); NOREF(pcbSize);
3564 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3565
3566 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3567 if(RT_FAILURE(rc))
3568 return VERR_EM_INTERPRETER;
3569
3570 switch(param1.type)
3571 {
3572 case DISQPV_TYPE_IMMEDIATE:
3573 case DISQPV_TYPE_ADDRESS:
3574 if(!(param1.flags & DISQPV_FLAG_16))
3575 return VERR_EM_INTERPRETER;
3576 val = param1.val.val32;
3577 break;
3578
3579 default:
3580 return VERR_EM_INTERPRETER;
3581 }
3582
3583 LogFlow(("emInterpretLmsw %x\n", val));
3584 uint64_t OldCr0 = CPUMGetGuestCR0(pVCpu);
3585
3586 /* Only PE, MP, EM and TS can be changed; note that PE can't be cleared by this instruction. */
3587 uint64_t NewCr0 = ( OldCr0 & ~( X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
3588 | (val & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
3589
3590 return emUpdateCRx(pVM, pVCpu, pRegFrame, DISCREG_CR0, NewCr0);
3591
3592}
3593
3594#ifdef EM_EMULATE_SMSW
3595/**
3596 * SMSW Emulation.
3597 */
3598static int emInterpretSmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3599{
3600 NOREF(pvFault); NOREF(pcbSize);
3601 DISQPVPARAMVAL param1;
3602 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3603
3604 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3605 if(RT_FAILURE(rc))
3606 return VERR_EM_INTERPRETER;
3607
3608 switch(param1.type)
3609 {
3610 case DISQPV_TYPE_IMMEDIATE:
3611 if(param1.size != sizeof(uint16_t))
3612 return VERR_EM_INTERPRETER;
3613 LogFlow(("emInterpretSmsw %d <- cr0 (%x)\n", pDis->Param1.Base.idxGenReg, cr0));
3614 rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, cr0);
3615 break;
3616
3617 case DISQPV_TYPE_ADDRESS:
3618 {
3619 RTGCPTR pParam1;
3620
3621 /* Actually forced to 16 bits regardless of the operand size. */
3622 if(param1.size != sizeof(uint16_t))
3623 return VERR_EM_INTERPRETER;
3624
3625 pParam1 = (RTGCPTR)param1.val.val64;
3626 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
3627 LogFlow(("emInterpretSmsw %RGv <- cr0 (%x)\n", pParam1, cr0));
3628
3629 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &cr0, sizeof(uint16_t));
3630 if (RT_FAILURE(rc))
3631 {
3632 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
3633 return VERR_EM_INTERPRETER;
3634 }
3635 break;
3636 }
3637
3638 default:
3639 return VERR_EM_INTERPRETER;
3640 }
3641
3642 LogFlow(("emInterpretSmsw %x\n", cr0));
3643 return rc;
3644}
3645#endif
3646
3647
3648/**
3649 * Interpret CRx read.
3650 *
3651 * @returns VBox status code.
3652 * @param pVM The cross context VM structure.
3653 * @param pVCpu The cross context virtual CPU structure.
3654 * @param pRegFrame The register frame.
3655 * @param DestRegGen General purpose register index (USE_REG_E**)
3656 * @param SrcRegCrx CRx register index (DISUSE_REG_CR*)
3657 *
3658 */
3659static int emInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx)
3660{
3661 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3662 uint64_t val64;
3663 int rc = CPUMGetGuestCRx(pVCpu, SrcRegCrx, &val64);
3664 AssertMsgRCReturn(rc, ("CPUMGetGuestCRx %d failed\n", SrcRegCrx), VERR_EM_INTERPRETER);
3665 NOREF(pVM);
3666
3667 if (CPUMIsGuestIn64BitCode(pVCpu))
3668 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
3669 else
3670 rc = DISWriteReg32(pRegFrame, DestRegGen, val64);
3671
3672 if (RT_SUCCESS(rc))
3673 {
3674 LogFlow(("MOV_CR: gen32=%d CR=%d val=%RX64\n", DestRegGen, SrcRegCrx, val64));
3675 return VINF_SUCCESS;
3676 }
3677 return VERR_EM_INTERPRETER;
3678}
3679
3680
3681/**
3682 * Interpret CRx write.
3683 *
3684 * @returns VBox status code.
3685 * @param pVM The cross context VM structure.
3686 * @param pVCpu The cross context virtual CPU structure.
3687 * @param pRegFrame The register frame.
3688 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3689 * @param SrcRegGen General purpose register index (USE_REG_E**)
3690 *
3691 */
3692static int emInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
3693{
3694 uint64_t val;
3695 int rc;
3696 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3697
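    /* In 64-bit code the full GPR is used as the new CRx value; otherwise only the
       low 32 bits are fetched and zero-extended. */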
3698 if (CPUMIsGuestIn64BitCode(pVCpu))
3699 rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
3700 else
3701 {
3702 uint32_t val32;
3703 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
3704 val = val32;
3705 }
3706
3707 if (RT_SUCCESS(rc))
3708 return emUpdateCRx(pVM, pVCpu, pRegFrame, DestRegCrx, val);
3709
3710 return VERR_EM_INTERPRETER;
3711}
3712
3713
3714/**
3715 * MOV CRx
3716 */
3717static int emInterpretMovCRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3718{
3719 NOREF(pvFault); NOREF(pcbSize);
3720 if ((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_CR)
3721 return emInterpretCRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxCtrlReg);
3722
3723 if (pDis->Param1.fUse == DISUSE_REG_CR && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3724 return emInterpretCRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxCtrlReg, pDis->Param2.Base.idxGenReg);
3725
3726 AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
3727}
3728
3729
3730/**
3731 * MOV DRx
3732 */
3733static int emInterpretMovDRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3734{
3735 int rc = VERR_EM_INTERPRETER;
3736 NOREF(pvFault); NOREF(pcbSize);
3737
3738 if((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_DBG)
3739 {
3740 rc = EMInterpretDRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxDbgReg);
3741 }
3742 else
3743 if(pDis->Param1.fUse == DISUSE_REG_DBG && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3744 {
3745 rc = EMInterpretDRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxDbgReg, pDis->Param2.Base.idxGenReg);
3746 }
3747 else
3748 AssertMsgFailed(("Unexpected debug register move\n"));
3749
3750 return rc;
3751}
3752
3753
3754/**
3755 * LLDT Emulation.
3756 */
3757static int emInterpretLLdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3758{
3759 DISQPVPARAMVAL param1;
3760 RTSEL sel;
3761 NOREF(pVM); NOREF(pvFault); NOREF(pcbSize);
3762
3763 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3764 if(RT_FAILURE(rc))
3765 return VERR_EM_INTERPRETER;
3766
3767 switch(param1.type)
3768 {
3769 case DISQPV_TYPE_ADDRESS:
3770 return VERR_EM_INTERPRETER; //feeling lazy right now
3771
3772 case DISQPV_TYPE_IMMEDIATE:
3773 if(!(param1.flags & DISQPV_FLAG_16))
3774 return VERR_EM_INTERPRETER;
3775 sel = (RTSEL)param1.val.val16;
3776 break;
3777
3778 default:
3779 return VERR_EM_INTERPRETER;
3780 }
3781
3782#ifdef IN_RING0
3783 /* Only for the VT-x real-mode emulation case. */
3784 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3785 CPUMSetGuestLDTR(pVCpu, sel);
3786 return VINF_SUCCESS;
3787#else
3788 if (sel == 0)
3789 {
3790 if (CPUMGetHyperLDTR(pVCpu) == 0)
3791 {
3792 // this simple case is most frequent in Windows 2000 (31k - boot & shutdown)
3793 return VINF_SUCCESS;
3794 }
3795 }
3796 //still feeling lazy
3797 return VERR_EM_INTERPRETER;
3798#endif
3799}
3800
3801#ifdef IN_RING0
3802/**
3803 * LIDT/LGDT Emulation.
3804 */
3805static int emInterpretLIGdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3806{
3807 DISQPVPARAMVAL param1;
3808 RTGCPTR pParam1;
3809 X86XDTR32 dtr32;
3810 NOREF(pvFault); NOREF(pcbSize);
3811
3812 Log(("Emulate %s at %RGv\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip));
3813
3814 /* Only for the VT-x real-mode emulation case. */
3815 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3816
3817 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3818 if(RT_FAILURE(rc))
3819 return VERR_EM_INTERPRETER;
3820
3821 switch(param1.type)
3822 {
3823 case DISQPV_TYPE_ADDRESS:
3824 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, param1.val.val16);
3825 break;
3826
3827 default:
3828 return VERR_EM_INTERPRETER;
3829 }
3830
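    /* Read the 6-byte pseudo-descriptor (16-bit limit followed by a 32-bit base)
       from guest memory. */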
3831 rc = emRamRead(pVM, pVCpu, pRegFrame, &dtr32, pParam1, sizeof(dtr32));
3832 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3833
3834 if (!(pDis->fPrefix & DISPREFIX_OPSIZE))
3835 dtr32.uAddr &= 0xffffff; /* With a 16-bit operand size only the low 24 bits of the base are used. */
3836
3837 if (pDis->pCurInstr->uOpcode == OP_LIDT)
3838 CPUMSetGuestIDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3839 else
3840 CPUMSetGuestGDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3841
3842 return VINF_SUCCESS;
3843}
3844#endif
3845
3846
3847#ifdef IN_RC
3848/**
3849 * STI Emulation.
3850 *
3851 * @remark The instruction following STI is guaranteed to be executed before any interrupts are dispatched.
3852 */
3853static int emInterpretSti(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3854{
3855 NOREF(pcbSize);
3856 PPATMGCSTATE pGCState = PATMGetGCState(pVM);
3857
3858 if(!pGCState)
3859 {
3860 Assert(pGCState);
3861 return VERR_EM_INTERPRETER;
3862 }
3863 pGCState->uVMFlags |= X86_EFL_IF;
3864
3865 Assert(pRegFrame->eflags.u32 & X86_EFL_IF);
3866 Assert(pvFault == SELMToFlat(pVM, DISSELREG_CS, pRegFrame, (RTGCPTR)pRegFrame->rip));
3867
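    /* STI only takes effect after the following instruction (the interrupt shadow),
       so remember where the shadow ends and set the inhibit-interrupts force flag. */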
3868 pVCpu->em.s.GCPtrInhibitInterrupts = pRegFrame->eip + pDis->cbInstr;
3869 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3870
3871 return VINF_SUCCESS;
3872}
3873#endif /* IN_RC */
3874
3875
3876/**
3877 * HLT Emulation.
3878 */
3879static VBOXSTRICTRC
3880emInterpretHlt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3881{
3882 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3883 return VINF_EM_HALT;
3884}
3885
3886
3887/**
3888 * RDTSC Emulation.
3889 */
3890static int emInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3891{
3892 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3893 return EMInterpretRdtsc(pVM, pVCpu, pRegFrame);
3894}
3895
3896/**
3897 * RDPMC Emulation.
3898 */
3899static int emInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3900{
3901 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3902 return EMInterpretRdpmc(pVM, pVCpu, pRegFrame);
3903}
3904
3905
3906static int emInterpretMonitor(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3907{
3908 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3909 return EMInterpretMonitor(pVM, pVCpu, pRegFrame);
3910}
3911
3912
3913static VBOXSTRICTRC emInterpretMWait(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3914{
3915 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3916 return EMInterpretMWait(pVM, pVCpu, pRegFrame);
3917}
3918
3919
3920/**
3921 * RDMSR Emulation.
3922 */
3923static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3924{
3925 /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
3926 different, so we play it safe by disassembling the instruction completely. */
3927 Assert(!(pDis->fPrefix & DISPREFIX_REX));
3928 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3929 return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
3930}
3931
3932
3933/**
3934 * WRMSR Emulation.
3935 */
3936static int emInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3937{
3938 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3939 return EMInterpretWrmsr(pVM, pVCpu, pRegFrame);
3940}
3941
3942
3943/**
3944 * Internal worker.
3945 * @copydoc emInterpretInstructionCPUOuter
3946 * @param pVM The cross context VM structure.
3947 */
3948DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
3949 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
3950{
3951 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3952 Assert(enmCodeType == EMCODETYPE_SUPERVISOR || enmCodeType == EMCODETYPE_ALL);
3953 Assert(pcbSize);
3954 *pcbSize = 0;
3955
3956 if (enmCodeType == EMCODETYPE_SUPERVISOR)
3957 {
3958 /*
3959 * Only supervisor guest code!!
3960 * And no complicated prefixes.
3961 */
3962 /* Get the current privilege level. */
3963 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3964#ifdef VBOX_WITH_RAW_RING1
3965 if ( !EMIsRawRing1Enabled(pVM)
3966 || cpl > 1
3967 || pRegFrame->eflags.Bits.u2IOPL > cpl
3968 )
3969#endif
3970 {
3971 if ( cpl != 0
3972 && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */
3973 {
3974 Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
3975 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
3976 return VERR_EM_INTERPRETER;
3977 }
3978 }
3979 }
3980 else
3981 Log2(("emInterpretInstructionCPU allowed to interpret user-level code!!\n"));
3982
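    /* Refuse REP/REPNE and LOCK prefixes on instructions the helpers below do not
       handle; the accepted sets differ between raw-mode and ring-0/ring-3. */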
3983#ifdef IN_RC
3984 if ( (pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP))
3985 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3986 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3987 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3988 && pDis->pCurInstr->uOpcode != OP_XADD
3989 && pDis->pCurInstr->uOpcode != OP_OR
3990 && pDis->pCurInstr->uOpcode != OP_AND
3991 && pDis->pCurInstr->uOpcode != OP_XOR
3992 && pDis->pCurInstr->uOpcode != OP_BTR
3993 )
3994 )
3995#else
3996 if ( (pDis->fPrefix & DISPREFIX_REPNE)
3997 || ( (pDis->fPrefix & DISPREFIX_REP)
3998 && pDis->pCurInstr->uOpcode != OP_STOSWD
3999 )
4000 || ( (pDis->fPrefix & DISPREFIX_LOCK)
4001 && pDis->pCurInstr->uOpcode != OP_OR
4002 && pDis->pCurInstr->uOpcode != OP_AND
4003 && pDis->pCurInstr->uOpcode != OP_XOR
4004 && pDis->pCurInstr->uOpcode != OP_BTR
4005 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
4006 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
4007 )
4008 )
4009#endif
4010 {
4011 //Log(("EMInterpretInstruction: wrong prefix!!\n"));
4012 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedPrefix));
4013 Log4(("EM: Refuse %u on REP/REPNE/LOCK prefix grounds\n", pDis->pCurInstr->uOpcode));
4014 return VERR_EM_INTERPRETER;
4015 }
4016
4017#if HC_ARCH_BITS == 32
4018 /*
4019 * Unable to emulate most accesses wider than 4 bytes when the host is 32-bit.
4020 * Whitelisted instructions are safe.
4021 */
4022 if ( pDis->Param1.cb > 4
4023 && CPUMIsGuestIn64BitCode(pVCpu))
4024 {
4025 uint32_t uOpCode = pDis->pCurInstr->uOpcode;
4026 if ( uOpCode != OP_STOSWD
4027 && uOpCode != OP_MOV
4028 && uOpCode != OP_CMPXCHG8B
4029 && uOpCode != OP_XCHG
4030 && uOpCode != OP_BTS
4031 && uOpCode != OP_BTR
4032 && uOpCode != OP_BTC
4033 )
4034 {
4035# ifdef VBOX_WITH_STATISTICS
4036 switch (pDis->pCurInstr->uOpcode)
4037 {
4038# define INTERPRET_FAILED_CASE(opcode, Instr) \
4039 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); break;
4040 INTERPRET_FAILED_CASE(OP_XCHG,Xchg);
4041 INTERPRET_FAILED_CASE(OP_DEC,Dec);
4042 INTERPRET_FAILED_CASE(OP_INC,Inc);
4043 INTERPRET_FAILED_CASE(OP_POP,Pop);
4044 INTERPRET_FAILED_CASE(OP_OR, Or);
4045 INTERPRET_FAILED_CASE(OP_XOR,Xor);
4046 INTERPRET_FAILED_CASE(OP_AND,And);
4047 INTERPRET_FAILED_CASE(OP_MOV,Mov);
4048 INTERPRET_FAILED_CASE(OP_STOSWD,StosWD);
4049 INTERPRET_FAILED_CASE(OP_INVLPG,InvlPg);
4050 INTERPRET_FAILED_CASE(OP_CPUID,CpuId);
4051 INTERPRET_FAILED_CASE(OP_MOV_CR,MovCRx);
4052 INTERPRET_FAILED_CASE(OP_MOV_DR,MovDRx);
4053 INTERPRET_FAILED_CASE(OP_LLDT,LLdt);
4054 INTERPRET_FAILED_CASE(OP_LIDT,LIdt);
4055 INTERPRET_FAILED_CASE(OP_LGDT,LGdt);
4056 INTERPRET_FAILED_CASE(OP_LMSW,Lmsw);
4057 INTERPRET_FAILED_CASE(OP_CLTS,Clts);
4058 INTERPRET_FAILED_CASE(OP_MONITOR,Monitor);
4059 INTERPRET_FAILED_CASE(OP_MWAIT,MWait);
4060 INTERPRET_FAILED_CASE(OP_RDMSR,Rdmsr);
4061 INTERPRET_FAILED_CASE(OP_WRMSR,Wrmsr);
4062 INTERPRET_FAILED_CASE(OP_ADD,Add);
4063 INTERPRET_FAILED_CASE(OP_SUB,Sub);
4064 INTERPRET_FAILED_CASE(OP_ADC,Adc);
4065 INTERPRET_FAILED_CASE(OP_BTR,Btr);
4066 INTERPRET_FAILED_CASE(OP_BTS,Bts);
4067 INTERPRET_FAILED_CASE(OP_BTC,Btc);
4068 INTERPRET_FAILED_CASE(OP_RDTSC,Rdtsc);
4069 INTERPRET_FAILED_CASE(OP_CMPXCHG, CmpXchg);
4070 INTERPRET_FAILED_CASE(OP_STI, Sti);
4071 INTERPRET_FAILED_CASE(OP_XADD,XAdd);
4072 INTERPRET_FAILED_CASE(OP_CMPXCHG8B,CmpXchg8b);
4073 INTERPRET_FAILED_CASE(OP_HLT, Hlt);
4074 INTERPRET_FAILED_CASE(OP_IRET,Iret);
4075 INTERPRET_FAILED_CASE(OP_WBINVD,WbInvd);
4076 INTERPRET_FAILED_CASE(OP_MOVNTPS,MovNTPS);
4077# undef INTERPRET_FAILED_CASE
4078 default:
4079 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
4080 break;
4081 }
4082# endif /* VBOX_WITH_STATISTICS */
4083 Log4(("EM: Refuse %u on grounds of accessing %u bytes\n", pDis->pCurInstr->uOpcode, pDis->Param1.cb));
4084 return VERR_EM_INTERPRETER;
4085 }
4086 }
4087#endif
4088
4089 VBOXSTRICTRC rc;
4090#if (defined(VBOX_STRICT) || defined(LOG_ENABLED))
4091 LogFlow(("emInterpretInstructionCPU %s\n", emGetMnemonic(pDis)));
4092#endif
4093 switch (pDis->pCurInstr->uOpcode)
4094 {
4095 /*
4096 * Macros for generating the right case statements.
4097 */
4098# ifndef VBOX_COMPARE_IEM_AND_EM
4099# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
4100 case opcode:\
4101 if (pDis->fPrefix & DISPREFIX_LOCK) \
4102 rc = emInterpretLock##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulateLock); \
4103 else \
4104 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
4105 if (RT_SUCCESS(rc)) \
4106 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4107 else \
4108 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4109 return rc
4110# else /* VBOX_COMPARE_IEM_AND_EM */
4111# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
4112 case opcode:\
4113 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
4114 if (RT_SUCCESS(rc)) \
4115 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4116 else \
4117 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4118 return rc
4119# endif /* VBOX_COMPARE_IEM_AND_EM */
4120
4121#define INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate) \
4122 case opcode:\
4123 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
4124 if (RT_SUCCESS(rc)) \
4125 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4126 else \
4127 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4128 return rc
4129
4130#define INTERPRET_CASE_EX_PARAM2(opcode, Instr, InstrFn, pfnEmulate) \
4131 INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate)
4132#define INTERPRET_CASE_EX_LOCK_PARAM2(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
4133 INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock)
4134
4135#define INTERPRET_CASE(opcode, Instr) \
4136 case opcode:\
4137 rc = emInterpret##Instr(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
4138 if (RT_SUCCESS(rc)) \
4139 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4140 else \
4141 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4142 return rc
4143
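    /* For example, INTERPRET_CASE(OP_HLT, Hlt) expands to a case label that invokes
       emInterpretHlt() and bumps the corresponding success or failure statistics
       counter before returning the status code. */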
4144#define INTERPRET_CASE_EX_DUAL_PARAM2(opcode, Instr, InstrFn) \
4145 case opcode:\
4146 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
4147 if (RT_SUCCESS(rc)) \
4148 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4149 else \
4150 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4151 return rc
4152
4153#define INTERPRET_STAT_CASE(opcode, Instr) \
4154 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); return VERR_EM_INTERPRETER;
4155
4156 /*
4157 * The actual case statements.
4158 */
4159 INTERPRET_CASE(OP_XCHG,Xchg);
4160 INTERPRET_CASE_EX_PARAM2(OP_DEC,Dec, IncDec, EMEmulateDec);
4161 INTERPRET_CASE_EX_PARAM2(OP_INC,Inc, IncDec, EMEmulateInc);
4162 INTERPRET_CASE(OP_POP,Pop);
4163 INTERPRET_CASE_EX_LOCK_PARAM3(OP_OR, Or, OrXorAnd, EMEmulateOr, EMEmulateLockOr);
4164 INTERPRET_CASE_EX_LOCK_PARAM3(OP_XOR,Xor, OrXorAnd, EMEmulateXor, EMEmulateLockXor);
4165 INTERPRET_CASE_EX_LOCK_PARAM3(OP_AND,And, OrXorAnd, EMEmulateAnd, EMEmulateLockAnd);
4166 INTERPRET_CASE(OP_MOV,Mov);
4167#ifndef IN_RC
4168 INTERPRET_CASE(OP_STOSWD,StosWD);
4169#endif
4170 INTERPRET_CASE(OP_INVLPG,InvlPg);
4171 INTERPRET_CASE(OP_CPUID,CpuId);
4172 INTERPRET_CASE(OP_MOV_CR,MovCRx);
4173 INTERPRET_CASE(OP_MOV_DR,MovDRx);
4174#ifdef IN_RING0
4175 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LIDT, LIdt, LIGdt);
4176 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LGDT, LGdt, LIGdt);
4177#endif
4178 INTERPRET_CASE(OP_LLDT,LLdt);
4179 INTERPRET_CASE(OP_LMSW,Lmsw);
4180#ifdef EM_EMULATE_SMSW
4181 INTERPRET_CASE(OP_SMSW,Smsw);
4182#endif
4183 INTERPRET_CASE(OP_CLTS,Clts);
4184 INTERPRET_CASE(OP_MONITOR, Monitor);
4185 INTERPRET_CASE(OP_MWAIT, MWait);
4186 INTERPRET_CASE(OP_RDMSR, Rdmsr);
4187 INTERPRET_CASE(OP_WRMSR, Wrmsr);
4188 INTERPRET_CASE_EX_PARAM3(OP_ADD,Add, AddSub, EMEmulateAdd);
4189 INTERPRET_CASE_EX_PARAM3(OP_SUB,Sub, AddSub, EMEmulateSub);
4190 INTERPRET_CASE(OP_ADC,Adc);
4191 INTERPRET_CASE_EX_LOCK_PARAM2(OP_BTR,Btr, BitTest, EMEmulateBtr, EMEmulateLockBtr);
4192 INTERPRET_CASE_EX_PARAM2(OP_BTS,Bts, BitTest, EMEmulateBts);
4193 INTERPRET_CASE_EX_PARAM2(OP_BTC,Btc, BitTest, EMEmulateBtc);
4194 INTERPRET_CASE(OP_RDPMC,Rdpmc);
4195 INTERPRET_CASE(OP_RDTSC,Rdtsc);
4196 INTERPRET_CASE(OP_CMPXCHG, CmpXchg);
4197#ifdef IN_RC
4198 INTERPRET_CASE(OP_STI,Sti);
4199 INTERPRET_CASE(OP_XADD, XAdd);
4200 INTERPRET_CASE(OP_IRET,Iret);
4201#endif
4202 INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b);
4203 INTERPRET_CASE(OP_HLT,Hlt);
4204 INTERPRET_CASE(OP_WBINVD,WbInvd);
4205#ifdef VBOX_WITH_STATISTICS
4206# ifndef IN_RC
4207 INTERPRET_STAT_CASE(OP_XADD, XAdd);
4208# endif
4209 INTERPRET_STAT_CASE(OP_MOVNTPS,MovNTPS);
4210#endif
4211
4212 default:
4213 Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->uOpcode));
4214 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
4215 return VERR_EM_INTERPRETER;
4216
4217#undef INTERPRET_CASE_EX_PARAM2
4218#undef INTERPRET_STAT_CASE
4219#undef INTERPRET_CASE_EX
4220#undef INTERPRET_CASE
4221 } /* switch (opcode) */
4222 /* not reached */
4223}
4224
4225/**
4226 * Interprets the current instruction using the supplied DISCPUSTATE structure.
4227 *
4228 * EIP is *NOT* updated!
4229 *
4230 * @returns VBox strict status code.
4231 * @retval VINF_* Scheduling instructions. When these are returned, it
4232 * starts to get a bit tricky to know whether code was
4233 * executed or not... We'll address this when it becomes a problem.
4234 * @retval VERR_EM_INTERPRETER Something we can't cope with.
4235 * @retval VERR_* Fatal errors.
4236 *
4237 * @param pVCpu The cross context virtual CPU structure.
4238 * @param pDis The disassembler cpu state for the instruction to be
4239 * interpreted.
4240 * @param pRegFrame The register frame. EIP is *NOT* changed!
4241 * @param pvFault The fault address (CR2).
4242 * @param pcbSize Size of the write (if applicable).
4243 * @param enmCodeType Code type (user/supervisor)
4244 *
4245 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
4246 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
4247 * to worry about e.g. invalid modrm combinations (!)
4248 *
4249 * @todo At this time we do NOT check if the instruction overwrites vital information.
4250 * Make sure this can't happen!! (will add some assertions/checks later)
4251 */
4252DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
4253 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
4254{
4255 STAM_PROFILE_START(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4256 VBOXSTRICTRC rc = emInterpretInstructionCPU(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, pRegFrame, pvFault, enmCodeType, pcbSize);
4257 STAM_PROFILE_STOP(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4258 if (RT_SUCCESS(rc))
4259 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretSucceeded));
4260 else
4261 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretFailed));
4262 return rc;
4263}
4264
4265
4266#endif /* !VBOX_WITH_IEM */