VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@ 72596

Last change on this file since 72596 was 72596, checked in by vboxsync, 7 years ago

EM,HM: Removed EMInterpretRdtsc and EMInterpretRdtscp. bugref:6973

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 165.8 KB
Line 
1/* $Id: EMAll.cpp 72596 2018-06-18 12:51:15Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_WITH_IEM
23#define LOG_GROUP LOG_GROUP_EM
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/patm.h>
28#include <VBox/vmm/csam.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_IEM
31# include <VBox/vmm/iem.h>
32#endif
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/stam.h>
35#include "EMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/log.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50#ifdef VBOX_WITH_IEM
51//# define VBOX_COMPARE_IEM_AND_EM /* debugging... */
52//# define VBOX_SAME_AS_EM
53//# define VBOX_COMPARE_IEM_LAST
54#endif
55
56#ifdef VBOX_WITH_RAW_RING1
57# define EM_EMULATE_SMSW
58#endif
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64/** @def EM_ASSERT_FAULT_RETURN
65 * Safety check.
66 *
67 * Could in theory misfire on a cross page boundary access...
68 *
69 * Currently disabled because the CSAM (+ PATM) patch monitoring occasionally
70 * turns up an alias page instead of the original faulting one and annoying the
71 * heck out of anyone running a debug build. See @bugref{2609} and @bugref{1931}.
72 */
73#if 0
74# define EM_ASSERT_FAULT_RETURN(expr, rc) AssertReturn(expr, rc)
75#else
76# define EM_ASSERT_FAULT_RETURN(expr, rc) do { } while (0)
77#endif
78
79
80/*********************************************************************************************************************************
81* Internal Functions *
82*********************************************************************************************************************************/
83#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
84DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
85 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize);
86#endif
87
88
89/*********************************************************************************************************************************
90* Global Variables *
91*********************************************************************************************************************************/
92#ifdef VBOX_COMPARE_IEM_AND_EM
93static const uint32_t g_fInterestingFFs = VMCPU_FF_TO_R3
94 | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_INHIBIT_INTERRUPTS
95 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
96 | VMCPU_FF_TLB_FLUSH | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL;
97static uint32_t g_fIncomingFFs;
98static CPUMCTX g_IncomingCtx;
99static bool g_fIgnoreRaxRdx = false;
100
101static uint32_t g_fEmFFs;
102static CPUMCTX g_EmCtx;
103static uint8_t g_abEmWrote[256];
104static size_t g_cbEmWrote;
105
106static uint32_t g_fIemFFs;
107static CPUMCTX g_IemCtx;
108extern uint8_t g_abIemWrote[256];
109#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
110extern size_t g_cbIemWrote;
111#else
112static size_t g_cbIemWrote;
113#endif
114#endif
115
116
117/**
118 * Get the current execution manager status.
119 *
120 * @returns Current status.
121 * @param pVCpu The cross context virtual CPU structure.
122 */
123VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
124{
125 return pVCpu->em.s.enmState;
126}
127
128
129/**
130 * Sets the current execution manager status. (use only when you know what you're doing!)
131 *
132 * @param pVCpu The cross context virtual CPU structure.
133 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
134 */
135VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
136{
137 /* Only allowed combination: */
138 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
139 pVCpu->em.s.enmState = enmNewState;
140}
141
142
143/**
144 * Sets the PC for which interrupts should be inhibited.
145 *
146 * @param pVCpu The cross context virtual CPU structure.
147 * @param PC The PC.
148 */
149VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
150{
151 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
152 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
153}
154
155
156/**
157 * Gets the PC for which interrupts should be inhibited.
158 *
159 * There are a few instructions which inhibits or delays interrupts
160 * for the instruction following them. These instructions are:
161 * - STI
162 * - MOV SS, r/m16
163 * - POP SS
164 *
165 * @returns The PC for which interrupts should be inhibited.
166 * @param pVCpu The cross context virtual CPU structure.
167 *
168 */
169VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
170{
171 return pVCpu->em.s.GCPtrInhibitInterrupts;
172}
173
174
175/**
176 * Enables / disable hypercall instructions.
177 *
178 * This interface is used by GIM to tell the execution monitors whether the
179 * hypercall instruction (VMMCALL & VMCALL) are allowed or should \#UD.
180 *
181 * @param pVCpu The cross context virtual CPU structure this applies to.
182 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
183 */
184VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
185{
186 pVCpu->em.s.fHypercallEnabled = fEnabled;
187}
188
189
190/**
191 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
192 *
193 * @returns true if enabled, false if not.
194 * @param pVCpu The cross context virtual CPU structure.
195 *
196 * @note If this call becomes a performance factor, we can make the data
197 * field available thru a read-only view in VMCPU. See VM::cpum.ro.
198 */
199VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
200{
201 return pVCpu->em.s.fHypercallEnabled;
202}
203
204
205/**
206 * Prepare an MWAIT - essentials of the MONITOR instruction.
207 *
208 * @returns VINF_SUCCESS
209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
210 * @param rax The content of RAX.
211 * @param rcx The content of RCX.
212 * @param rdx The content of RDX.
213 * @param GCPhys The physical address corresponding to rax.
214 */
215VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
216{
217 pVCpu->em.s.MWait.uMonitorRAX = rax;
218 pVCpu->em.s.MWait.uMonitorRCX = rcx;
219 pVCpu->em.s.MWait.uMonitorRDX = rdx;
220 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
221 /** @todo Make use of GCPhys. */
222 NOREF(GCPhys);
223 /** @todo Complete MONITOR implementation. */
224 return VINF_SUCCESS;
225}
226
227
228/**
229 * Checks if the monitor hardware is armed / active.
230 *
231 * @returns true if armed, false otherwise.
232 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
233 */
234VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
235{
236 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
237}
238
239
240/**
241 * Performs an MWAIT.
242 *
243 * @returns VINF_SUCCESS
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 * @param rax The content of RAX.
246 * @param rcx The content of RCX.
247 */
248VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
249{
250 pVCpu->em.s.MWait.uMWaitRAX = rax;
251 pVCpu->em.s.MWait.uMWaitRCX = rcx;
252 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
253 if (rcx)
254 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
255 else
256 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
257 /** @todo not completely correct?? */
258 return VINF_EM_HALT;
259}
260
261
262
263/**
264 * Determine if we should continue execution in HM after encountering an mwait
265 * instruction.
266 *
267 * Clears MWAIT flags if returning @c true.
268 *
269 * @returns true if we should continue, false if we should halt.
270 * @param pVCpu The cross context virtual CPU structure.
271 * @param pCtx Current CPU context.
272 */
273VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
274{
275 if ( pCtx->eflags.Bits.u1IF
276 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
277 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
278 {
279 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
280 {
281 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
282 return true;
283 }
284 }
285
286 return false;
287}
288
289
290/**
291 * Determine if we should continue execution in HM after encountering a hlt
292 * instruction.
293 *
294 * @returns true if we should continue, false if we should halt.
295 * @param pVCpu The cross context virtual CPU structure.
296 * @param pCtx Current CPU context.
297 */
298VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
299{
300 /** @todo Shouldn't we be checking GIF here? */
301 if (pCtx->eflags.Bits.u1IF)
302 return VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
303 return false;
304}
305
306
307/**
308 * Unhalts and wakes up the given CPU.
309 *
310 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
311 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up. If
312 * the CPU isn't currently in a halt, the next HLT instruction it executes will
313 * be affected.
314 *
315 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
316 * @param pVM The cross context VM structure.
317 * @param pVCpuDst The cross context virtual CPU structure of the
318 * CPU to unhalt and wake up. This is usually not the
319 * same as the caller.
320 * @thread EMT
321 */
322VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst)
323{
324 /*
325 * Flag the current(/next) HLT to unhalt immediately.
326 */
327 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);
328
329 /*
330 * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
331 * just do it here for now).
332 */
333#ifdef IN_RING0
334 /* We might be here with preemption disabled or enabled (i.e. depending on
335 thread-context hooks being used), so don't try obtaining the GVMMR0 used
336 lock here. See @bugref{7270#c148}. */
337 int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
338 AssertRC(rc);
339
340#elif defined(IN_RING3)
341 int rc = SUPR3CallVMMR0(pVM->pVMR0, pVCpuDst->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
342 AssertRC(rc);
343
344#else
345 /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
346 Assert(pVM->cCpus == 1); NOREF(pVM);
347 int rc = VINF_SUCCESS;
348#endif
349 return rc;
350}
351
352#ifndef IN_RING3
353
354/**
355 * Makes an I/O port write pending for ring-3 processing.
356 *
357 * @returns VINF_EM_PENDING_R3_IOPORT_READ
358 * @param pVCpu The cross context virtual CPU structure.
359 * @param uPort The I/O port.
360 * @param cbInstr The instruction length (for RIP updating).
361 * @param cbValue The write size.
362 * @param uValue The value being written.
363 * @sa emR3ExecutePendingIoPortWrite
364 *
365 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
366 */
367VMMRZ_INT_DECL(VBOXSTRICTRC)
368EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
369{
370 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
371 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
372 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
373 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
374 pVCpu->em.s.PendingIoPortAccess.uValue = uValue;
375 return VINF_EM_PENDING_R3_IOPORT_WRITE;
376}
377
378
379/**
380 * Makes an I/O port read pending for ring-3 processing.
381 *
382 * @returns VINF_EM_PENDING_R3_IOPORT_READ
383 * @param pVCpu The cross context virtual CPU structure.
384 * @param uPort The I/O port.
385 * @param cbInstr The instruction length (for RIP updating).
386 * @param cbValue The read size.
387 * @sa emR3ExecutePendingIoPortRead
388 *
389 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
390 */
391VMMRZ_INT_DECL(VBOXSTRICTRC)
392EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
393{
394 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
395 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
396 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
397 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
398 pVCpu->em.s.PendingIoPortAccess.uValue = UINT32_C(0x52454144); /* 'READ' */
399 return VINF_EM_PENDING_R3_IOPORT_READ;
400}
401
#endif /* !IN_RING3 */
403
404
405/**
406 * Worker for EMHistoryExec that checks for ring-3 returns and flags
407 * continuation of the EMHistoryExec run there.
408 */
409DECL_FORCE_INLINE(void) emHistoryExecSetContinueExitRecIdx(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, PCEMEXITREC pExitRec)
410{
411 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
412#ifdef IN_RING3
413 RT_NOREF_PV(rcStrict); RT_NOREF_PV(pExitRec);
414#else
415 switch (VBOXSTRICTRC_VAL(rcStrict))
416 {
417 case VINF_SUCCESS:
418 default:
419 break;
420
421 /*
422 * Only status codes that EMHandleRCTmpl.h will resume EMHistoryExec with.
423 */
424 case VINF_IOM_R3_IOPORT_READ: /* -> emR3ExecuteIOInstruction */
425 case VINF_IOM_R3_IOPORT_WRITE: /* -> emR3ExecuteIOInstruction */
426 case VINF_IOM_R3_IOPORT_COMMIT_WRITE: /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
427 case VINF_IOM_R3_MMIO_READ: /* -> emR3ExecuteInstruction */
428 case VINF_IOM_R3_MMIO_WRITE: /* -> emR3ExecuteInstruction */
429 case VINF_IOM_R3_MMIO_READ_WRITE: /* -> emR3ExecuteInstruction */
430 case VINF_IOM_R3_MMIO_COMMIT_WRITE: /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
431 case VINF_CPUM_R3_MSR_READ: /* -> emR3ExecuteInstruction */
432 case VINF_CPUM_R3_MSR_WRITE: /* -> emR3ExecuteInstruction */
433 case VINF_GIM_R3_HYPERCALL: /* -> emR3ExecuteInstruction */
434 pVCpu->em.s.idxContinueExitRec = (uint16_t)(pExitRec - &pVCpu->em.s.aExitRecords[0]);
435 break;
436 }
437#endif /* !IN_RING3 */
438}
439
440
441/**
442 * Execute using history.
443 *
444 * This function will be called when EMHistoryAddExit() and friends returns a
445 * non-NULL result. This happens in response to probing or when probing has
446 * uncovered adjacent exits which can more effectively be reached by using IEM
447 * than restarting execution using the main execution engine and fielding an
448 * regular exit.
449 *
450 * @returns VBox strict status code, see IEMExecForExits.
451 * @param pVCpu The cross context virtual CPU structure.
452 * @param pExitRec The exit record return by a previous history add
453 * or update call.
454 * @param fWillExit Flags indicating to IEM what will cause exits, TBD.
455 */
456VMM_INT_DECL(VBOXSTRICTRC) EMHistoryExec(PVMCPU pVCpu, PCEMEXITREC pExitRec, uint32_t fWillExit)
457{
458 Assert(pExitRec);
459 VMCPU_ASSERT_EMT(pVCpu);
460 IEMEXECFOREXITSTATS ExecStats;
461 switch (pExitRec->enmAction)
462 {
463 /*
464 * Executes multiple instruction stopping only when we've gone a given
465 * number without perceived exits.
466 */
467 case EMEXITACTION_EXEC_WITH_MAX:
468 {
469 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryExec, a);
470 LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %RX64, max %u\n", pExitRec->uFlatPC, pExitRec->cMaxInstructionsWithoutExit));
471 VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
472 pExitRec->cMaxInstructionsWithoutExit /* cMinInstructions*/,
473 4096 /*cMaxInstructions*/,
474 pExitRec->cMaxInstructionsWithoutExit,
475 &ExecStats);
476 LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
477 VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
478 emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRec);
479 if (ExecStats.cExits > 1)
480 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecSavedExits, ExecStats.cExits - 1);
481 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecInstructions, ExecStats.cInstructions);
482 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryExec, a);
483 return rcStrict;
484 }
485
486 /*
487 * Probe a exit for close by exits.
488 */
489 case EMEXITACTION_EXEC_PROBE:
490 {
491 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryProbe, b);
492 LogFlow(("EMHistoryExec/EXEC_PROBE: %RX64\n", pExitRec->uFlatPC));
493 PEMEXITREC pExitRecUnconst = (PEMEXITREC)pExitRec;
494 VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
495 64 /*cMinInstructions*/,
496 4096 /*cMaxInstructions*/,
497 32 /*cMaxInstructionsWithoutExit*/,
498 &ExecStats);
499 LogFlow(("EMHistoryExec/EXEC_PROBE: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
500 VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
501 emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRecUnconst);
502 if (ExecStats.cExits >= 2)
503 {
504 Assert(ExecStats.cMaxExitDistance > 0 && ExecStats.cMaxExitDistance <= 32);
505 pExitRecUnconst->cMaxInstructionsWithoutExit = ExecStats.cMaxExitDistance;
506 pExitRecUnconst->enmAction = EMEXITACTION_EXEC_WITH_MAX;
507 LogFlow(("EMHistoryExec/EXEC_PROBE: -> EXEC_WITH_MAX %u\n", ExecStats.cMaxExitDistance));
508 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedExecWithMax);
509 }
510#ifndef IN_RING3
511 else if (pVCpu->em.s.idxContinueExitRec != UINT16_MAX)
512 {
513 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedToRing3);
514 LogFlow(("EMHistoryExec/EXEC_PROBE: -> ring-3\n"));
515 }
516#endif
517 else
518 {
519 pExitRecUnconst->enmAction = EMEXITACTION_NORMAL_PROBED;
520 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
521 LogFlow(("EMHistoryExec/EXEC_PROBE: -> PROBED\n"));
522 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedNormal);
523 }
524 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryProbeInstructions, ExecStats.cInstructions);
525 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryProbe, b);
526 return rcStrict;
527 }
528
529 /* We shouldn't ever see these here! */
530 case EMEXITACTION_FREE_RECORD:
531 case EMEXITACTION_NORMAL:
532 case EMEXITACTION_NORMAL_PROBED:
533 break;
534
535 /* No default case, want compiler warnings. */
536 }
537 AssertLogRelFailedReturn(VERR_EM_INTERNAL_ERROR);
538}
539
540
541/**
542 * Worker for emHistoryAddOrUpdateRecord.
543 */
544DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInit(PEMEXITREC pExitRec, uint64_t uFlatPC, uint32_t uFlagsAndType, uint64_t uExitNo)
545{
546 pExitRec->uFlatPC = uFlatPC;
547 pExitRec->uFlagsAndType = uFlagsAndType;
548 pExitRec->enmAction = EMEXITACTION_NORMAL;
549 pExitRec->bUnused = 0;
550 pExitRec->cMaxInstructionsWithoutExit = 64;
551 pExitRec->uLastExitNo = uExitNo;
552 pExitRec->cHits = 1;
553 return NULL;
554}
555
556
557/**
558 * Worker for emHistoryAddOrUpdateRecord.
559 */
560DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitNew(PVMCPU pVCpu, PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
561 PEMEXITREC pExitRec, uint64_t uFlatPC,
562 uint32_t uFlagsAndType, uint64_t uExitNo)
563{
564 pHistEntry->idxSlot = (uint32_t)idxSlot;
565 pVCpu->em.s.cExitRecordUsed++;
566 LogFlow(("emHistoryRecordInitNew: [%#x] = %#07x %016RX64; (%u of %u used)\n", idxSlot, uFlagsAndType, uFlatPC,
567 pVCpu->em.s.cExitRecordUsed, RT_ELEMENTS(pVCpu->em.s.aExitRecords) ));
568 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
569}
570
571
572/**
573 * Worker for emHistoryAddOrUpdateRecord.
574 */
575DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitReplacement(PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
576 PEMEXITREC pExitRec, uint64_t uFlatPC,
577 uint32_t uFlagsAndType, uint64_t uExitNo)
578{
579 pHistEntry->idxSlot = (uint32_t)idxSlot;
580 LogFlow(("emHistoryRecordInitReplacement: [%#x] = %#07x %016RX64 replacing %#07x %016RX64 with %u hits, %u exits old\n",
581 idxSlot, uFlagsAndType, uFlatPC, pExitRec->uFlagsAndType, pExitRec->uFlatPC, pExitRec->cHits,
582 uExitNo - pExitRec->uLastExitNo));
583 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
584}
585
586
587/**
588 * Adds or updates the EMEXITREC for this PC/type and decide on an action.
589 *
590 * @returns Pointer to an exit record if special action should be taken using
591 * EMHistoryExec(). Take normal exit action when NULL.
592 *
593 * @param pVCpu The cross context virtual CPU structure.
594 * @param uFlagsAndType Combined flags and type, EMEXIT_F_KIND_EM set and
595 * both EMEXIT_F_CS_EIP and EMEXIT_F_UNFLATTENED_PC are clear.
596 * @param uFlatPC The flattened program counter.
597 * @param pHistEntry The exit history entry.
598 * @param uExitNo The current exit number.
599 */
600static PCEMEXITREC emHistoryAddOrUpdateRecord(PVMCPU pVCpu, uint64_t uFlagsAndType, uint64_t uFlatPC,
601 PEMEXITENTRY pHistEntry, uint64_t uExitNo)
602{
603 /*
604 * Work the hash table.
605 */
606 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
607#define EM_EXIT_RECORDS_IDX_MASK 0x3ff
608 uintptr_t idxSlot = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
609 PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
610 if (pExitRec->uFlatPC == uFlatPC)
611 {
612 Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
613 pHistEntry->idxSlot = (uint32_t)idxSlot;
614 if (pExitRec->uFlagsAndType == uFlagsAndType)
615 {
616 pExitRec->uLastExitNo = uExitNo;
617 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[0]);
618 }
619 else
620 {
621 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[0]);
622 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
623 }
624 }
625 else if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
626 {
627 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[0]);
628 return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
629 }
630 else
631 {
632 /*
633 * Collision. We calculate a new hash for stepping away from the first,
634 * doing up to 8 steps away before replacing the least recently used record.
635 */
636 uintptr_t idxOldest = idxSlot;
637 uint64_t uOldestExitNo = pExitRec->uLastExitNo;
638 unsigned iOldestStep = 0;
639 unsigned iStep = 1;
640 uintptr_t const idxAdd = (uintptr_t)(uFlatPC >> 11) & (EM_EXIT_RECORDS_IDX_MASK / 4);
641 for (;;)
642 {
643 Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
644 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecNew) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
645 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
646 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecTypeChanged) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
647
648 /* Step to the next slot. */
649 idxSlot += idxAdd;
650 idxSlot &= EM_EXIT_RECORDS_IDX_MASK;
651 pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
652
653 /* Does it match? */
654 if (pExitRec->uFlatPC == uFlatPC)
655 {
656 Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
657 pHistEntry->idxSlot = (uint32_t)idxSlot;
658 if (pExitRec->uFlagsAndType == uFlagsAndType)
659 {
660 pExitRec->uLastExitNo = uExitNo;
661 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[iStep]);
662 break;
663 }
664 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[iStep]);
665 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
666 }
667
668 /* Is it free? */
669 if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
670 {
671 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[iStep]);
672 return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
673 }
674
675 /* Is it the least recently used one? */
676 if (pExitRec->uLastExitNo < uOldestExitNo)
677 {
678 uOldestExitNo = pExitRec->uLastExitNo;
679 idxOldest = idxSlot;
680 iOldestStep = iStep;
681 }
682
683 /* Next iteration? */
684 iStep++;
685 Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced));
686 if (RT_LIKELY(iStep < 8 + 1))
687 { /* likely */ }
688 else
689 {
690 /* Replace the least recently used slot. */
691 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecReplaced[iOldestStep]);
692 pExitRec = &pVCpu->em.s.aExitRecords[idxOldest];
693 return emHistoryRecordInitReplacement(pHistEntry, idxOldest, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
694 }
695 }
696 }
697
698 /*
699 * Found an existing record.
700 */
701 switch (pExitRec->enmAction)
702 {
703 case EMEXITACTION_NORMAL:
704 {
705 uint64_t const cHits = ++pExitRec->cHits;
706 if (cHits < 256)
707 return NULL;
708 LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> EXEC_PROBE\n", idxSlot, uFlagsAndType, uFlatPC));
709 pExitRec->enmAction = EMEXITACTION_EXEC_PROBE;
710 return pExitRec;
711 }
712
713 case EMEXITACTION_NORMAL_PROBED:
714 pExitRec->cHits += 1;
715 return NULL;
716
717 default:
718 pExitRec->cHits += 1;
719 return pExitRec;
720
721 /* This will happen if the caller ignores or cannot serve the probe
722 request (forced to ring-3, whatever). We retry this 256 times. */
723 case EMEXITACTION_EXEC_PROBE:
724 {
725 uint64_t const cHits = ++pExitRec->cHits;
726 if (cHits < 512)
727 return pExitRec;
728 pExitRec->enmAction = EMEXITACTION_NORMAL_PROBED;
729 LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> PROBED\n", idxSlot, uFlagsAndType, uFlatPC));
730 return NULL;
731 }
732 }
733}
734
735
736/**
737 * Adds an exit to the history for this CPU.
738 *
739 * @returns Pointer to an exit record if special action should be taken using
740 * EMHistoryExec(). Take normal exit action when NULL.
741 *
742 * @param pVCpu The cross context virtual CPU structure.
743 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
744 * @param uFlatPC The flattened program counter (RIP). UINT64_MAX if not available.
745 * @param uTimestamp The TSC value for the exit, 0 if not available.
746 * @thread EMT(pVCpu)
747 */
748VMM_INT_DECL(PCEMEXITREC) EMHistoryAddExit(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
749{
750 VMCPU_ASSERT_EMT(pVCpu);
751
752 /*
753 * Add the exit history entry.
754 */
755 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
756 uint64_t uExitNo = pVCpu->em.s.iNextExit++;
757 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
758 pHistEntry->uFlatPC = uFlatPC;
759 pHistEntry->uTimestamp = uTimestamp;
760 pHistEntry->uFlagsAndType = uFlagsAndType;
761 pHistEntry->idxSlot = UINT32_MAX;
762
763 /*
764 * If common exit type, we will insert/update the exit into the exit record hash table.
765 */
766 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
767 && pVCpu->em.s.fExitOptimizationEnabled
768 && uFlatPC != UINT64_MAX)
769 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
770 return NULL;
771}
772
773
#ifdef IN_RC
/**
 * Special raw-mode interface for adding an exit to the history.
 *
 * Currently this is only for recording, not optimizing, so no return value. If
 * we start seriously caring about raw-mode again, we may extend it.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @param   uCs             The CS.
 * @param   uEip            The EIP.
 * @param   uTimestamp      The TSC value for the exit, 0 if not available.
 * @thread  EMT(0)
 */
VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
{
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t const uExitNo    = pVCpu->em.s.iNextExit++;
    PEMEXITENTRY   pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    /* CS:EIP is stored unflattened, CS in the high dword. */
    pHistEntry->uFlatPC       = ((uint64_t)uCs << 32) | uEip;
    pHistEntry->uTimestamp    = uTimestamp;
    pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
    pHistEntry->idxSlot       = UINT32_MAX;
}
#endif
798
799
#ifdef IN_RING0
/**
 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
 *
 * Patches up the most recent history entry (iNextExit - 1).
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uFlatPC     The flattened program counter (RIP).
 * @param   fFlattened  Set if RIP was subjected to CS.BASE, clear if not.
 */
VMMR0_INT_DECL(void) EMR0HistoryUpdatePC(PVMCPU pVCpu, uint64_t uFlatPC, bool fFlattened)
{
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t const uExitNo    = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY   pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlatPC = uFlatPC;
    if (!fFlattened)
        pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
    else
        pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
}
#endif
820
821
822/**
823 * Interface for convering a engine specific exit to a generic one and get guidance.
824 *
825 * @returns Pointer to an exit record if special action should be taken using
826 * EMHistoryExec(). Take normal exit action when NULL.
827 *
828 * @param pVCpu The cross context virtual CPU structure.
829 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
830 * @thread EMT(pVCpu)
831 */
832VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndType(PVMCPU pVCpu, uint32_t uFlagsAndType)
833{
834 VMCPU_ASSERT_EMT(pVCpu);
835
836 /*
837 * Do the updating.
838 */
839 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
840 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
841 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
842 pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));
843
844 /*
845 * If common exit type, we will insert/update the exit into the exit record hash table.
846 */
847 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
848 && pVCpu->em.s.fExitOptimizationEnabled
849 && pHistEntry->uFlatPC != UINT64_MAX)
850 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
851 return NULL;
852}
853
854
855/**
856 * Interface for convering a engine specific exit to a generic one and get
857 * guidance, supplying flattened PC too.
858 *
859 * @returns Pointer to an exit record if special action should be taken using
860 * EMHistoryExec(). Take normal exit action when NULL.
861 *
862 * @param pVCpu The cross context virtual CPU structure.
863 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
864 * @param uFlatPC The flattened program counter (RIP).
865 * @thread EMT(pVCpu)
866 */
867VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
868{
869 VMCPU_ASSERT_EMT(pVCpu);
870 Assert(uFlatPC != UINT64_MAX);
871
872 /*
873 * Do the updating.
874 */
875 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
876 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
877 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
878 pHistEntry->uFlagsAndType = uFlagsAndType;
879 pHistEntry->uFlatPC = uFlatPC;
880
881 /*
882 * If common exit type, we will insert/update the exit into the exit record hash table.
883 */
884 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
885 && pVCpu->em.s.fExitOptimizationEnabled)
886 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
887 return NULL;
888}
889
890
891/**
892 * Locks REM execution to a single VCPU.
893 *
894 * @param pVM The cross context VM structure.
895 */
896VMMDECL(void) EMRemLock(PVM pVM)
897{
898#ifdef VBOX_WITH_REM
899 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
900 return; /* early init */
901
902 Assert(!PGMIsLockOwner(pVM));
903 Assert(!IOMIsLockWriteOwner(pVM));
904 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
905 AssertRCSuccess(rc);
906#else
907 RT_NOREF(pVM);
908#endif
909}
910
911
912/**
913 * Unlocks REM execution
914 *
915 * @param pVM The cross context VM structure.
916 */
917VMMDECL(void) EMRemUnlock(PVM pVM)
918{
919#ifdef VBOX_WITH_REM
920 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
921 return; /* early init */
922
923 PDMCritSectLeave(&pVM->em.s.CritSectREM);
924#else
925 RT_NOREF(pVM);
926#endif
927}
928
929
930/**
931 * Check if this VCPU currently owns the REM lock.
932 *
933 * @returns bool owner/not owner
934 * @param pVM The cross context VM structure.
935 */
936VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
937{
938#ifdef VBOX_WITH_REM
939 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
940 return true; /* early init */
941
942 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
943#else
944 RT_NOREF(pVM);
945 return true;
946#endif
947}
948
949
950/**
951 * Try to acquire the REM lock.
952 *
953 * @returns VBox status code
954 * @param pVM The cross context VM structure.
955 */
956VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
957{
958#ifdef VBOX_WITH_REM
959 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
960 return VINF_SUCCESS; /* early init */
961
962 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
963#else
964 RT_NOREF(pVM);
965 return VINF_SUCCESS;
966#endif
967}
968
969
/**
 * @callback_method_impl{FNDISREADBYTES}
 *
 * Instruction byte reader for the disassembler: copies between cbMinRead and
 * cbMaxRead bytes from guest address pDis->uInstrAddr + offInstr into
 * pDis->abInstr.  pDis->pvUser must be the PVMCPU of the calling EMT.
 */
static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
{
    PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
    PVM pVM = pVCpu->CTX_SUFF(pVM);
#endif
    RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
    int rc;

    /*
     * Figure how much we can or must read.
     */
    /* Stay within the current guest page if possible... */
    size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
    if (cbToRead > cbMaxRead)
        cbToRead = cbMaxRead;
    else if (cbToRead < cbMinRead)
        cbToRead = cbMinRead; /* ...unless the minimum forces crossing into the next page. */

#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
    /*
     * We might be called upon to interpret an instruction in a patch.
     */
    if (PATMIsPatchGCAddr(pVM, uSrcAddr))
    {
# ifdef IN_RC
        /* Patch memory is directly addressable in raw-mode context. */
        memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
# else
        /* In ring-3, translate the patch GC address to a host pointer first. */
        memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVM, uSrcAddr), cbToRead);
# endif
        rc = VINF_SUCCESS;
    }
    else
#endif
    {
# ifdef IN_RC
        /*
         * Try access it thru the shadow page tables first. Fall back on the
         * slower PGM method if it fails because the TLB or page table was
         * modified recently.
         */
        rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
        if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
        {
            /* Retry with just the minimum size; the tail may be on an inaccessible page. */
            cbToRead = cbMinRead;
            rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
        }
        if (rc == VERR_ACCESS_DENIED)
#endif
        {
            rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
            if (RT_FAILURE(rc))
            {
                if (cbToRead > cbMinRead)
                {
                    /* Retry with the minimum read size before giving up. */
                    cbToRead = cbMinRead;
                    rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
                }
                if (RT_FAILURE(rc))
                {
#ifndef IN_RC
                    /*
                     * If we fail to find the page via the guest's page tables
                     * we invalidate the page in the host TLB (pertaining to
                     * the guest in the NestedPaging case). See @bugref{6043}.
                     */
                    if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
                    {
                        HMInvalidatePage(pVCpu, uSrcAddr);
                        /* The read may span two pages; invalidate the second one too. */
                        if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
                            HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
                    }
#endif
                }
            }
        }
    }

    /* Tell the disassembler how many instruction bytes are now cached. */
    pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
    return rc;
}
1053
1054
1055#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
/** Disassembles one instruction at InstrGC using the emReadBytes reader,
 *  honouring the CPU mode already set up in pDis->uCpuMode; returns the
 *  DISInstrWithReader status and the instruction length in *pOpsize. */
DECLINLINE(int) emDisCoreOne(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
{
    NOREF(pVM);
    return DISInstrWithReader(InstrGC, (DISCPUMODE)pDis->uCpuMode, emReadBytes, pVCpu, pDis, pOpsize);
}
1061#endif
1062
1063
1064/**
1065 * Disassembles the current instruction.
1066 *
1067 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
1068 * details.
1069 *
1070 * @param pVM The cross context VM structure.
1071 * @param pVCpu The cross context virtual CPU structure.
1072 * @param pDis Where to return the parsed instruction info.
1073 * @param pcbInstr Where to return the instruction size. (optional)
1074 */
1075VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
1076{
1077 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
1078 RTGCPTR GCPtrInstr;
1079#if 0
1080 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
1081#else
1082/** @todo Get the CPU mode as well while we're at it! */
1083 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
1084 pCtxCore->rip, &GCPtrInstr);
1085#endif
1086 if (RT_FAILURE(rc))
1087 {
1088 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
1089 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
1090 return rc;
1091 }
1092 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
1093}
1094
1095
1096/**
1097 * Disassembles one instruction.
1098 *
1099 * This is used by internally by the interpreter and by trap/access handlers.
1100 *
1101 * @returns VBox status code.
1102 *
1103 * @param pVM The cross context VM structure.
1104 * @param pVCpu The cross context virtual CPU structure.
1105 * @param GCPtrInstr The flat address of the instruction.
1106 * @param pCtxCore The context core (used to determine the cpu mode).
1107 * @param pDis Where to return the parsed instruction info.
1108 * @param pcbInstr Where to return the instruction size. (optional)
1109 */
1110VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
1111 PDISCPUSTATE pDis, unsigned *pcbInstr)
1112{
1113 NOREF(pVM);
1114 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
1115 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
1116 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
1117 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
1118 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
1119 if (RT_SUCCESS(rc))
1120 return VINF_SUCCESS;
1121 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
1122 return rc;
1123}
1124
1125
1126#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
1127static void emCompareWithIem(PVMCPU pVCpu, PCCPUMCTX pEmCtx, PCCPUMCTX pIemCtx,
1128 VBOXSTRICTRC rcEm, VBOXSTRICTRC rcIem,
1129 uint32_t cbEm, uint32_t cbIem)
1130{
1131 /* Quick compare. */
1132 if ( rcEm == rcIem
1133 && cbEm == cbIem
1134 && g_cbEmWrote == g_cbIemWrote
1135 && memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote) == 0
1136 && memcmp(pIemCtx, pEmCtx, sizeof(*pIemCtx)) == 0
1137 && (g_fEmFFs & g_fInterestingFFs) == (g_fIemFFs & g_fInterestingFFs)
1138 )
1139 return;
1140
1141 /* Report exact differences. */
1142 RTLogPrintf("! EM and IEM differs at %04x:%08RGv !\n", g_IncomingCtx.cs.Sel, g_IncomingCtx.rip);
1143 if (rcEm != rcIem)
1144 RTLogPrintf(" * rcIem=%Rrc rcEm=%Rrc\n", VBOXSTRICTRC_VAL(rcIem), VBOXSTRICTRC_VAL(rcEm));
1145 else if (cbEm != cbIem)
1146 RTLogPrintf(" * cbIem=%#x cbEm=%#x\n", cbIem, cbEm);
1147
1148 if (RT_SUCCESS(rcEm) && RT_SUCCESS(rcIem))
1149 {
1150 if (g_cbIemWrote != g_cbEmWrote)
1151 RTLogPrintf("!! g_cbIemWrote=%#x g_cbEmWrote=%#x\n", g_cbIemWrote, g_cbEmWrote);
1152 else if (memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote))
1153 {
1154 RTLogPrintf("!! IemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
1155 RTLogPrintf("!! EemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
1156 }
1157
1158 if ((g_fEmFFs & g_fInterestingFFs) != (g_fIemFFs & g_fInterestingFFs))
1159 RTLogPrintf("!! g_fIemFFs=%#x g_fEmFFs=%#x (diff=%#x)\n", g_fIemFFs & g_fInterestingFFs,
1160 g_fEmFFs & g_fInterestingFFs, (g_fIemFFs ^ g_fEmFFs) & g_fInterestingFFs);
1161
1162# define CHECK_FIELD(a_Field) \
1163 do \
1164 { \
1165 if (pEmCtx->a_Field != pIemCtx->a_Field) \
1166 { \
1167 switch (sizeof(pEmCtx->a_Field)) \
1168 { \
1169 case 1: RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
1170 case 2: RTLogPrintf("!! %8s differs - iem=%04x - em=%04x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
1171 case 4: RTLogPrintf("!! %8s differs - iem=%08x - em=%08x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
1172 case 8: RTLogPrintf("!! %8s differs - iem=%016llx - em=%016llx\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
1173 default: RTLogPrintf("!! %8s differs\n", #a_Field); break; \
1174 } \
1175 cDiffs++; \
1176 } \
1177 } while (0)
1178
1179# define CHECK_BIT_FIELD(a_Field) \
1180 do \
1181 { \
1182 if (pEmCtx->a_Field != pIemCtx->a_Field) \
1183 { \
1184 RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); \
1185 cDiffs++; \
1186 } \
1187 } while (0)
1188
1189# define CHECK_SEL(a_Sel) \
1190 do \
1191 { \
1192 CHECK_FIELD(a_Sel.Sel); \
1193 CHECK_FIELD(a_Sel.Attr.u); \
1194 CHECK_FIELD(a_Sel.u64Base); \
1195 CHECK_FIELD(a_Sel.u32Limit); \
1196 CHECK_FIELD(a_Sel.fFlags); \
1197 } while (0)
1198
1199 unsigned cDiffs = 0;
1200 if (memcmp(&pEmCtx->fpu, &pIemCtx->fpu, sizeof(pIemCtx->fpu)))
1201 {
1202 RTLogPrintf(" the FPU state differs\n");
1203 cDiffs++;
1204 CHECK_FIELD(fpu.FCW);
1205 CHECK_FIELD(fpu.FSW);
1206 CHECK_FIELD(fpu.FTW);
1207 CHECK_FIELD(fpu.FOP);
1208 CHECK_FIELD(fpu.FPUIP);
1209 CHECK_FIELD(fpu.CS);
1210 CHECK_FIELD(fpu.Rsrvd1);
1211 CHECK_FIELD(fpu.FPUDP);
1212 CHECK_FIELD(fpu.DS);
1213 CHECK_FIELD(fpu.Rsrvd2);
1214 CHECK_FIELD(fpu.MXCSR);
1215 CHECK_FIELD(fpu.MXCSR_MASK);
1216 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
1217 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
1218 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
1219 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
1220 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
1221 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
1222 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
1223 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
1224 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
1225 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
1226 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
1227 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
1228 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
1229 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
1230 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
1231 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
1232 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
1233 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
1234 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
1235 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
1236 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
1237 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
1238 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
1239 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
1240 for (unsigned i = 0; i < RT_ELEMENTS(pEmCtx->fpu.au32RsrvdRest); i++)
1241 CHECK_FIELD(fpu.au32RsrvdRest[i]);
1242 }
1243 CHECK_FIELD(rip);
1244 if (pEmCtx->rflags.u != pIemCtx->rflags.u)
1245 {
1246 RTLogPrintf("!! rflags differs - iem=%08llx em=%08llx\n", pIemCtx->rflags.u, pEmCtx->rflags.u);
1247 CHECK_BIT_FIELD(rflags.Bits.u1CF);
1248 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
1249 CHECK_BIT_FIELD(rflags.Bits.u1PF);
1250 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
1251 CHECK_BIT_FIELD(rflags.Bits.u1AF);
1252 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
1253 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
1254 CHECK_BIT_FIELD(rflags.Bits.u1SF);
1255 CHECK_BIT_FIELD(rflags.Bits.u1TF);
1256 CHECK_BIT_FIELD(rflags.Bits.u1IF);
1257 CHECK_BIT_FIELD(rflags.Bits.u1DF);
1258 CHECK_BIT_FIELD(rflags.Bits.u1OF);
1259 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
1260 CHECK_BIT_FIELD(rflags.Bits.u1NT);
1261 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
1262 CHECK_BIT_FIELD(rflags.Bits.u1RF);
1263 CHECK_BIT_FIELD(rflags.Bits.u1VM);
1264 CHECK_BIT_FIELD(rflags.Bits.u1AC);
1265 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
1266 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
1267 CHECK_BIT_FIELD(rflags.Bits.u1ID);
1268 }
1269
1270 if (!g_fIgnoreRaxRdx)
1271 CHECK_FIELD(rax);
1272 CHECK_FIELD(rcx);
1273 if (!g_fIgnoreRaxRdx)
1274 CHECK_FIELD(rdx);
1275 CHECK_FIELD(rbx);
1276 CHECK_FIELD(rsp);
1277 CHECK_FIELD(rbp);
1278 CHECK_FIELD(rsi);
1279 CHECK_FIELD(rdi);
1280 CHECK_FIELD(r8);
1281 CHECK_FIELD(r9);
1282 CHECK_FIELD(r10);
1283 CHECK_FIELD(r11);
1284 CHECK_FIELD(r12);
1285 CHECK_FIELD(r13);
1286 CHECK_SEL(cs);
1287 CHECK_SEL(ss);
1288 CHECK_SEL(ds);
1289 CHECK_SEL(es);
1290 CHECK_SEL(fs);
1291 CHECK_SEL(gs);
1292 CHECK_FIELD(cr0);
1293 CHECK_FIELD(cr2);
1294 CHECK_FIELD(cr3);
1295 CHECK_FIELD(cr4);
1296 CHECK_FIELD(dr[0]);
1297 CHECK_FIELD(dr[1]);
1298 CHECK_FIELD(dr[2]);
1299 CHECK_FIELD(dr[3]);
1300 CHECK_FIELD(dr[6]);
1301 CHECK_FIELD(dr[7]);
1302 CHECK_FIELD(gdtr.cbGdt);
1303 CHECK_FIELD(gdtr.pGdt);
1304 CHECK_FIELD(idtr.cbIdt);
1305 CHECK_FIELD(idtr.pIdt);
1306 CHECK_SEL(ldtr);
1307 CHECK_SEL(tr);
1308 CHECK_FIELD(SysEnter.cs);
1309 CHECK_FIELD(SysEnter.eip);
1310 CHECK_FIELD(SysEnter.esp);
1311 CHECK_FIELD(msrEFER);
1312 CHECK_FIELD(msrSTAR);
1313 CHECK_FIELD(msrPAT);
1314 CHECK_FIELD(msrLSTAR);
1315 CHECK_FIELD(msrCSTAR);
1316 CHECK_FIELD(msrSFMASK);
1317 CHECK_FIELD(msrKERNELGSBASE);
1318
1319# undef CHECK_FIELD
1320# undef CHECK_BIT_FIELD
1321 }
1322}
1323#endif /* VBOX_COMPARE_IEM_AND_EM */
1324
1325
1326/**
1327 * Interprets the current instruction.
1328 *
1329 * @returns VBox status code.
1330 * @retval VINF_* Scheduling instructions.
1331 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1332 * @retval VERR_* Fatal errors.
1333 *
1334 * @param pVCpu The cross context virtual CPU structure.
1335 * @param pRegFrame The register frame.
1336 * Updates the EIP if an instruction was executed successfully.
1337 * @param pvFault The fault address (CR2).
1338 *
1339 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1340 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1341 * to worry about e.g. invalid modrm combinations (!)
1342 */
1343VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1344{
1345 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1346 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1347#ifdef VBOX_WITH_IEM
1348 NOREF(pvFault);
1349
1350# ifdef VBOX_COMPARE_IEM_AND_EM
1351 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1352 g_IncomingCtx = *pCtx;
1353 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1354 g_cbEmWrote = g_cbIemWrote = 0;
1355
1356# ifdef VBOX_COMPARE_IEM_FIRST
1357 /* IEM */
1358 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1359 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1360 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1361 rcIem = VERR_EM_INTERPRETER;
1362 g_IemCtx = *pCtx;
1363 g_fIemFFs = pVCpu->fLocalForcedActions;
1364 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1365 *pCtx = g_IncomingCtx;
1366# endif
1367
1368 /* EM */
1369 RTGCPTR pbCode;
1370 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1371 if (RT_SUCCESS(rcEm))
1372 {
1373 uint32_t cbOp;
1374 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1375 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1376 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1377 if (RT_SUCCESS(rcEm))
1378 {
1379 Assert(cbOp == pDis->cbInstr);
1380 uint32_t cbIgnored;
1381 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
1382 if (RT_SUCCESS(rcEm))
1383 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1384
1385 }
1386 rcEm = VERR_EM_INTERPRETER;
1387 }
1388 else
1389 rcEm = VERR_EM_INTERPRETER;
1390# ifdef VBOX_SAME_AS_EM
1391 if (rcEm == VERR_EM_INTERPRETER)
1392 {
1393 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1394 return rcEm;
1395 }
1396# endif
1397 g_EmCtx = *pCtx;
1398 g_fEmFFs = pVCpu->fLocalForcedActions;
1399 VBOXSTRICTRC rc = rcEm;
1400
1401# ifdef VBOX_COMPARE_IEM_LAST
1402 /* IEM */
1403 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1404 *pCtx = g_IncomingCtx;
1405 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1406 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1407 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1408 rcIem = VERR_EM_INTERPRETER;
1409 g_IemCtx = *pCtx;
1410 g_fIemFFs = pVCpu->fLocalForcedActions;
1411 rc = rcIem;
1412# endif
1413
1414# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1415 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1416# endif
1417
1418# else
1419 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1420 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1421 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1422 rc = VERR_EM_INTERPRETER;
1423# endif
1424 if (rc != VINF_SUCCESS)
1425 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1426
1427 return rc;
1428#else
1429 RTGCPTR pbCode;
1430 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1431 if (RT_SUCCESS(rc))
1432 {
1433 uint32_t cbOp;
1434 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1435 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1436 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1437 if (RT_SUCCESS(rc))
1438 {
1439 Assert(cbOp == pDis->cbInstr);
1440 uint32_t cbIgnored;
1441 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
1442 if (RT_SUCCESS(rc))
1443 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1444
1445 return rc;
1446 }
1447 }
1448 return VERR_EM_INTERPRETER;
1449#endif
1450}
1451
1452
1453/**
1454 * Interprets the current instruction.
1455 *
1456 * @returns VBox status code.
1457 * @retval VINF_* Scheduling instructions.
1458 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1459 * @retval VERR_* Fatal errors.
1460 *
1461 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1462 * @param pRegFrame The register frame.
1463 * Updates the EIP if an instruction was executed successfully.
1464 * @param pvFault The fault address (CR2).
1465 * @param pcbWritten Size of the write (if applicable).
1466 *
1467 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1468 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1469 * to worry about e.g. invalid modrm combinations (!)
1470 */
VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
{
    LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
#ifdef VBOX_WITH_IEM
    NOREF(pvFault);

# ifdef VBOX_COMPARE_IEM_AND_EM
    /* Snapshot the incoming state so both interpreters start from the same context. */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    g_IncomingCtx = *pCtx;
    g_fIncomingFFs = pVCpu->fLocalForcedActions;
    g_cbEmWrote = g_cbIemWrote = 0;

# ifdef VBOX_COMPARE_IEM_FIRST
    /* IEM */
    uint32_t cbIemWritten = 0;
    VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
    if (RT_UNLIKELY(   rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rcIem = VERR_EM_INTERPRETER;
    g_IemCtx = *pCtx;
    g_fIemFFs = pVCpu->fLocalForcedActions;
    /* Restore the incoming state before letting EM have a go. */
    pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
    *pCtx = g_IncomingCtx;
# endif

    /* EM */
    uint32_t cbEmWritten = 0;
    RTGCPTR pbCode;
    VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
    if (RT_SUCCESS(rcEm))
    {
        uint32_t cbOp;
        PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
        pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
        rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
        if (RT_SUCCESS(rcEm))
        {
            Assert(cbOp == pDis->cbInstr);
            rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbEmWritten);
            if (RT_SUCCESS(rcEm))
                pRegFrame->rip += cbOp; /* Move on to the next instruction. */

        }
        else
            rcEm = VERR_EM_INTERPRETER;
    }
    else
        rcEm = VERR_EM_INTERPRETER;
# ifdef VBOX_SAME_AS_EM
    if (rcEm == VERR_EM_INTERPRETER)
    {
        Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
        return rcEm;
    }
# endif
    g_EmCtx = *pCtx;
    g_fEmFFs = pVCpu->fLocalForcedActions;
    *pcbWritten = cbEmWritten;
    VBOXSTRICTRC rc = rcEm;

# ifdef VBOX_COMPARE_IEM_LAST
    /* IEM */
    pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
    *pCtx = g_IncomingCtx;
    uint32_t cbIemWritten = 0;
    VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
    if (RT_UNLIKELY(   rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rcIem = VERR_EM_INTERPRETER;
    g_IemCtx = *pCtx;
    g_fIemFFs = pVCpu->fLocalForcedActions;
    *pcbWritten = cbIemWritten;
    rc = rcIem;
# endif

# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
    emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, cbEmWritten, cbIemWritten);
# endif

# else
    VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
    if (RT_UNLIKELY(   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rc = VERR_EM_INTERPRETER;
# endif
    if (rc != VINF_SUCCESS)
        Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));

    return rc;
#else
    RTGCPTR pbCode;
    VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
    if (RT_SUCCESS(rc))
    {
        uint32_t cbOp;
        PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
        pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
        rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
        if (RT_SUCCESS(rc))
        {
            Assert(cbOp == pDis->cbInstr);
            rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, pcbWritten);
            if (RT_SUCCESS(rc))
                pRegFrame->rip += cbOp; /* Move on to the next instruction. */

            return rc;
        }
    }
    return VERR_EM_INTERPRETER;
#endif
}
1583
1584
1585/**
1586 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1587 *
1588 * IP/EIP/RIP *IS* updated!
1589 *
1590 * @returns VBox strict status code.
1591 * @retval VINF_* Scheduling instructions. When these are returned, it
1592 * starts to get a bit tricky to know whether code was
1593 * executed or not... We'll address this when it becomes a problem.
1594 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1595 * @retval VERR_* Fatal errors.
1596 *
1597 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1598 * @param pDis The disassembler cpu state for the instruction to be
1599 * interpreted.
1600 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1601 * @param pvFault The fault address (CR2).
1602 * @param enmCodeType Code type (user/supervisor)
1603 *
1604 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1605 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1606 * to worry about e.g. invalid modrm combinations (!)
1607 *
1608 * @todo At this time we do NOT check if the instruction overwrites vital information.
1609 * Make sure this can't happen!! (will add some assertions/checks later)
1610 */
VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
                                                            RTGCPTR pvFault, EMCODETYPE enmCodeType)
{
    LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
#ifdef VBOX_WITH_IEM
    NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);

# ifdef VBOX_COMPARE_IEM_AND_EM
    /* Snapshot the incoming state so both interpreters start from the same context. */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    g_IncomingCtx = *pCtx;
    g_fIncomingFFs = pVCpu->fLocalForcedActions;
    g_cbEmWrote = g_cbIemWrote = 0;

# ifdef VBOX_COMPARE_IEM_FIRST
    /* IEM goes first, reusing the bytes already prefetched into pDis->abInstr. */
    VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
    if (RT_UNLIKELY(   rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rcIem = VERR_EM_INTERPRETER;
    g_IemCtx = *pCtx;
    g_fIemFFs = pVCpu->fLocalForcedActions;
    /* Restore the incoming state before letting EM have a go. */
    pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
    *pCtx = g_IncomingCtx;
# endif

    /* EM */
    uint32_t cbIgnored;
    VBOXSTRICTRC rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
    if (RT_SUCCESS(rcEm))
        pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
# ifdef VBOX_SAME_AS_EM
    if (rcEm == VERR_EM_INTERPRETER)
    {
        Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
        return rcEm;
    }
# endif
    g_EmCtx = *pCtx;
    g_fEmFFs = pVCpu->fLocalForcedActions;
    VBOXSTRICTRC rc = rcEm;

# ifdef VBOX_COMPARE_IEM_LAST
    /* IEM runs last, again from the saved incoming state. */
    pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
    *pCtx = g_IncomingCtx;
    VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
    if (RT_UNLIKELY(   rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rcIem = VERR_EM_INTERPRETER;
    g_IemCtx = *pCtx;
    g_fIemFFs = pVCpu->fLocalForcedActions;
    rc = rcIem;
# endif

# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
    emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
# endif

# else
    VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
    if (RT_UNLIKELY(   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rc = VERR_EM_INTERPRETER;
# endif

    if (rc != VINF_SUCCESS)
        Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));

    return rc;
#else
    uint32_t cbIgnored;
    VBOXSTRICTRC rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
    if (RT_SUCCESS(rc))
        pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
    return rc;
#endif
}
1688
1689#ifdef IN_RC
1690
1691DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1692{
1693 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1694 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1695 return rc;
1696 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1697}
1698
1699
1700/**
1701 * Interpret IRET (currently only to V86 code) - PATM only.
1702 *
1703 * @returns VBox status code.
1704 * @param pVM The cross context VM structure.
1705 * @param pVCpu The cross context virtual CPU structure.
1706 * @param pRegFrame The register frame.
1707 *
1708 */
VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
{
    RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
    RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
    int rc;

    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    Assert(!CPUMIsGuestIn64BitCode(pVCpu));
    /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
     *        this function.  Fear that it may guru on us, thus not converted to
     *        IEM. */

    /* Read EIP, CS and EFLAGS from the IRET frame; failures are OR'ed together
       and checked once below. */
    rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
    rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
    rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
    AssertRCReturn(rc, VERR_EM_INTERPRETER);
    /* This helper only handles returns to V86 mode. */
    AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);

    /* The V86 IRET frame additionally carries ESP, SS and the data segment registers. */
    rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
    rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
    rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
    rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
    rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
    rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
    AssertRCReturn(rc, VERR_EM_INTERPRETER);

    pRegFrame->eip = eip & 0xffff;
    pRegFrame->cs.Sel = cs;

    /* Mask away all reserved bits */
    uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
    eflags &= uMask;

    /* Update the flags via CPUM so the raw-mode flag shadowing stays consistent. */
    CPUMRawSetEFlags(pVCpu, eflags);
    Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);

    pRegFrame->esp = esp;
    pRegFrame->ss.Sel = ss;
    pRegFrame->ds.Sel = ds;
    pRegFrame->es.Sel = es;
    pRegFrame->fs.Sel = fs;
    pRegFrame->gs.Sel = gs;

    return VINF_SUCCESS;
}
1754
1755# ifndef VBOX_WITH_IEM
1756/**
1757 * IRET Emulation.
1758 */
1759static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1760{
1761#ifdef VBOX_WITH_RAW_RING1
1762 NOREF(pvFault); NOREF(pcbSize); NOREF(pDis);
1763 if (EMIsRawRing1Enabled(pVM))
1764 {
1765 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1766 RTGCUINTPTR eip, cs, esp, ss, eflags, uMask;
1767 int rc;
1768 uint32_t cpl, rpl;
1769
1770 /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */
1771 /** @todo we don't verify all the edge cases that generate #GP faults */
1772
1773 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1774 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1775 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1776 * this function. Fear that it may guru on us, thus not converted to
1777 * IEM. */
1778
1779 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1780 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1781 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1782 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1783 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1784
1785 /* Deal with V86 above. */
1786 if (eflags & X86_EFL_VM)
1787 return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
1788
1789 cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
1790 rpl = cs & X86_SEL_RPL;
1791
1792 Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags));
1793 if (rpl != cpl)
1794 {
1795 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1796 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1797 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1798 Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl));
1799 Log(("emInterpretIret: SS:ESP=%04x:%08x\n", ss, esp));
1800 pRegFrame->ss.Sel = ss;
1801 pRegFrame->esp = esp;
1802 }
1803 pRegFrame->cs.Sel = cs;
1804 pRegFrame->eip = eip;
1805
1806 /* Adjust CS & SS as required. */
1807 CPUMRCRecheckRawState(pVCpu, pRegFrame);
1808
1809 /* Mask away all reserved bits */
1810 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1811 eflags &= uMask;
1812
1813 CPUMRawSetEFlags(pVCpu, eflags);
1814 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1815 return VINF_SUCCESS;
1816 }
1817#else
1818 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
1819#endif
1820 return VERR_EM_INTERPRETER;
1821}
1822# endif /* !VBOX_WITH_IEM */
1823
1824#endif /* IN_RC */
1825
1826
1827
1828/*
1829 *
1830 * Old interpreter primitives used by HM, move/eliminate later.
1831 * Old interpreter primitives used by HM, move/eliminate later.
1832 * Old interpreter primitives used by HM, move/eliminate later.
1833 * Old interpreter primitives used by HM, move/eliminate later.
1834 * Old interpreter primitives used by HM, move/eliminate later.
1835 *
1836 */
1837
1838
1839/**
1840 * Interpret CPUID given the parameters in the CPU context.
1841 *
1842 * @returns VBox status code.
1843 * @param pVM The cross context VM structure.
1844 * @param pVCpu The cross context virtual CPU structure.
1845 * @param pRegFrame The register frame.
1846 *
1847 */
1848VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1849{
1850 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1851 uint32_t iLeaf = pRegFrame->eax;
1852 uint32_t iSubLeaf = pRegFrame->ecx;
1853 NOREF(pVM);
1854
1855 /* cpuid clears the high dwords of the affected 64 bits registers. */
1856 pRegFrame->rax = 0;
1857 pRegFrame->rbx = 0;
1858 pRegFrame->rcx = 0;
1859 pRegFrame->rdx = 0;
1860
1861 /* Note: operates the same in 64 and non-64 bits mode. */
1862 CPUMGetGuestCpuId(pVCpu, iLeaf, iSubLeaf, &pRegFrame->eax, &pRegFrame->ebx, &pRegFrame->ecx, &pRegFrame->edx);
1863 Log(("Emulate: CPUID %x/%x -> %08x %08x %08x %08x\n", iLeaf, iSubLeaf, pRegFrame->eax, pRegFrame->ebx, pRegFrame->ecx, pRegFrame->edx));
1864 return VINF_SUCCESS;
1865}
1866
1867
1868/**
1869 * Interpret RDPMC.
1870 *
1871 * @returns VBox status code.
1872 * @param pVM The cross context VM structure.
1873 * @param pVCpu The cross context virtual CPU structure.
1874 * @param pRegFrame The register frame.
1875 *
1876 */
1877VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1878{
1879 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1880 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1881
1882 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1883 if ( !(uCR4 & X86_CR4_PCE)
1884 && CPUMGetGuestCPL(pVCpu) != 0)
1885 {
1886 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1887 return VERR_EM_INTERPRETER; /* genuine #GP */
1888 }
1889
1890 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1891 pRegFrame->rax = 0;
1892 pRegFrame->rdx = 0;
1893 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1894 * ecx but see @bugref{3472}! */
1895
1896 NOREF(pVM);
1897 return VINF_SUCCESS;
1898}
1899
1900
1901/**
1902 * MWAIT Emulation.
1903 */
1904VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1905{
1906 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1907 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1908 NOREF(pVM);
1909
1910 /* Get the current privilege level. */
1911 cpl = CPUMGetGuestCPL(pVCpu);
1912 if (cpl != 0)
1913 return VERR_EM_INTERPRETER; /* supervisor only */
1914
1915 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1916 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1917 return VERR_EM_INTERPRETER; /* not supported */
1918
1919 /*
1920 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1921 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1922 */
1923 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1924 if (pRegFrame->ecx > 1)
1925 {
1926 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1927 return VERR_EM_INTERPRETER; /* illegal value. */
1928 }
1929
1930 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1931 {
1932 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1933 return VERR_EM_INTERPRETER; /* illegal value. */
1934 }
1935
1936 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1937}
1938
1939
1940/**
1941 * MONITOR Emulation.
1942 */
1943VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1944{
1945 uint32_t u32Dummy, u32ExtFeatures, cpl;
1946 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1947 NOREF(pVM);
1948
1949 if (pRegFrame->ecx != 0)
1950 {
1951 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1952 return VERR_EM_INTERPRETER; /* illegal value. */
1953 }
1954
1955 /* Get the current privilege level. */
1956 cpl = CPUMGetGuestCPL(pVCpu);
1957 if (cpl != 0)
1958 return VERR_EM_INTERPRETER; /* supervisor only */
1959
1960 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1961 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1962 return VERR_EM_INTERPRETER; /* not supported */
1963
1964 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1965 return VINF_SUCCESS;
1966}
1967
1968
1969/* VT-x only: */
1970
1971/**
1972 * Interpret INVLPG.
1973 *
1974 * @returns VBox status code.
1975 * @param pVM The cross context VM structure.
1976 * @param pVCpu The cross context virtual CPU structure.
1977 * @param pRegFrame The register frame.
1978 * @param pAddrGC Operand address.
1979 *
1980 */
1981VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC)
1982{
1983 /** @todo is addr always a flat linear address or ds based
1984 * (in absence of segment override prefixes)????
1985 */
1986 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1987 NOREF(pVM); NOREF(pRegFrame);
1988#ifdef IN_RC
1989 LogFlow(("RC: EMULATE: invlpg %RGv\n", pAddrGC));
1990#endif
1991 VBOXSTRICTRC rc = PGMInvalidatePage(pVCpu, pAddrGC);
1992 if ( rc == VINF_SUCCESS
1993 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
1994 return VINF_SUCCESS;
1995 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
1996 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), pAddrGC),
1997 VERR_EM_INTERPRETER);
1998 return rc;
1999}
2000
2001
2002#ifdef LOG_ENABLED
/**
 * Gets a human readable name for the given MSR for RDMSR/WRMSR logging.
 *
 * MSRs the interpreter does not handle are prefixed with "Unsupported";
 * anything not in the table yields "Unknown MSR".  Only compiled when
 * LOG_ENABLED is defined.
 */
static const char *emMSRtoString(uint32_t uMsr)
{
    switch (uMsr)
    {
        case MSR_IA32_APICBASE: return "MSR_IA32_APICBASE";
        case MSR_IA32_CR_PAT: return "MSR_IA32_CR_PAT";
        case MSR_IA32_SYSENTER_CS: return "MSR_IA32_SYSENTER_CS";
        case MSR_IA32_SYSENTER_EIP: return "MSR_IA32_SYSENTER_EIP";
        case MSR_IA32_SYSENTER_ESP: return "MSR_IA32_SYSENTER_ESP";
        case MSR_K6_EFER: return "MSR_K6_EFER";
        case MSR_K8_SF_MASK: return "MSR_K8_SF_MASK";
        case MSR_K6_STAR: return "MSR_K6_STAR";
        case MSR_K8_LSTAR: return "MSR_K8_LSTAR";
        case MSR_K8_CSTAR: return "MSR_K8_CSTAR";
        case MSR_K8_FS_BASE: return "MSR_K8_FS_BASE";
        case MSR_K8_GS_BASE: return "MSR_K8_GS_BASE";
        case MSR_K8_KERNEL_GS_BASE: return "MSR_K8_KERNEL_GS_BASE";
        case MSR_K8_TSC_AUX: return "MSR_K8_TSC_AUX";
        case MSR_IA32_BIOS_SIGN_ID: return "Unsupported MSR_IA32_BIOS_SIGN_ID";
        case MSR_IA32_PLATFORM_ID: return "Unsupported MSR_IA32_PLATFORM_ID";
        case MSR_IA32_BIOS_UPDT_TRIG: return "Unsupported MSR_IA32_BIOS_UPDT_TRIG";
        case MSR_IA32_TSC: return "MSR_IA32_TSC";
        case MSR_IA32_MISC_ENABLE: return "MSR_IA32_MISC_ENABLE";
        case MSR_IA32_MTRR_CAP: return "MSR_IA32_MTRR_CAP";
        case MSR_IA32_MCG_CAP: return "Unsupported MSR_IA32_MCG_CAP";
        case MSR_IA32_MCG_STATUS: return "Unsupported MSR_IA32_MCG_STATUS";
        case MSR_IA32_MCG_CTRL: return "Unsupported MSR_IA32_MCG_CTRL";
        case MSR_IA32_MTRR_DEF_TYPE: return "MSR_IA32_MTRR_DEF_TYPE";
        case MSR_K7_EVNTSEL0: return "Unsupported MSR_K7_EVNTSEL0";
        case MSR_K7_EVNTSEL1: return "Unsupported MSR_K7_EVNTSEL1";
        case MSR_K7_EVNTSEL2: return "Unsupported MSR_K7_EVNTSEL2";
        case MSR_K7_EVNTSEL3: return "Unsupported MSR_K7_EVNTSEL3";
        case MSR_IA32_MC0_CTL: return "Unsupported MSR_IA32_MC0_CTL";
        case MSR_IA32_MC0_STATUS: return "Unsupported MSR_IA32_MC0_STATUS";
        case MSR_IA32_PERFEVTSEL0: return "Unsupported MSR_IA32_PERFEVTSEL0";
        case MSR_IA32_PERFEVTSEL1: return "Unsupported MSR_IA32_PERFEVTSEL1";
        case MSR_IA32_PERF_STATUS: return "MSR_IA32_PERF_STATUS";
        case MSR_IA32_PLATFORM_INFO: return "MSR_IA32_PLATFORM_INFO";
        case MSR_IA32_PERF_CTL: return "Unsupported MSR_IA32_PERF_CTL";
        case MSR_K7_PERFCTR0: return "Unsupported MSR_K7_PERFCTR0";
        case MSR_K7_PERFCTR1: return "Unsupported MSR_K7_PERFCTR1";
        case MSR_K7_PERFCTR2: return "Unsupported MSR_K7_PERFCTR2";
        case MSR_K7_PERFCTR3: return "Unsupported MSR_K7_PERFCTR3";
        case MSR_IA32_PMC0: return "Unsupported MSR_IA32_PMC0";
        case MSR_IA32_PMC1: return "Unsupported MSR_IA32_PMC1";
        case MSR_IA32_PMC2: return "Unsupported MSR_IA32_PMC2";
        case MSR_IA32_PMC3: return "Unsupported MSR_IA32_PMC3";
    }
    return "Unknown MSR";
}
2053#endif /* LOG_ENABLED */
2054
2055
2056/**
2057 * Interpret RDMSR
2058 *
2059 * @returns VBox status code.
2060 * @param pVM The cross context VM structure.
2061 * @param pVCpu The cross context virtual CPU structure.
2062 * @param pRegFrame The register frame.
2063 */
2064VMM_INT_DECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
2065{
2066 NOREF(pVM);
2067
2068 /* Get the current privilege level. */
2069 if (CPUMGetGuestCPL(pVCpu) != 0)
2070 {
2071 Log4(("EM: Refuse RDMSR: CPL != 0\n"));
2072 return VERR_EM_INTERPRETER; /* supervisor only */
2073 }
2074
2075 uint64_t uValue;
2076 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
2077 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2078 {
2079 Log4(("EM: Refuse RDMSR: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2080 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_READ);
2081 return VERR_EM_INTERPRETER;
2082 }
2083 pRegFrame->rax = RT_LO_U32(uValue);
2084 pRegFrame->rdx = RT_HI_U32(uValue);
2085 LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
2086 return VINF_SUCCESS;
2087}
2088
2089
2090/**
2091 * Interpret WRMSR
2092 *
2093 * @returns VBox status code.
2094 * @param pVM The cross context VM structure.
2095 * @param pVCpu The cross context virtual CPU structure.
2096 * @param pRegFrame The register frame.
2097 */
2098VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
2099{
2100 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
2101
2102 /* Check the current privilege level, this instruction is supervisor only. */
2103 if (CPUMGetGuestCPL(pVCpu) != 0)
2104 {
2105 Log4(("EM: Refuse WRMSR: CPL != 0\n"));
2106 return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
2107 }
2108
2109 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
2110 if (rcStrict != VINF_SUCCESS)
2111 {
2112 Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2113 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
2114 return VERR_EM_INTERPRETER;
2115 }
2116 LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
2117 RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
2118 NOREF(pVM);
2119 return VINF_SUCCESS;
2120}
2121
2122
2123/**
2124 * Interpret DRx write.
2125 *
2126 * @returns VBox status code.
2127 * @param pVM The cross context VM structure.
2128 * @param pVCpu The cross context virtual CPU structure.
2129 * @param pRegFrame The register frame.
2130 * @param DestRegDrx DRx register index (USE_REG_DR*)
2131 * @param SrcRegGen General purpose register index (USE_REG_E**))
2132 *
2133 */
2134VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
2135{
2136 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
2137 uint64_t uNewDrX;
2138 int rc;
2139 NOREF(pVM);
2140
2141 if (CPUMIsGuestIn64BitCode(pVCpu))
2142 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
2143 else
2144 {
2145 uint32_t val32;
2146 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
2147 uNewDrX = val32;
2148 }
2149
2150 if (RT_SUCCESS(rc))
2151 {
2152 if (DestRegDrx == 6)
2153 {
2154 uNewDrX |= X86_DR6_RA1_MASK;
2155 uNewDrX &= ~X86_DR6_RAZ_MASK;
2156 }
2157 else if (DestRegDrx == 7)
2158 {
2159 uNewDrX |= X86_DR7_RA1_MASK;
2160 uNewDrX &= ~X86_DR7_RAZ_MASK;
2161 }
2162
2163 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
2164 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
2165 if (RT_SUCCESS(rc))
2166 return rc;
2167 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
2168 }
2169 return VERR_EM_INTERPRETER;
2170}
2171
2172
2173/**
2174 * Interpret DRx read.
2175 *
2176 * @returns VBox status code.
2177 * @param pVM The cross context VM structure.
2178 * @param pVCpu The cross context virtual CPU structure.
2179 * @param pRegFrame The register frame.
2180 * @param DestRegGen General purpose register index (USE_REG_E**))
2181 * @param SrcRegDrx DRx register index (USE_REG_DR*)
2182 */
2183VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
2184{
2185 uint64_t val64;
2186 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
2187 NOREF(pVM);
2188
2189 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
2190 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
2191 if (CPUMIsGuestIn64BitCode(pVCpu))
2192 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
2193 else
2194 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
2195
2196 if (RT_SUCCESS(rc))
2197 return VINF_SUCCESS;
2198
2199 return VERR_EM_INTERPRETER;
2200}
2201
2202
2203#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
2204
2205
2206
2207
2208
2209
2210/*
2211 *
2212 * The old interpreter.
2213 * The old interpreter.
2214 * The old interpreter.
2215 * The old interpreter.
2216 * The old interpreter.
2217 *
2218 */
2219
/**
 * Reads @a cb bytes of guest memory for the old interpreter.
 *
 * In raw-mode context the fast MMGCRamRead path is tried first; the slower
 * page-table walking PGM read is only used when the fast path reports
 * VERR_ACCESS_DENIED.  In ring-0/ring-3 PGM is used directly.
 */
DECLINLINE(int) emRamRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
{
#ifdef IN_RC
    int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
    if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
        return rc;
    /*
     * The page pool cache may end up here in some cases because it
     * flushed one of the shadow mappings used by the trapping
     * instruction and it either flushed the TLB or the CPU reused it.
     */
#else
    NOREF(pVM);
#endif
    return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
}
2236
2237
/**
 * Writes @a cb bytes of guest memory for the old interpreter.
 *
 * Always goes through PGM so zero/shared/write-monitored pages get the
 * treatment they need.  The VBOX_COMPARE_IEM_AND_EM bits capture what EM
 * wrote so it can be compared against IEM afterwards.
 */
DECLINLINE(int) emRamWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, uint32_t cb)
{
    /* Don't use MMGCRamWrite here as it does not respect zero pages, shared
       pages or write monitored pages. */
    NOREF(pVM);
#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
    int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, /*fMayTrap*/ false);
#else
    int rc = VINF_SUCCESS;
#endif
#ifdef VBOX_COMPARE_IEM_AND_EM
    Log(("EM Wrote: %RGv %.*Rhxs rc=%Rrc\n", GCPtrDst, RT_MAX(RT_MIN(cb, 64), 1), pvSrc, rc));
    g_cbEmWrote = cb;
    memcpy(g_abEmWrote, pvSrc, RT_MIN(cb, sizeof(g_abEmWrote)));
#endif
    return rc;
}
2255
2256
2257/** Convert sel:addr to a flat GC address. */
2258DECLINLINE(RTGCPTR) emConvertToFlatAddr(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, PDISOPPARAM pParam, RTGCPTR pvAddr)
2259{
2260 DISSELREG enmPrefixSeg = DISDetectSegReg(pDis, pParam);
2261 return SELMToFlat(pVM, enmPrefixSeg, pRegFrame, pvAddr);
2262}
2263
2264
2265#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
2266/**
2267 * Get the mnemonic for the disassembled instruction.
2268 *
2269 * GC/R0 doesn't include the strings in the DIS tables because
2270 * of limited space.
2271 */
2272static const char *emGetMnemonic(PDISCPUSTATE pDis)
2273{
2274 switch (pDis->pCurInstr->uOpcode)
2275 {
2276 case OP_XCHG: return "Xchg";
2277 case OP_DEC: return "Dec";
2278 case OP_INC: return "Inc";
2279 case OP_POP: return "Pop";
2280 case OP_OR: return "Or";
2281 case OP_AND: return "And";
2282 case OP_MOV: return "Mov";
2283 case OP_INVLPG: return "InvlPg";
2284 case OP_CPUID: return "CpuId";
2285 case OP_MOV_CR: return "MovCRx";
2286 case OP_MOV_DR: return "MovDRx";
2287 case OP_LLDT: return "LLdt";
2288 case OP_LGDT: return "LGdt";
2289 case OP_LIDT: return "LIdt";
2290 case OP_CLTS: return "Clts";
2291 case OP_MONITOR: return "Monitor";
2292 case OP_MWAIT: return "MWait";
2293 case OP_RDMSR: return "Rdmsr";
2294 case OP_WRMSR: return "Wrmsr";
2295 case OP_ADD: return "Add";
2296 case OP_ADC: return "Adc";
2297 case OP_SUB: return "Sub";
2298 case OP_SBB: return "Sbb";
2299 case OP_RDTSC: return "Rdtsc";
2300 case OP_STI: return "Sti";
2301 case OP_CLI: return "Cli";
2302 case OP_XADD: return "XAdd";
2303 case OP_HLT: return "Hlt";
2304 case OP_IRET: return "Iret";
2305 case OP_MOVNTPS: return "MovNTPS";
2306 case OP_STOSWD: return "StosWD";
2307 case OP_WBINVD: return "WbInvd";
2308 case OP_XOR: return "Xor";
2309 case OP_BTR: return "Btr";
2310 case OP_BTS: return "Bts";
2311 case OP_BTC: return "Btc";
2312 case OP_LMSW: return "Lmsw";
2313 case OP_SMSW: return "Smsw";
2314 case OP_CMPXCHG: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg" : "CmpXchg";
2315 case OP_CMPXCHG8B: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg8b" : "CmpXchg8b";
2316
2317 default:
2318 Log(("Unknown opcode %d\n", pDis->pCurInstr->uOpcode));
2319 return "???";
2320 }
2321}
2322#endif /* VBOX_STRICT || LOG_ENABLED */
2323
2324
2325/**
2326 * XCHG instruction emulation.
2327 */
2328static int emInterpretXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2329{
2330 DISQPVPARAMVAL param1, param2;
2331 NOREF(pvFault);
2332
2333 /* Source to make DISQueryParamVal read the register value - ugly hack */
2334 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2335 if(RT_FAILURE(rc))
2336 return VERR_EM_INTERPRETER;
2337
2338 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2339 if(RT_FAILURE(rc))
2340 return VERR_EM_INTERPRETER;
2341
2342#ifdef IN_RC
2343 if (TRPMHasTrap(pVCpu))
2344 {
2345 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2346 {
2347#endif
2348 RTGCPTR pParam1 = 0, pParam2 = 0;
2349 uint64_t valpar1, valpar2;
2350
2351 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
2352 switch(param1.type)
2353 {
2354 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
2355 valpar1 = param1.val.val64;
2356 break;
2357
2358 case DISQPV_TYPE_ADDRESS:
2359 pParam1 = (RTGCPTR)param1.val.val64;
2360 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2361 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2362 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2363 if (RT_FAILURE(rc))
2364 {
2365 AssertMsgFailed(("MMGCRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2366 return VERR_EM_INTERPRETER;
2367 }
2368 break;
2369
2370 default:
2371 AssertFailed();
2372 return VERR_EM_INTERPRETER;
2373 }
2374
2375 switch(param2.type)
2376 {
2377 case DISQPV_TYPE_ADDRESS:
2378 pParam2 = (RTGCPTR)param2.val.val64;
2379 pParam2 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pParam2);
2380 EM_ASSERT_FAULT_RETURN(pParam2 == pvFault, VERR_EM_INTERPRETER);
2381 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar2, pParam2, param2.size);
2382 if (RT_FAILURE(rc))
2383 {
2384 AssertMsgFailed(("MMGCRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2385 }
2386 break;
2387
2388 case DISQPV_TYPE_IMMEDIATE:
2389 valpar2 = param2.val.val64;
2390 break;
2391
2392 default:
2393 AssertFailed();
2394 return VERR_EM_INTERPRETER;
2395 }
2396
2397 /* Write value of parameter 2 to parameter 1 (reg or memory address) */
2398 if (pParam1 == 0)
2399 {
2400 Assert(param1.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
2401 switch(param1.size)
2402 {
2403 case 1: //special case for AH etc
2404 rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t )valpar2); break;
2405 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)valpar2); break;
2406 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)valpar2); break;
2407 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, valpar2); break;
2408 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2409 }
2410 if (RT_FAILURE(rc))
2411 return VERR_EM_INTERPRETER;
2412 }
2413 else
2414 {
2415 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar2, param1.size);
2416 if (RT_FAILURE(rc))
2417 {
2418 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2419 return VERR_EM_INTERPRETER;
2420 }
2421 }
2422
2423 /* Write value of parameter 1 to parameter 2 (reg or memory address) */
2424 if (pParam2 == 0)
2425 {
2426 Assert(param2.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
2427 switch(param2.size)
2428 {
2429 case 1: //special case for AH etc
2430 rc = DISWriteReg8(pRegFrame, pDis->Param2.Base.idxGenReg, (uint8_t )valpar1); break;
2431 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param2.Base.idxGenReg, (uint16_t)valpar1); break;
2432 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param2.Base.idxGenReg, (uint32_t)valpar1); break;
2433 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param2.Base.idxGenReg, valpar1); break;
2434 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2435 }
2436 if (RT_FAILURE(rc))
2437 return VERR_EM_INTERPRETER;
2438 }
2439 else
2440 {
2441 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam2, &valpar1, param2.size);
2442 if (RT_FAILURE(rc))
2443 {
2444 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2445 return VERR_EM_INTERPRETER;
2446 }
2447 }
2448
2449 *pcbSize = param2.size;
2450 return VINF_SUCCESS;
2451#ifdef IN_RC
2452 }
2453 }
2454 return VERR_EM_INTERPRETER;
2455#endif
2456}
2457
2458
2459/**
2460 * INC and DEC emulation.
2461 */
2462static int emInterpretIncDec(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2463 PFNEMULATEPARAM2 pfnEmulate)
2464{
2465 DISQPVPARAMVAL param1;
2466 NOREF(pvFault);
2467
2468 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2469 if(RT_FAILURE(rc))
2470 return VERR_EM_INTERPRETER;
2471
2472#ifdef IN_RC
2473 if (TRPMHasTrap(pVCpu))
2474 {
2475 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2476 {
2477#endif
2478 RTGCPTR pParam1 = 0;
2479 uint64_t valpar1;
2480
2481 if (param1.type == DISQPV_TYPE_ADDRESS)
2482 {
2483 pParam1 = (RTGCPTR)param1.val.val64;
2484 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2485#ifdef IN_RC
2486 /* Safety check (in theory it could cross a page boundary and fault there though) */
2487 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2488#endif
2489 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2490 if (RT_FAILURE(rc))
2491 {
2492 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2493 return VERR_EM_INTERPRETER;
2494 }
2495 }
2496 else
2497 {
2498 AssertFailed();
2499 return VERR_EM_INTERPRETER;
2500 }
2501
2502 uint32_t eflags;
2503
2504 eflags = pfnEmulate(&valpar1, param1.size);
2505
2506 /* Write result back */
2507 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2508 if (RT_FAILURE(rc))
2509 {
2510 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2511 return VERR_EM_INTERPRETER;
2512 }
2513
2514 /* Update guest's eflags and finish. */
2515 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2516 | (eflags & (X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2517
2518 /* All done! */
2519 *pcbSize = param1.size;
2520 return VINF_SUCCESS;
2521#ifdef IN_RC
2522 }
2523 }
2524 return VERR_EM_INTERPRETER;
2525#endif
2526}
2527
2528
2529/**
2530 * POP Emulation.
2531 */
2532static int emInterpretPop(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2533{
2534 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
2535 DISQPVPARAMVAL param1;
2536 NOREF(pvFault);
2537
2538 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2539 if(RT_FAILURE(rc))
2540 return VERR_EM_INTERPRETER;
2541
2542#ifdef IN_RC
2543 if (TRPMHasTrap(pVCpu))
2544 {
2545 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2546 {
2547#endif
2548 RTGCPTR pParam1 = 0;
2549 uint32_t valpar1;
2550 RTGCPTR pStackVal;
2551
2552 /* Read stack value first */
2553 if (CPUMGetGuestCodeBits(pVCpu) == 16)
2554 return VERR_EM_INTERPRETER; /* No legacy 16 bits stuff here, please. */
2555
2556 /* Convert address; don't bother checking limits etc, as we only read here */
2557 pStackVal = SELMToFlat(pVM, DISSELREG_SS, pRegFrame, (RTGCPTR)pRegFrame->esp);
2558 if (pStackVal == 0)
2559 return VERR_EM_INTERPRETER;
2560
2561 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pStackVal, param1.size);
2562 if (RT_FAILURE(rc))
2563 {
2564 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2565 return VERR_EM_INTERPRETER;
2566 }
2567
2568 if (param1.type == DISQPV_TYPE_ADDRESS)
2569 {
2570 pParam1 = (RTGCPTR)param1.val.val64;
2571
2572 /* pop [esp+xx] uses esp after the actual pop! */
2573 AssertCompile(DISGREG_ESP == DISGREG_SP);
2574 if ( (pDis->Param1.fUse & DISUSE_BASE)
2575 && (pDis->Param1.fUse & (DISUSE_REG_GEN16|DISUSE_REG_GEN32))
2576 && pDis->Param1.Base.idxGenReg == DISGREG_ESP
2577 )
2578 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + param1.size);
2579
2580 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2581 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault || (RTGCPTR)pRegFrame->esp == pvFault, VERR_EM_INTERPRETER);
2582 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2583 if (RT_FAILURE(rc))
2584 {
2585 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2586 return VERR_EM_INTERPRETER;
2587 }
2588
2589 /* Update ESP as the last step */
2590 pRegFrame->esp += param1.size;
2591 }
2592 else
2593 {
2594#ifndef DEBUG_bird // annoying assertion.
2595 AssertFailed();
2596#endif
2597 return VERR_EM_INTERPRETER;
2598 }
2599
2600 /* All done! */
2601 *pcbSize = param1.size;
2602 return VINF_SUCCESS;
2603#ifdef IN_RC
2604 }
2605 }
2606 return VERR_EM_INTERPRETER;
2607#endif
2608}
2609
2610
2611/**
2612 * XOR/OR/AND Emulation.
2613 */
2614static int emInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2615 PFNEMULATEPARAM3 pfnEmulate)
2616{
2617 DISQPVPARAMVAL param1, param2;
2618 NOREF(pvFault);
2619
2620 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2621 if(RT_FAILURE(rc))
2622 return VERR_EM_INTERPRETER;
2623
2624 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2625 if(RT_FAILURE(rc))
2626 return VERR_EM_INTERPRETER;
2627
2628#ifdef IN_RC
2629 if (TRPMHasTrap(pVCpu))
2630 {
2631 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2632 {
2633#endif
2634 RTGCPTR pParam1;
2635 uint64_t valpar1, valpar2;
2636
2637 if (pDis->Param1.cb != pDis->Param2.cb)
2638 {
2639 if (pDis->Param1.cb < pDis->Param2.cb)
2640 {
2641 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2642 return VERR_EM_INTERPRETER;
2643 }
2644 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2645 pDis->Param2.cb = pDis->Param1.cb;
2646 param2.size = param1.size;
2647 }
2648
2649 /* The destination is always a virtual address */
2650 if (param1.type == DISQPV_TYPE_ADDRESS)
2651 {
2652 pParam1 = (RTGCPTR)param1.val.val64;
2653 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2654 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2655 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2656 if (RT_FAILURE(rc))
2657 {
2658 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2659 return VERR_EM_INTERPRETER;
2660 }
2661 }
2662 else
2663 {
2664 AssertFailed();
2665 return VERR_EM_INTERPRETER;
2666 }
2667
2668 /* Register or immediate data */
2669 switch(param2.type)
2670 {
2671 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2672 valpar2 = param2.val.val64;
2673 break;
2674
2675 default:
2676 AssertFailed();
2677 return VERR_EM_INTERPRETER;
2678 }
2679
2680 LogFlow(("emInterpretOrXorAnd %s %RGv %RX64 - %RX64 size %d (%d)\n", emGetMnemonic(pDis), pParam1, valpar1, valpar2, param2.size, param1.size));
2681
2682 /* Data read, emulate instruction. */
2683 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2684
2685 LogFlow(("emInterpretOrXorAnd %s result %RX64\n", emGetMnemonic(pDis), valpar1));
2686
2687 /* Update guest's eflags and finish. */
2688 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2689 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2690
2691 /* And write it back */
2692 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2693 if (RT_SUCCESS(rc))
2694 {
2695 /* All done! */
2696 *pcbSize = param2.size;
2697 return VINF_SUCCESS;
2698 }
2699#ifdef IN_RC
2700 }
2701 }
2702#endif
2703 return VERR_EM_INTERPRETER;
2704}
2705
2706
2707#ifndef VBOX_COMPARE_IEM_AND_EM
2708/**
2709 * LOCK XOR/OR/AND Emulation.
2710 */
2711static int emInterpretLockOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2712 uint32_t *pcbSize, PFNEMULATELOCKPARAM3 pfnEmulate)
2713{
2714 void *pvParam1;
2715 DISQPVPARAMVAL param1, param2;
2716 NOREF(pvFault);
2717
2718#if HC_ARCH_BITS == 32
2719 Assert(pDis->Param1.cb <= 4);
2720#endif
2721
2722 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2723 if(RT_FAILURE(rc))
2724 return VERR_EM_INTERPRETER;
2725
2726 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2727 if(RT_FAILURE(rc))
2728 return VERR_EM_INTERPRETER;
2729
2730 if (pDis->Param1.cb != pDis->Param2.cb)
2731 {
2732 AssertMsgReturn(pDis->Param1.cb >= pDis->Param2.cb, /* should never happen! */
2733 ("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb),
2734 VERR_EM_INTERPRETER);
2735
2736 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2737 pDis->Param2.cb = pDis->Param1.cb;
2738 param2.size = param1.size;
2739 }
2740
2741#ifdef IN_RC
2742 /* Safety check (in theory it could cross a page boundary and fault there though) */
2743 Assert( TRPMHasTrap(pVCpu)
2744 && (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW));
2745 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
2746#endif
2747
2748 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2749 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2750 RTGCUINTREG ValPar2 = param2.val.val64;
2751
2752 /* The destination is always a virtual address */
2753 AssertReturn(param1.type == DISQPV_TYPE_ADDRESS, VERR_EM_INTERPRETER);
2754
2755 RTGCPTR GCPtrPar1 = param1.val.val64;
2756 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2757 PGMPAGEMAPLOCK Lock;
2758 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2759 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2760
2761 /* Try emulate it with a one-shot #PF handler in place. (RC) */
2762 Log2(("%s %RGv imm%d=%RX64\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2763
2764 RTGCUINTREG32 eflags = 0;
2765 rc = pfnEmulate(pvParam1, ValPar2, pDis->Param2.cb, &eflags);
2766 PGMPhysReleasePageMappingLock(pVM, &Lock);
2767 if (RT_FAILURE(rc))
2768 {
2769 Log(("%s %RGv imm%d=%RX64-> emulation failed due to page fault!\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2770 return VERR_EM_INTERPRETER;
2771 }
2772
2773 /* Update guest's eflags and finish. */
2774 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2775 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2776
2777 *pcbSize = param2.size;
2778 return VINF_SUCCESS;
2779}
2780#endif /* !VBOX_COMPARE_IEM_AND_EM */
2781
2782
/**
 * ADD, ADC & SUB Emulation.
 *
 * Interprets a two-operand add/sub-style instruction with a memory
 * destination: reads the destination operand, applies @a pfnEmulate,
 * merges the arithmetic flags into the guest EFLAGS and writes the
 * result back to guest memory.
 *
 * @returns VINF_SUCCESS on success, VERR_EM_INTERPRETER to fall back.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (sanity checking only).
 * @param   pcbSize     Where to return the operand size on success.
 * @param   pfnEmulate  Worker implementing the actual arithmetic.
 */
static int emInterpretAddSub(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
                             PFNEMULATEPARAM3 pfnEmulate)
{
    NOREF(pvFault);
    DISQPVPARAMVAL param1, param2;
    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

#ifdef IN_RC
    /* In raw-mode context only handle write page faults. */
    if (TRPMHasTrap(pVCpu))
    {
        if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
        {
#endif
            RTGCPTR pParam1;
            uint64_t valpar1, valpar2;

            if (pDis->Param1.cb != pDis->Param2.cb)
            {
                if (pDis->Param1.cb < pDis->Param2.cb)
                {
                    AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
                    return VERR_EM_INTERPRETER;
                }
                /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
                pDis->Param2.cb = pDis->Param1.cb;
                param2.size     = param1.size;
            }

            /* The destination is always a virtual address */
            if (param1.type == DISQPV_TYPE_ADDRESS)
            {
                pParam1 = (RTGCPTR)param1.val.val64;
                pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
                EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
                rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
                    return VERR_EM_INTERPRETER;
                }
            }
            else
            {
#ifndef DEBUG_bird
                AssertFailed();
#endif
                return VERR_EM_INTERPRETER;
            }

            /* Register or immediate data */
            switch(param2.type)
            {
            case DISQPV_TYPE_IMMEDIATE:    /* both immediate data and register (ugly) */
                valpar2 = param2.val.val64;
                break;

            default:
                AssertFailed();
                return VERR_EM_INTERPRETER;
            }

            /* Data read, emulate instruction. */
            uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);

            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));

            /* And write it back */
            rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
            if (RT_SUCCESS(rc))
            {
                /* All done! */
                *pcbSize = param2.size;
                return VINF_SUCCESS;
            }
#ifdef IN_RC
        }
    }
#endif
    return VERR_EM_INTERPRETER;
}
2874
2875
2876/**
2877 * ADC Emulation.
2878 */
2879static int emInterpretAdc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2880{
2881 if (pRegFrame->eflags.Bits.u1CF)
2882 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdcWithCarrySet);
2883 else
2884 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdd);
2885}
2886
2887
/**
 * BTR/C/S Emulation.
 *
 * Interprets a bit test instruction with a memory operand by reducing it
 * to a single byte access: the byte containing the target bit is read,
 * @a pfnEmulate operates on bit (offset & 7), and the byte is written back.
 *
 * @returns VINF_SUCCESS on success, VERR_EM_INTERPRETER to fall back.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (sanity checking only).
 * @param   pcbSize     Where to return the access size (always 1) on success.
 * @param   pfnEmulate  Worker implementing the actual bit operation.
 */
static int emInterpretBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
                              PFNEMULATEPARAM2UINT32 pfnEmulate)
{
    DISQPVPARAMVAL param1, param2;
    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

#ifdef IN_RC
    /* In raw-mode context only handle write page faults. */
    if (TRPMHasTrap(pVCpu))
    {
        if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
        {
#endif
            RTGCPTR pParam1;
            uint64_t valpar1 = 0, valpar2;
            uint32_t eflags;

            /* The destination is always a virtual address */
            if (param1.type != DISQPV_TYPE_ADDRESS)
                return VERR_EM_INTERPRETER;

            pParam1 = (RTGCPTR)param1.val.val64;
            pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);

            /* Register or immediate data */
            switch(param2.type)
            {
            case DISQPV_TYPE_IMMEDIATE:    /* both immediate data and register (ugly) */
                valpar2 = param2.val.val64;
                break;

            default:
                AssertFailed();
                return VERR_EM_INTERPRETER;
            }

            Log2(("emInterpret%s: pvFault=%RGv pParam1=%RGv val2=%x\n", emGetMnemonic(pDis), pvFault, pParam1, valpar2));
            /* Advance the pointer to the byte actually containing the bit. */
            pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + valpar2/8);
            EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)pParam1 & ~3) == pvFault, VERR_EM_INTERPRETER); NOREF(pvFault);
            rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, 1);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
                return VERR_EM_INTERPRETER;
            }

            Log2(("emInterpretBtx: val=%x\n", valpar1));
            /* Data read, emulate bit test instruction. */
            eflags = pfnEmulate(&valpar1, valpar2 & 0x7);

            Log2(("emInterpretBtx: val=%x CF=%d\n", valpar1, !!(eflags & X86_EFL_CF)));

            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));

            /* And write it back */
            rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, 1);
            if (RT_SUCCESS(rc))
            {
                /* All done! */
                *pcbSize = 1;
                return VINF_SUCCESS;
            }
#ifdef IN_RC
        }
    }
#endif
    return VERR_EM_INTERPRETER;
}
2966
2967
2968#ifndef VBOX_COMPARE_IEM_AND_EM
/**
 * LOCK BTR/C/S Emulation.
 *
 * Interprets a locked bit test instruction by mapping the guest byte that
 * contains the target bit into host context and applying the atomic worker
 * @a pfnEmulate on the mapping.
 *
 * @returns VINF_SUCCESS on success, VERR_EM_INTERPRETER to fall back.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (RC sanity checking only).
 * @param   pcbSize     Where to return the access size (always 1) on success.
 * @param   pfnEmulate  Atomic worker implementing the locked bit operation.
 */
static int emInterpretLockBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                  uint32_t *pcbSize, PFNEMULATELOCKPARAM2 pfnEmulate)
{
    void *pvParam1;

    DISQPVPARAMVAL param1, param2;
    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    /* The destination is always a virtual address */
    if (param1.type != DISQPV_TYPE_ADDRESS)
        return VERR_EM_INTERPRETER;

    /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
    AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
    uint64_t ValPar2 = param2.val.val64;

    /* Adjust the parameters so what we're dealing with is a bit within the byte pointed to. */
    RTGCPTR GCPtrPar1 = param1.val.val64;
    GCPtrPar1 = (GCPtrPar1 + ValPar2 / 8);
    ValPar2 &= 7;

    GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
#ifdef IN_RC
    Assert(TRPMHasTrap(pVCpu));
    EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)GCPtrPar1 & ~(RTGCUINTPTR)3) == pvFault, VERR_EM_INTERPRETER);
#endif

    /* Map the guest byte so the atomic worker can operate on it directly. */
    PGMPAGEMAPLOCK Lock;
    rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
    AssertRCReturn(rc, VERR_EM_INTERPRETER);

    Log2(("emInterpretLockBitTest %s: pvFault=%RGv GCPtrPar1=%RGv imm=%RX64\n", emGetMnemonic(pDis), pvFault, GCPtrPar1, ValPar2));
    NOREF(pvFault);

    /* Try emulate it with a one-shot #PF handler in place. (RC) */
    RTGCUINTREG32 eflags = 0;
    rc = pfnEmulate(pvParam1, ValPar2, &eflags);
    PGMPhysReleasePageMappingLock(pVM, &Lock);
    if (RT_FAILURE(rc))
    {
        Log(("emInterpretLockBitTest %s: %RGv imm%d=%RX64 -> emulation failed due to page fault!\n",
             emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
        return VERR_EM_INTERPRETER;
    }

    Log2(("emInterpretLockBitTest %s: GCPtrPar1=%RGv imm=%RX64 CF=%d\n", emGetMnemonic(pDis), GCPtrPar1, ValPar2, !!(eflags & X86_EFL_CF)));

    /* Update guest's eflags and finish. */
    pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                          | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));

    *pcbSize = 1;
    return VINF_SUCCESS;
}
3032#endif /* !VBOX_COMPARE_IEM_AND_EM */
3033
3034
/**
 * MOV emulation.
 *
 * Handles three cases: a write fault (memory destination), the raw-ring-1
 * 'mov reg, cs' special case (RC only), and a read fault (memory source
 * with register destination).
 *
 * @returns VINF_SUCCESS on success, VERR_EM_INTERPRETER to fall back.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (sanity checking only).
 * @param   pcbSize     Where to return the access size (write case only).
 */
static int emInterpretMov(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    NOREF(pvFault);
    DISQPVPARAMVAL param1, param2;
    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    /* If destination is a segment register, punt. We can't handle it here.
     * NB: Source can be a register and still trigger a #PF!
     */
    if (RT_UNLIKELY(pDis->Param1.fUse == DISUSE_REG_SEG))
        return VERR_EM_INTERPRETER;

    if (param1.type == DISQPV_TYPE_ADDRESS)
    {
        RTGCPTR pDest;
        uint64_t val64;

        /* NOTE(review): only the ADDRESS case is reachable here since the
           enclosing 'if' already checked param1.type == DISQPV_TYPE_ADDRESS. */
        switch(param1.type)
        {
        case DISQPV_TYPE_IMMEDIATE:
            if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
                return VERR_EM_INTERPRETER;
            RT_FALL_THRU();

        case DISQPV_TYPE_ADDRESS:
            pDest = (RTGCPTR)param1.val.val64;
            pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
            break;

        default:
            AssertFailed();
            return VERR_EM_INTERPRETER;
        }

        switch(param2.type)
        {
        case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
            val64 = param2.val.val64;
            break;

        default:
            Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
            return VERR_EM_INTERPRETER;
        }
#ifdef LOG_ENABLED
        if (pDis->uCpuMode == DISCPUMODE_64BIT)
            LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
        else
            LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
#endif

        Assert(param2.size <= 8 && param2.size > 0);
        EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
        rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
        if (RT_FAILURE(rc))
            return VERR_EM_INTERPRETER;

        *pcbSize = param2.size;
    }
#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1)
    /* mov xx, cs instruction is dangerous in raw mode and replaced by an 'int3' by csam/patm. */
    else if (   param1.type == DISQPV_TYPE_REGISTER
             && param2.type == DISQPV_TYPE_REGISTER)
    {
        AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER);
        AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER);
        AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER);

        /* Hide the raw-mode ring compression: report CS with the guest CPL as RPL. */
        uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
        uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl;

        Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS));
        switch (param1.size)
        {
        case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) uValCS); break;
        case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break;
        case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break;
        default:
            AssertFailed();
            return VERR_EM_INTERPRETER;
        }
        AssertRCReturn(rc, rc);
    }
#endif
    else
    { /* read fault */
        RTGCPTR pSrc;
        uint64_t val64;

        /* Source */
        switch(param2.type)
        {
        case DISQPV_TYPE_IMMEDIATE:
            if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
                return VERR_EM_INTERPRETER;
            RT_FALL_THRU();

        case DISQPV_TYPE_ADDRESS:
            pSrc = (RTGCPTR)param2.val.val64;
            pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
            break;

        default:
            return VERR_EM_INTERPRETER;
        }

        Assert(param1.size <= 8 && param1.size > 0);
        EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
        rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
        if (RT_FAILURE(rc))
            return VERR_EM_INTERPRETER;

        /* Destination */
        switch(param1.type)
        {
        case DISQPV_TYPE_REGISTER:
            switch(param1.size)
            {
            case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break;
            case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
            case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
            case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
            default:
                return VERR_EM_INTERPRETER;
            }
            if (RT_FAILURE(rc))
                return rc;
            break;

        default:
            return VERR_EM_INTERPRETER;
        }
#ifdef LOG_ENABLED
        if (pDis->uCpuMode == DISCPUMODE_64BIT)
            LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
        else
            LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
#endif
    }
    return VINF_SUCCESS;
}
3185
3186
3187#ifndef IN_RC
/**
 * [REP] STOSWD emulation
 *
 * Interprets STOSW/STOSD/STOSQ, optionally with a REP prefix. The REP case
 * is restricted to a single page; larger or page-crossing stores are
 * deferred to the recompiler.
 *
 * @returns VINF_SUCCESS on success, VERR_EM_INTERPRETER to fall back.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (unused).
 * @param   pcbSize     Where to return the unit size on success.
 */
static int emInterpretStosWD(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    int rc;
    RTGCPTR GCDest, GCOffset;
    uint32_t cbSize;
    uint64_t cTransfers;
    int offIncrement;
    NOREF(pvFault);

    /* Don't support any but these three prefix bytes. */
    if ((pDis->fPrefix & ~(DISPREFIX_ADDRSIZE|DISPREFIX_OPSIZE|DISPREFIX_REP|DISPREFIX_REX)))
        return VERR_EM_INTERPRETER;

    /* Fetch (r/e)di and the transfer count according to the address size. */
    switch (pDis->uAddrMode)
    {
    case DISCPUMODE_16BIT:
        GCOffset = pRegFrame->di;
        cTransfers = pRegFrame->cx;
        break;
    case DISCPUMODE_32BIT:
        GCOffset = pRegFrame->edi;
        cTransfers = pRegFrame->ecx;
        break;
    case DISCPUMODE_64BIT:
        GCOffset = pRegFrame->rdi;
        cTransfers = pRegFrame->rcx;
        break;
    default:
        AssertFailed();
        return VERR_EM_INTERPRETER;
    }

    GCDest = SELMToFlat(pVM, DISSELREG_ES, pRegFrame, GCOffset);
    /* The operand size determines the unit written per iteration. */
    switch (pDis->uOpMode)
    {
    case DISCPUMODE_16BIT:
        cbSize = 2;
        break;
    case DISCPUMODE_32BIT:
        cbSize = 4;
        break;
    case DISCPUMODE_64BIT:
        cbSize = 8;
        break;
    default:
        AssertFailed();
        return VERR_EM_INTERPRETER;
    }

    /* Direction flag decides whether (r/e)di moves up or down. */
    offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cbSize : (signed)cbSize;

    if (!(pDis->fPrefix & DISPREFIX_REP))
    {
        LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize));

        rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
        if (RT_FAILURE(rc))
            return VERR_EM_INTERPRETER;
        Assert(rc == VINF_SUCCESS);

        /* Update (e/r)di. */
        switch (pDis->uAddrMode)
        {
        case DISCPUMODE_16BIT:
            pRegFrame->di += offIncrement;
            break;
        case DISCPUMODE_32BIT:
            pRegFrame->edi += offIncrement;
            break;
        case DISCPUMODE_64BIT:
            pRegFrame->rdi += offIncrement;
            break;
        default:
            AssertFailed();
            return VERR_EM_INTERPRETER;
        }

    }
    else
    {
        if (!cTransfers)
            return VINF_SUCCESS;

        /*
         * Do *not* try emulate cross page stuff here because we don't know what might
         * be waiting for us on the subsequent pages. The caller has only asked us to
         * ignore access handlers for the current page.
         * This also fends off big stores which would quickly kill PGMR0DynMap.
         */
        if (    cbSize > PAGE_SIZE
            ||  cTransfers > PAGE_SIZE
            ||  (GCDest >> PAGE_SHIFT) != ((GCDest + offIncrement * cTransfers) >> PAGE_SHIFT))
        {
            Log(("STOSWD is crosses pages, chicken out to the recompiler; GCDest=%RGv cbSize=%#x offIncrement=%d cTransfers=%#x\n",
                 GCDest, cbSize, offIncrement, cTransfers));
            return VERR_EM_INTERPRETER;
        }

        LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d cTransfers=%x DF=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize, cTransfers, pRegFrame->eflags.Bits.u1DF));
        /* Access verification first; we currently can't recover properly from traps inside this instruction */
        rc = PGMVerifyAccess(pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
                             cTransfers * cbSize,
                             X86_PTE_RW | (CPUMGetGuestCPL(pVCpu) == 3 ? X86_PTE_US : 0));
        if (rc != VINF_SUCCESS)
        {
            Log(("STOSWD will generate a trap -> recompiler, rc=%d\n", rc));
            return VERR_EM_INTERPRETER;
        }

        /* REP case */
        while (cTransfers)
        {
            rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
            if (RT_FAILURE(rc))
            {
                rc = VERR_EM_INTERPRETER;
                break;
            }

            Assert(rc == VINF_SUCCESS);
            GCOffset += offIncrement;
            GCDest += offIncrement;
            cTransfers--;
        }

        /* Update the registers. */
        switch (pDis->uAddrMode)
        {
        case DISCPUMODE_16BIT:
            pRegFrame->di = GCOffset;
            pRegFrame->cx = cTransfers;
            break;
        case DISCPUMODE_32BIT:
            pRegFrame->edi = GCOffset;
            pRegFrame->ecx = cTransfers;
            break;
        case DISCPUMODE_64BIT:
            pRegFrame->rdi = GCOffset;
            pRegFrame->rcx = cTransfers;
            break;
        default:
            AssertFailed();
            return VERR_EM_INTERPRETER;
        }
    }

    *pcbSize = cbSize;
    return rc;
}
3340#endif /* !IN_RC */
3341
3342
/**
 * [LOCK] CMPXCHG emulation.
 *
 * Maps the destination guest memory into host context and performs the
 * compare-exchange (atomically when the LOCK prefix is present), then
 * merges the arithmetic flags into the guest EFLAGS.
 *
 * @returns VINF_SUCCESS on success, VERR_EM_INTERPRETER to fall back.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (unused).
 * @param   pcbSize     Where to return the operand size on success.
 */
static int emInterpretCmpXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    DISQPVPARAMVAL param1, param2;
    NOREF(pvFault);

#if HC_ARCH_BITS == 32
    Assert(pDis->Param1.cb <= 4);
#endif

    /* Source to make DISQueryParamVal read the register value - ugly hack */
    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    uint64_t valpar;
    switch(param2.type)
    {
    case DISQPV_TYPE_IMMEDIATE: /* register actually */
        valpar = param2.val.val64;
        break;

    default:
        return VERR_EM_INTERPRETER;
    }

    PGMPAGEMAPLOCK Lock;
    RTGCPTR GCPtrPar1;
    void *pvParam1;
    uint64_t eflags;

    AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
    switch(param1.type)
    {
    case DISQPV_TYPE_ADDRESS:
        GCPtrPar1 = param1.val.val64;
        GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);

        rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
        AssertRCReturn(rc, VERR_EM_INTERPRETER);
        break;

    default:
        return VERR_EM_INTERPRETER;
    }

    LogFlow(("%s %RGv rax=%RX64 %RX64\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar));

#ifndef VBOX_COMPARE_IEM_AND_EM
    if (pDis->fPrefix & DISPREFIX_LOCK)
        eflags = EMEmulateLockCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
    else
        eflags = EMEmulateCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
#else  /* VBOX_COMPARE_IEM_AND_EM */
    /* Comparison builds: operate on a local copy and write it back via emRamWrite. */
    uint64_t u64;
    switch (pDis->Param2.cb)
    {
    case 1: u64 = *(uint8_t *)pvParam1; break;
    case 2: u64 = *(uint16_t *)pvParam1; break;
    case 4: u64 = *(uint32_t *)pvParam1; break;
    default:
    case 8: u64 = *(uint64_t *)pvParam1; break;
    }
    eflags = EMEmulateCmpXchg(&u64, &pRegFrame->rax, valpar, pDis->Param2.cb);
    int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
#endif /* VBOX_COMPARE_IEM_AND_EM */

    LogFlow(("%s %RGv rax=%RX64 %RX64 ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar, !!(eflags & X86_EFL_ZF)));

    /* Update guest's eflags and finish. */
    pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                          | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));

    *pcbSize = param2.size;
    PGMPhysReleasePageMappingLock(pVM, &Lock);
    return VINF_SUCCESS;
}
3426
3427
/**
 * [LOCK] CMPXCHG8B emulation.
 *
 * Maps the 8-byte guest destination into host context and performs the
 * compare-exchange against edx:eax / ecx:ebx (atomically when the LOCK
 * prefix is present). Only ZF is affected in the guest EFLAGS.
 *
 * @returns VINF_SUCCESS on success, VERR_EM_INTERPRETER to fall back.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (unused).
 * @param   pcbSize     Where to return the access size (always 8) on success.
 */
static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    DISQPVPARAMVAL param1;
    NOREF(pvFault);

    /* Source to make DISQueryParamVal read the register value - ugly hack */
    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    RTGCPTR GCPtrPar1;
    void *pvParam1;
    uint64_t eflags;
    PGMPAGEMAPLOCK Lock;

    AssertReturn(pDis->Param1.cb == 8, VERR_EM_INTERPRETER);
    switch(param1.type)
    {
    case DISQPV_TYPE_ADDRESS:
        GCPtrPar1 = param1.val.val64;
        GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);

        rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
        AssertRCReturn(rc, VERR_EM_INTERPRETER);
        break;

    default:
        return VERR_EM_INTERPRETER;
    }

    LogFlow(("%s %RGv=%p eax=%08x\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax));

#ifndef VBOX_COMPARE_IEM_AND_EM
    if (pDis->fPrefix & DISPREFIX_LOCK)
        eflags = EMEmulateLockCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
    else
        eflags = EMEmulateCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
#else  /* VBOX_COMPARE_IEM_AND_EM */
    /* Comparison builds: operate on a local copy and write it back via emRamWrite. */
    uint64_t u64 = *(uint64_t *)pvParam1;
    eflags = EMEmulateCmpXchg8b(&u64, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
    int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, sizeof(u64)); AssertRCSuccess(rc2);
#endif /* VBOX_COMPARE_IEM_AND_EM */

    LogFlow(("%s %RGv=%p eax=%08x ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax, !!(eflags & X86_EFL_ZF)));

    /* Update guest's eflags and finish; note that *only* ZF is affected. */
    pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_ZF))
                          | (eflags                &  (X86_EFL_ZF));

    *pcbSize = 8;
    PGMPhysReleasePageMappingLock(pVM, &Lock);
    return VINF_SUCCESS;
}
3484
3485
3486#ifdef IN_RC /** @todo test+enable for HM as well. */
/**
 * [LOCK] XADD emulation.
 *
 * Maps the guest destination into host context, exchange-adds the register
 * operand into it (atomically when the LOCK prefix is present) and merges
 * the arithmetic flags into the guest EFLAGS.
 *
 * @returns VINF_SUCCESS on success, VERR_EM_INTERPRETER to fall back.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (RC sanity checking only).
 * @param   pcbSize     Where to return the operand size on success.
 */
static int emInterpretXAdd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    Assert(pDis->uCpuMode != DISCPUMODE_64BIT);    /** @todo check */
    DISQPVPARAMVAL param1;
    void *pvParamReg2;
    size_t cbParamReg2;
    NOREF(pvFault);

    /* Source to make DISQueryParamVal read the register value - ugly hack */
    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    rc = DISQueryParamRegPtr(pRegFrame, pDis, &pDis->Param2, &pvParamReg2, &cbParamReg2);
    Assert(cbParamReg2 <= 4);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

#ifdef IN_RC
    /* In raw-mode context only handle write page faults. */
    if (TRPMHasTrap(pVCpu))
    {
        if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
        {
#endif
            RTGCPTR GCPtrPar1;
            void *pvParam1;
            uint32_t eflags;
            PGMPAGEMAPLOCK Lock;

            AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
            switch(param1.type)
            {
            case DISQPV_TYPE_ADDRESS:
                GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, (RTRCUINTPTR)param1.val.val64);
#ifdef IN_RC
                EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
#endif

                rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
                AssertRCReturn(rc, VERR_EM_INTERPRETER);
                break;

            default:
                return VERR_EM_INTERPRETER;
            }

            LogFlow(("XAdd %RGv=%p reg=%08llx\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2));

#ifndef VBOX_COMPARE_IEM_AND_EM
            if (pDis->fPrefix & DISPREFIX_LOCK)
                eflags = EMEmulateLockXAdd(pvParam1, pvParamReg2, cbParamReg2);
            else
                eflags = EMEmulateXAdd(pvParam1, pvParamReg2, cbParamReg2);
#else  /* VBOX_COMPARE_IEM_AND_EM */
            /* Comparison builds: operate on a local copy and write it back via emRamWrite. */
            uint64_t u64;
            switch (cbParamReg2)
            {
            case 1: u64 = *(uint8_t *)pvParam1; break;
            case 2: u64 = *(uint16_t *)pvParam1; break;
            case 4: u64 = *(uint32_t *)pvParam1; break;
            default:
            case 8: u64 = *(uint64_t *)pvParam1; break;
            }
            eflags = EMEmulateXAdd(&u64, pvParamReg2, cbParamReg2);
            int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
#endif /* VBOX_COMPARE_IEM_AND_EM */

            LogFlow(("XAdd %RGv=%p reg=%08llx ZF=%d\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2, !!(eflags & X86_EFL_ZF) ));

            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));

            *pcbSize = cbParamReg2;
            PGMPhysReleasePageMappingLock(pVM, &Lock);
            return VINF_SUCCESS;
#ifdef IN_RC
        }
    }

    return VERR_EM_INTERPRETER;
#endif
}
3573#endif /* IN_RC */
3574
3575
3576/**
3577 * WBINVD Emulation.
3578 */
3579static int emInterpretWbInvd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3580{
3581 /* Nothing to do. */
3582 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3583 return VINF_SUCCESS;
3584}
3585
3586
/**
 * INVLPG Emulation.
 *
 * Extracts the operand address from the disassembled instruction and asks
 * PGM to invalidate the corresponding guest page mapping.
 *
 * @returns Strict VBox status code: VINF_SUCCESS, VERR_EM_INTERPRETER, or
 *          VINF_EM_RAW_EMULATE_INSTR passed through from PGM.
 * @param   pVM         The cross context VM structure (unused).
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembled instruction state.
 * @param   pRegFrame   The guest register frame.
 * @param   pvFault     The fault address (unused).
 * @param   pcbSize     Unused.
 */
static VBOXSTRICTRC emInterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    DISQPVPARAMVAL param1;
    RTGCPTR addr;
    NOREF(pvFault); NOREF(pVM); NOREF(pcbSize);

    VBOXSTRICTRC rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    switch(param1.type)
    {
    case DISQPV_TYPE_IMMEDIATE:
    case DISQPV_TYPE_ADDRESS:
        if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
            return VERR_EM_INTERPRETER;
        addr = (RTGCPTR)param1.val.val64;
        break;

    default:
        return VERR_EM_INTERPRETER;
    }

    /** @todo is addr always a flat linear address or ds based
     * (in absence of segment override prefixes)????
     */
#ifdef IN_RC
    LogFlow(("RC: EMULATE: invlpg %RGv\n", addr));
#endif
    rc = PGMInvalidatePage(pVCpu, addr);
    if (    rc == VINF_SUCCESS
        ||  rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
        return VINF_SUCCESS;
    AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
                    ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), addr),
                    VERR_EM_INTERPRETER);
    return rc;
}
3628
3629/** @todo change all these EMInterpretXXX methods to VBOXSTRICTRC. */
3630
3631/**
3632 * CPUID Emulation.
3633 */
3634static int emInterpretCpuId(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3635{
3636 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3637 int rc = EMInterpretCpuId(pVM, pVCpu, pRegFrame);
3638 return rc;
3639}
3640
3641
3642/**
3643 * CLTS Emulation.
3644 */
3645static int emInterpretClts(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3646{
3647 NOREF(pVM); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3648
3649 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3650 if (!(cr0 & X86_CR0_TS))
3651 return VINF_SUCCESS;
3652 return CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_TS);
3653}
3654
3655
/**
 * Update CRx.
 *
 * Applies a new value to a guest control register, performing the side
 * effects (TLB flushes, EFER.LMA updates, paging mode changes) required by
 * the architecture.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pRegFrame   The register frame.
 * @param   DestRegCrx  CRx register index (DISUSE_REG_CR*)
 * @param   val         New CRx value
 *
 */
static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
{
    uint64_t oldval;
    uint64_t msrEFER;
    uint32_t fValid;
    int rc, rc2;
    NOREF(pVM);

    /** @todo Clean up this mess. */
    LogFlow(("emInterpretCRxWrite at %RGv CR%d <- %RX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    switch (DestRegCrx)
    {
        case DISCREG_CR0:
            oldval = CPUMGetGuestCR0(pVCpu);
#ifdef IN_RC
            /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
            if (    (val    & (X86_CR0_WP | X86_CR0_AM))
                !=  (oldval & (X86_CR0_WP | X86_CR0_AM)))
                return VERR_EM_INTERPRETER;
#endif
            rc = VINF_SUCCESS;
#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
            CPUMSetGuestCR0(pVCpu, val);
#else
            CPUMQueryGuestCtxPtr(pVCpu)->cr0 = val | X86_CR0_ET;
#endif
            /* Re-read the value; CPUM may have adjusted bits (e.g. the
               comparison path above forces ET) - TODO confirm for the
               CPUMSetGuestCR0 path. */
            val = CPUMGetGuestCR0(pVCpu);
            if (    (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
                !=  (val    & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
            {
                /* global flush */
                rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
                AssertRCReturn(rc, rc);
            }

            /* Deal with long mode enabling/disabling. */
            msrEFER = CPUMGetGuestEFER(pVCpu);
            if (msrEFER & MSR_K6_EFER_LME)
            {
                if (    !(oldval & X86_CR0_PG)
                    &&  (val & X86_CR0_PG))
                {
                    /* Illegal to have an active 64 bits CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
                    if (pRegFrame->cs.Attr.n.u1Long)
                    {
                        AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
                        return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
                    }

                    /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
                    if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE))
                    {
                        AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
                        return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
                    }
                    msrEFER |= MSR_K6_EFER_LMA;
                }
                else
                if (    (oldval & X86_CR0_PG)
                    &&  !(val & X86_CR0_PG))
                {
                    msrEFER &= ~MSR_K6_EFER_LMA;
                    /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
                }
                CPUMSetGuestEFER(pVCpu, msrEFER);
            }
            /* Recalculate the paging mode; a PGMChangeMode failure takes
               precedence over the flush status collected in rc. */
            rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
            return rc2 == VINF_SUCCESS ? rc : rc2;

        case DISCREG_CR2:
            rc = CPUMSetGuestCR2(pVCpu, val); AssertRC(rc);
            return VINF_SUCCESS;

        case DISCREG_CR3:
            /* Reloading the current CR3 means the guest just wants to flush the TLBs */
            rc = CPUMSetGuestCR3(pVCpu, val); AssertRC(rc);
            if (CPUMGetGuestCR0(pVCpu) & X86_CR0_PG)
            {
                /* flush */
                rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
                AssertRC(rc);
            }
            return rc;

        case DISCREG_CR4:
            oldval = CPUMGetGuestCR4(pVCpu);
            rc = CPUMSetGuestCR4(pVCpu, val); AssertRC(rc);
            val = CPUMGetGuestCR4(pVCpu);

            /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
            msrEFER = CPUMGetGuestEFER(pVCpu);
            if (    (msrEFER & MSR_K6_EFER_LMA)
                &&  (oldval & X86_CR4_PAE)
                &&  !(val & X86_CR4_PAE))
            {
                return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
            }

            /* From IEM iemCImpl_load_CrX. */
            /** @todo Check guest CPUID bits for determining corresponding valid bits. */
            fValid = X86_CR4_VME | X86_CR4_PVI
                   | X86_CR4_TSD | X86_CR4_DE
                   | X86_CR4_PSE | X86_CR4_PAE
                   | X86_CR4_MCE | X86_CR4_PGE
                   | X86_CR4_PCE | X86_CR4_OSFXSR
                   | X86_CR4_OSXMMEEXCPT;
            //if (xxx)
            //    fValid |= X86_CR4_VMXE;
            //if (xxx)
            //    fValid |= X86_CR4_OSXSAVE;
            if (val & ~(uint64_t)fValid)
            {
                Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", val, val & ~(uint64_t)fValid));
                return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
            }

            rc = VINF_SUCCESS;
            if (    (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
                !=  (val    & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
            {
                /* global flush */
                rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
                AssertRCReturn(rc, rc);
            }

            /* Feeling extremely lazy. */
# ifdef IN_RC
            if (    (oldval & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
                !=  (val    & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
            {
                Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            }
# endif
# ifdef VBOX_WITH_RAW_MODE
            if (((val ^ oldval) & X86_CR4_VME) && VM_IS_RAW_MODE_ENABLED(pVM))
                VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
# endif

            rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
            return rc2 == VINF_SUCCESS ? rc : rc2;

        case DISCREG_CR8:
            return APICSetTpr(pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */

        default:
            AssertFailed(); /* Note! Deliberate fallthrough into the CR1 case. */
        case DISCREG_CR1: /* illegal op */
            break;
    }
    return VERR_EM_INTERPRETER;
}
3820
3821
3822/**
3823 * LMSW Emulation.
3824 */
3825static int emInterpretLmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3826{
3827 DISQPVPARAMVAL param1;
3828 uint32_t val;
3829 NOREF(pvFault); NOREF(pcbSize);
3830 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3831
3832 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3833 if(RT_FAILURE(rc))
3834 return VERR_EM_INTERPRETER;
3835
3836 switch(param1.type)
3837 {
3838 case DISQPV_TYPE_IMMEDIATE:
3839 case DISQPV_TYPE_ADDRESS:
3840 if(!(param1.flags & DISQPV_FLAG_16))
3841 return VERR_EM_INTERPRETER;
3842 val = param1.val.val32;
3843 break;
3844
3845 default:
3846 return VERR_EM_INTERPRETER;
3847 }
3848
3849 LogFlow(("emInterpretLmsw %x\n", val));
3850 uint64_t OldCr0 = CPUMGetGuestCR0(pVCpu);
3851
3852 /* Only PE, MP, EM and TS can be changed; note that PE can't be cleared by this instruction. */
3853 uint64_t NewCr0 = ( OldCr0 & ~( X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
3854 | (val & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
3855
3856 return emUpdateCRx(pVM, pVCpu, pRegFrame, DISCREG_CR0, NewCr0);
3857
3858}
3859
#ifdef EM_EMULATE_SMSW
/**
 * SMSW Emulation.
 *
 * Stores the machine status word (low word of CR0) into a general purpose
 * register or a memory operand.
 */
static int emInterpretSmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    NOREF(pvFault); NOREF(pcbSize);
    DISQPVPARAMVAL param1;
    uint64_t cr0 = CPUMGetGuestCR0(pVCpu);

    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    switch(param1.type)
    {
        case DISQPV_TYPE_IMMEDIATE:  /* register destination. */
            if(param1.size != sizeof(uint16_t))
                return VERR_EM_INTERPRETER;
            LogFlow(("emInterpretSmsw %d <- cr0 (%x)\n", pDis->Param1.Base.idxGenReg, cr0));
            /* DISWriteReg16 stores only the low 16 bits of cr0. */
            rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, cr0);
            break;

        case DISQPV_TYPE_ADDRESS:
        {
            RTGCPTR pParam1;

            /* Actually forced to 16 bits regardless of the operand size. */
            if(param1.size != sizeof(uint16_t))
                return VERR_EM_INTERPRETER;

            pParam1 = (RTGCPTR)param1.val.val64;
            pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
            LogFlow(("emInterpretSmsw %RGv <- cr0 (%x)\n", pParam1, cr0));

            /* NOTE(review): writes the first two bytes of the 64-bit cr0
               variable, i.e. the low word on a little-endian host - confirm
               no big-endian host is expected here. */
            rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &cr0, sizeof(uint16_t));
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
                return VERR_EM_INTERPRETER;
            }
            break;
        }

        default:
            return VERR_EM_INTERPRETER;
    }

    LogFlow(("emInterpretSmsw %x\n", cr0));
    return rc;
}
#endif
3912
3913
3914/**
3915 * Interpret CRx read.
3916 *
3917 * @returns VBox status code.
3918 * @param pVM The cross context VM structure.
3919 * @param pVCpu The cross context virtual CPU structure.
3920 * @param pRegFrame The register frame.
3921 * @param DestRegGen General purpose register index (USE_REG_E**))
3922 * @param SrcRegCrx CRx register index (DISUSE_REG_CR*)
3923 *
3924 */
3925static int emInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx)
3926{
3927 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3928 uint64_t val64;
3929 int rc = CPUMGetGuestCRx(pVCpu, SrcRegCrx, &val64);
3930 AssertMsgRCReturn(rc, ("CPUMGetGuestCRx %d failed\n", SrcRegCrx), VERR_EM_INTERPRETER);
3931 NOREF(pVM);
3932
3933 if (CPUMIsGuestIn64BitCode(pVCpu))
3934 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
3935 else
3936 rc = DISWriteReg32(pRegFrame, DestRegGen, val64);
3937
3938 if (RT_SUCCESS(rc))
3939 {
3940 LogFlow(("MOV_CR: gen32=%d CR=%d val=%RX64\n", DestRegGen, SrcRegCrx, val64));
3941 return VINF_SUCCESS;
3942 }
3943 return VERR_EM_INTERPRETER;
3944}
3945
3946
3947/**
3948 * Interpret CRx write.
3949 *
3950 * @returns VBox status code.
3951 * @param pVM The cross context VM structure.
3952 * @param pVCpu The cross context virtual CPU structure.
3953 * @param pRegFrame The register frame.
3954 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3955 * @param SrcRegGen General purpose register index (USE_REG_E**))
3956 *
3957 */
3958static int emInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
3959{
3960 uint64_t val;
3961 int rc;
3962 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3963
3964 if (CPUMIsGuestIn64BitCode(pVCpu))
3965 rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
3966 else
3967 {
3968 uint32_t val32;
3969 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
3970 val = val32;
3971 }
3972
3973 if (RT_SUCCESS(rc))
3974 return emUpdateCRx(pVM, pVCpu, pRegFrame, DestRegCrx, val);
3975
3976 return VERR_EM_INTERPRETER;
3977}
3978
3979
3980/**
3981 * MOV CRx
3982 */
3983static int emInterpretMovCRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3984{
3985 NOREF(pvFault); NOREF(pcbSize);
3986 if ((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_CR)
3987 return emInterpretCRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxCtrlReg);
3988
3989 if (pDis->Param1.fUse == DISUSE_REG_CR && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3990 return emInterpretCRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxCtrlReg, pDis->Param2.Base.idxGenReg);
3991
3992 AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
3993}
3994
3995
3996/**
3997 * MOV DRx
3998 */
3999static int emInterpretMovDRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4000{
4001 int rc = VERR_EM_INTERPRETER;
4002 NOREF(pvFault); NOREF(pcbSize);
4003
4004 if((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_DBG)
4005 {
4006 rc = EMInterpretDRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxDbgReg);
4007 }
4008 else
4009 if(pDis->Param1.fUse == DISUSE_REG_DBG && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
4010 {
4011 rc = EMInterpretDRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxDbgReg, pDis->Param2.Base.idxGenReg);
4012 }
4013 else
4014 AssertMsgFailed(("Unexpected debug register move\n"));
4015
4016 return rc;
4017}
4018
4019
/**
 * LLDT Emulation.
 *
 * Ring-0 builds only handle the VT-x real-mode emulation case; other
 * contexts only handle the frequent "load null selector while hyper LDTR is
 * also null" no-op case.
 */
static int emInterpretLLdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    DISQPVPARAMVAL param1;
    RTSEL sel;
    NOREF(pVM); NOREF(pvFault); NOREF(pcbSize);

    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    switch(param1.type)
    {
        case DISQPV_TYPE_ADDRESS:
            return VERR_EM_INTERPRETER; //feeling lazy right now

        case DISQPV_TYPE_IMMEDIATE:
            /* Only a 16-bit selector value is acceptable. */
            if(!(param1.flags & DISQPV_FLAG_16))
                return VERR_EM_INTERPRETER;
            sel = (RTSEL)param1.val.val16;
            break;

        default:
            return VERR_EM_INTERPRETER;
    }

#ifdef IN_RING0
    /* Only for the VT-x real-mode emulation case. */
    AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
    CPUMSetGuestLDTR(pVCpu, sel);
    return VINF_SUCCESS;
#else
    if (sel == 0)
    {
        if (CPUMGetHyperLDTR(pVCpu) == 0)
        {
            // this simple case is most frequent in Windows 2000 (31k - boot & shutdown)
            return VINF_SUCCESS;
        }
    }
    //still feeling lazy
    return VERR_EM_INTERPRETER;
#endif
}
4066
#ifdef IN_RING0
/**
 * LIDT/LGDT Emulation.
 *
 * Shared worker for both instructions; only used for the VT-x real-mode
 * emulation case. Reads the 6-byte descriptor-table operand from guest
 * memory and loads it into IDTR or GDTR.
 */
static int emInterpretLIGdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    DISQPVPARAMVAL param1;
    RTGCPTR pParam1;
    X86XDTR32 dtr32;
    NOREF(pvFault); NOREF(pcbSize);

    Log(("Emulate %s at %RGv\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip));

    /* Only for the VT-x real-mode emulation case. */
    AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);

    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    switch(param1.type)
    {
        case DISQPV_TYPE_ADDRESS:
            /* NOTE(review): only the low 16 bits of the operand address are
               used here - presumably fine since this path is real-mode only;
               confirm. */
            pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, param1.val.val16);
            break;

        default:
            return VERR_EM_INTERPRETER;
    }

    rc = emRamRead(pVM, pVCpu, pRegFrame, &dtr32, pParam1, sizeof(dtr32));
    AssertRCReturn(rc, VERR_EM_INTERPRETER);

    if (!(pDis->fPrefix & DISPREFIX_OPSIZE))
        dtr32.uAddr &= 0xffffff; /* 16 bits operand size */

    if (pDis->pCurInstr->uOpcode == OP_LIDT)
        CPUMSetGuestIDTR(pVCpu, dtr32.uAddr, dtr32.cb);
    else
        CPUMSetGuestGDTR(pVCpu, dtr32.uAddr, dtr32.cb);

    return VINF_SUCCESS;
}
#endif
4111
4112
#ifdef IN_RC
/**
 * STI Emulation.
 *
 * @remark the instruction following sti is guaranteed to be executed before any interrupts are dispatched
 */
static int emInterpretSti(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    NOREF(pcbSize);
    PPATMGCSTATE pGCState = PATMGetGCState(pVM);

    if(!pGCState)
    {
        Assert(pGCState);
        return VERR_EM_INTERPRETER;
    }
    /* Set the virtual interrupt flag kept in the PATM guest context state. */
    pGCState->uVMFlags |= X86_EFL_IF;

    Assert(pRegFrame->eflags.u32 & X86_EFL_IF);
    Assert(pvFault == SELMToFlat(pVM, DISSELREG_CS, pRegFrame, (RTGCPTR)pRegFrame->rip));

    /* Inhibit interrupt delivery until the instruction following STI has
       executed (see the @remark above). */
    pVCpu->em.s.GCPtrInhibitInterrupts = pRegFrame->eip + pDis->cbInstr;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

    return VINF_SUCCESS;
}
#endif /* IN_RC */
4140
4141
4142/**
4143 * HLT Emulation.
4144 */
4145static VBOXSTRICTRC
4146emInterpretHlt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4147{
4148 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
4149 return VINF_EM_HALT;
4150}
4151
4152
4153/**
4154 * RDTSC Emulation.
4155 */
4156static int emInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4157{
4158 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4159 return EMInterpretRdtsc(pVM, pVCpu, pRegFrame);
4160}
4161
4162/**
4163 * RDPMC Emulation
4164 */
4165static int emInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4166{
4167 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4168 return EMInterpretRdpmc(pVM, pVCpu, pRegFrame);
4169}
4170
4171
4172static int emInterpretMonitor(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4173{
4174 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4175 return EMInterpretMonitor(pVM, pVCpu, pRegFrame);
4176}
4177
4178
4179static VBOXSTRICTRC emInterpretMWait(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4180{
4181 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4182 return EMInterpretMWait(pVM, pVCpu, pRegFrame);
4183}
4184
4185
/**
 * RDMSR Emulation.
 *
 * Thin wrapper deferring to EMInterpretRdmsr.
 */
static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
             different, so we play safe by completely disassembling the instruction. */
    Assert(!(pDis->fPrefix & DISPREFIX_REX));
    /* pDis is only touched by the Assert above, which compiles out in release
       builds - hence the NOREF. */
    NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
    return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
}
4197
4198
4199/**
4200 * WRMSR Emulation.
4201 */
4202static int emInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4203{
4204 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4205 return EMInterpretWrmsr(pVM, pVCpu, pRegFrame);
4206}
4207
4208
/**
 * Internal worker.
 *
 * Filters out instructions/prefixes/privilege levels we refuse to interpret
 * and then dispatches to the per-instruction emInterpretXxx workers via a
 * macro-generated switch.
 *
 * @copydoc emInterpretInstructionCPUOuter
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
                                                   RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
{
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    Assert(enmCodeType == EMCODETYPE_SUPERVISOR || enmCodeType == EMCODETYPE_ALL);
    Assert(pcbSize);
    *pcbSize = 0; /* default: nothing written. */

    if (enmCodeType == EMCODETYPE_SUPERVISOR)
    {
        /*
         * Only supervisor guest code!!
         * And no complicated prefixes.
         */
        /* Get the current privilege level. */
        uint32_t cpl = CPUMGetGuestCPL(pVCpu);
#ifdef VBOX_WITH_RAW_RING1
        if (   !EMIsRawRing1Enabled(pVM)
            || cpl > 1
            || pRegFrame->eflags.Bits.u2IOPL > cpl
           )
#endif
        {
            if (    cpl != 0
                &&  pDis->pCurInstr->uOpcode != OP_RDTSC)    /* rdtsc requires emulation in ring 3 as well */
            {
                Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
                return VERR_EM_INTERPRETER;
            }
        }
    }
    else
        Log2(("emInterpretInstructionCPU allowed to interpret user-level code!!\n"));

    /*
     * Refuse REP/REPNE prefixed instructions (except those explicitly
     * whitelisted) and LOCK prefixes on anything without a dedicated locked
     * emulation helper.
     */
#ifdef IN_RC
    if (    (pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP))
        ||  (   (pDis->fPrefix & DISPREFIX_LOCK)
             && pDis->pCurInstr->uOpcode != OP_CMPXCHG
             && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
             && pDis->pCurInstr->uOpcode != OP_XADD
             && pDis->pCurInstr->uOpcode != OP_OR
             && pDis->pCurInstr->uOpcode != OP_AND
             && pDis->pCurInstr->uOpcode != OP_XOR
             && pDis->pCurInstr->uOpcode != OP_BTR
            )
       )
#else
    if (    (pDis->fPrefix & DISPREFIX_REPNE)
        ||  (   (pDis->fPrefix & DISPREFIX_REP)
             && pDis->pCurInstr->uOpcode != OP_STOSWD
            )
        ||  (   (pDis->fPrefix & DISPREFIX_LOCK)
             && pDis->pCurInstr->uOpcode != OP_OR
             && pDis->pCurInstr->uOpcode != OP_AND
             && pDis->pCurInstr->uOpcode != OP_XOR
             && pDis->pCurInstr->uOpcode != OP_BTR
             && pDis->pCurInstr->uOpcode != OP_CMPXCHG
             && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
            )
       )
#endif
    {
        //Log(("EMInterpretInstruction: wrong prefix!!\n"));
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedPrefix));
        Log4(("EM: Refuse %u on REP/REPNE/LOCK prefix grounds\n", pDis->pCurInstr->uOpcode));
        return VERR_EM_INTERPRETER;
    }

#if HC_ARCH_BITS == 32
    /*
     * Unable to emulate most >4 bytes accesses in 32 bits mode.
     * Whitelisted instructions are safe.
     */
    if (    pDis->Param1.cb > 4
        &&  CPUMIsGuestIn64BitCode(pVCpu))
    {
        uint32_t uOpCode = pDis->pCurInstr->uOpcode;
        if (    uOpCode != OP_STOSWD
            &&  uOpCode != OP_MOV
            &&  uOpCode != OP_CMPXCHG8B
            &&  uOpCode != OP_XCHG
            &&  uOpCode != OP_BTS
            &&  uOpCode != OP_BTR
            &&  uOpCode != OP_BTC
           )
        {
# ifdef VBOX_WITH_STATISTICS
            /* Account the refusal against the per-instruction failure counter. */
            switch (pDis->pCurInstr->uOpcode)
            {
# define INTERPRET_FAILED_CASE(opcode, Instr) \
                case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); break;
                INTERPRET_FAILED_CASE(OP_XCHG,Xchg);
                INTERPRET_FAILED_CASE(OP_DEC,Dec);
                INTERPRET_FAILED_CASE(OP_INC,Inc);
                INTERPRET_FAILED_CASE(OP_POP,Pop);
                INTERPRET_FAILED_CASE(OP_OR, Or);
                INTERPRET_FAILED_CASE(OP_XOR,Xor);
                INTERPRET_FAILED_CASE(OP_AND,And);
                INTERPRET_FAILED_CASE(OP_MOV,Mov);
                INTERPRET_FAILED_CASE(OP_STOSWD,StosWD);
                INTERPRET_FAILED_CASE(OP_INVLPG,InvlPg);
                INTERPRET_FAILED_CASE(OP_CPUID,CpuId);
                INTERPRET_FAILED_CASE(OP_MOV_CR,MovCRx);
                INTERPRET_FAILED_CASE(OP_MOV_DR,MovDRx);
                INTERPRET_FAILED_CASE(OP_LLDT,LLdt);
                INTERPRET_FAILED_CASE(OP_LIDT,LIdt);
                INTERPRET_FAILED_CASE(OP_LGDT,LGdt);
                INTERPRET_FAILED_CASE(OP_LMSW,Lmsw);
                INTERPRET_FAILED_CASE(OP_CLTS,Clts);
                INTERPRET_FAILED_CASE(OP_MONITOR,Monitor);
                INTERPRET_FAILED_CASE(OP_MWAIT,MWait);
                INTERPRET_FAILED_CASE(OP_RDMSR,Rdmsr);
                INTERPRET_FAILED_CASE(OP_WRMSR,Wrmsr);
                INTERPRET_FAILED_CASE(OP_ADD,Add);
                INTERPRET_FAILED_CASE(OP_SUB,Sub);
                INTERPRET_FAILED_CASE(OP_ADC,Adc);
                INTERPRET_FAILED_CASE(OP_BTR,Btr);
                INTERPRET_FAILED_CASE(OP_BTS,Bts);
                INTERPRET_FAILED_CASE(OP_BTC,Btc);
                INTERPRET_FAILED_CASE(OP_RDTSC,Rdtsc);
                INTERPRET_FAILED_CASE(OP_CMPXCHG, CmpXchg);
                INTERPRET_FAILED_CASE(OP_STI, Sti);
                INTERPRET_FAILED_CASE(OP_XADD,XAdd);
                INTERPRET_FAILED_CASE(OP_CMPXCHG8B,CmpXchg8b);
                INTERPRET_FAILED_CASE(OP_HLT, Hlt);
                INTERPRET_FAILED_CASE(OP_IRET,Iret);
                INTERPRET_FAILED_CASE(OP_WBINVD,WbInvd);
                INTERPRET_FAILED_CASE(OP_MOVNTPS,MovNTPS);
# undef INTERPRET_FAILED_CASE
                default:
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
                    break;
            }
# endif /* VBOX_WITH_STATISTICS */
            Log4(("EM: Refuse %u on grounds of accessing %u bytes\n", pDis->pCurInstr->uOpcode, pDis->Param1.cb));
            return VERR_EM_INTERPRETER;
        }
    }
#endif

    VBOXSTRICTRC rc;
#if (defined(VBOX_STRICT) || defined(LOG_ENABLED))
    LogFlow(("emInterpretInstructionCPU %s\n", emGetMnemonic(pDis)));
#endif
    /*
     * Dispatch to the per-instruction worker; each case also maintains the
     * per-instruction success/failure statistics.
     */
    switch (pDis->pCurInstr->uOpcode)
    {
        /*
         * Macros for generating the right case statements.
         */
# ifndef VBOX_COMPARE_IEM_AND_EM
# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
        case opcode:\
            if (pDis->fPrefix & DISPREFIX_LOCK) \
                rc = emInterpretLock##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulateLock); \
            else \
                rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
            if (RT_SUCCESS(rc)) \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
            else \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
            return rc
# else  /* VBOX_COMPARE_IEM_AND_EM */
# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
        case opcode:\
            rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
            if (RT_SUCCESS(rc)) \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
            else \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
            return rc
# endif /* VBOX_COMPARE_IEM_AND_EM */

#define INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate) \
        case opcode:\
            rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
            if (RT_SUCCESS(rc)) \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
            else \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
            return rc

#define INTERPRET_CASE_EX_PARAM2(opcode, Instr, InstrFn, pfnEmulate) \
        INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate)
#define INTERPRET_CASE_EX_LOCK_PARAM2(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
        INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock)

#define INTERPRET_CASE(opcode, Instr) \
        case opcode:\
            rc = emInterpret##Instr(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
            if (RT_SUCCESS(rc)) \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
            else \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
            return rc

#define INTERPRET_CASE_EX_DUAL_PARAM2(opcode, Instr, InstrFn) \
        case opcode:\
            rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
            if (RT_SUCCESS(rc)) \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
            else \
                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
            return rc

#define INTERPRET_STAT_CASE(opcode, Instr) \
        case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); return VERR_EM_INTERPRETER;

        /*
         * The actual case statements.
         */
        INTERPRET_CASE(OP_XCHG,Xchg);
        INTERPRET_CASE_EX_PARAM2(OP_DEC,Dec, IncDec, EMEmulateDec);
        INTERPRET_CASE_EX_PARAM2(OP_INC,Inc, IncDec, EMEmulateInc);
        INTERPRET_CASE(OP_POP,Pop);
        INTERPRET_CASE_EX_LOCK_PARAM3(OP_OR, Or, OrXorAnd, EMEmulateOr, EMEmulateLockOr);
        INTERPRET_CASE_EX_LOCK_PARAM3(OP_XOR,Xor, OrXorAnd, EMEmulateXor, EMEmulateLockXor);
        INTERPRET_CASE_EX_LOCK_PARAM3(OP_AND,And, OrXorAnd, EMEmulateAnd, EMEmulateLockAnd);
        INTERPRET_CASE(OP_MOV,Mov);
#ifndef IN_RC
        INTERPRET_CASE(OP_STOSWD,StosWD);
#endif
        INTERPRET_CASE(OP_INVLPG,InvlPg);
        INTERPRET_CASE(OP_CPUID,CpuId);
        INTERPRET_CASE(OP_MOV_CR,MovCRx);
        INTERPRET_CASE(OP_MOV_DR,MovDRx);
#ifdef IN_RING0
        INTERPRET_CASE_EX_DUAL_PARAM2(OP_LIDT, LIdt, LIGdt);
        INTERPRET_CASE_EX_DUAL_PARAM2(OP_LGDT, LGdt, LIGdt);
#endif
        INTERPRET_CASE(OP_LLDT,LLdt);
        INTERPRET_CASE(OP_LMSW,Lmsw);
#ifdef EM_EMULATE_SMSW
        INTERPRET_CASE(OP_SMSW,Smsw);
#endif
        INTERPRET_CASE(OP_CLTS,Clts);
        INTERPRET_CASE(OP_MONITOR, Monitor);
        INTERPRET_CASE(OP_MWAIT, MWait);
        INTERPRET_CASE(OP_RDMSR, Rdmsr);
        INTERPRET_CASE(OP_WRMSR, Wrmsr);
        INTERPRET_CASE_EX_PARAM3(OP_ADD,Add, AddSub, EMEmulateAdd);
        INTERPRET_CASE_EX_PARAM3(OP_SUB,Sub, AddSub, EMEmulateSub);
        INTERPRET_CASE(OP_ADC,Adc);
        INTERPRET_CASE_EX_LOCK_PARAM2(OP_BTR,Btr, BitTest, EMEmulateBtr, EMEmulateLockBtr);
        INTERPRET_CASE_EX_PARAM2(OP_BTS,Bts, BitTest, EMEmulateBts);
        INTERPRET_CASE_EX_PARAM2(OP_BTC,Btc, BitTest, EMEmulateBtc);
        INTERPRET_CASE(OP_RDPMC,Rdpmc);
        INTERPRET_CASE(OP_RDTSC,Rdtsc);
        INTERPRET_CASE(OP_CMPXCHG, CmpXchg);
#ifdef IN_RC
        INTERPRET_CASE(OP_STI,Sti);
        INTERPRET_CASE(OP_XADD, XAdd);
        INTERPRET_CASE(OP_IRET,Iret);
#endif
        INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b);
        INTERPRET_CASE(OP_HLT,Hlt);
        INTERPRET_CASE(OP_WBINVD,WbInvd);
#ifdef VBOX_WITH_STATISTICS
# ifndef IN_RC
        INTERPRET_STAT_CASE(OP_XADD, XAdd);
# endif
        INTERPRET_STAT_CASE(OP_MOVNTPS,MovNTPS);
#endif

        default:
            Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->uOpcode));
            STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
            return VERR_EM_INTERPRETER;

/* NOTE(review): INTERPRET_CASE_EX was never defined, while the *_PARAM3,
   *_LOCK_* and *_DUAL_* helper macros defined above are not #undef'ed here.
   Harmless as long as nothing below redefines them, but inconsistent. */
#undef INTERPRET_CASE_EX_PARAM2
#undef INTERPRET_STAT_CASE
#undef INTERPRET_CASE_EX
#undef INTERPRET_CASE
    } /* switch (opcode) */
    /* not reached */
}
4490
4491/**
4492 * Interprets the current instruction using the supplied DISCPUSTATE structure.
4493 *
4494 * EIP is *NOT* updated!
4495 *
4496 * @returns VBox strict status code.
4497 * @retval VINF_* Scheduling instructions. When these are returned, it
4498 * starts to get a bit tricky to know whether code was
4499 * executed or not... We'll address this when it becomes a problem.
4500 * @retval VERR_EM_INTERPRETER Something we can't cope with.
4501 * @retval VERR_* Fatal errors.
4502 *
4503 * @param pVCpu The cross context virtual CPU structure.
4504 * @param pDis The disassembler cpu state for the instruction to be
4505 * interpreted.
4506 * @param pRegFrame The register frame. EIP is *NOT* changed!
4507 * @param pvFault The fault address (CR2).
4508 * @param pcbSize Size of the write (if applicable).
4509 * @param enmCodeType Code type (user/supervisor)
4510 *
4511 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
4512 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
4513 * to worry about e.g. invalid modrm combinations (!)
4514 *
4515 * @todo At this time we do NOT check if the instruction overwrites vital information.
4516 * Make sure this can't happen!! (will add some assertions/checks later)
4517 */
4518DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
4519 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
4520{
4521 STAM_PROFILE_START(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4522 VBOXSTRICTRC rc = emInterpretInstructionCPU(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, pRegFrame, pvFault, enmCodeType, pcbSize);
4523 STAM_PROFILE_STOP(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4524 if (RT_SUCCESS(rc))
4525 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretSucceeded));
4526 else
4527 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretFailed));
4528 return rc;
4529}
4530
4531
4532#endif /* !VBOX_WITH_IEM */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette