VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@ 72670

Last change on this file since 72670 was 72657, checked in by vboxsync, 7 years ago

EM: Make the EMHistoryExec parameters adjustable. bugref:9198

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 166.6 KB
Line 
1/* $Id: EMAll.cpp 72657 2018-06-22 11:05:11Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_WITH_IEM
23#define LOG_GROUP LOG_GROUP_EM
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/patm.h>
28#include <VBox/vmm/csam.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_IEM
31# include <VBox/vmm/iem.h>
32#endif
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/stam.h>
35#include "EMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/log.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50#ifdef VBOX_WITH_IEM
51//# define VBOX_COMPARE_IEM_AND_EM /* debugging... */
52//# define VBOX_SAME_AS_EM
53//# define VBOX_COMPARE_IEM_LAST
54#endif
55
56#ifdef VBOX_WITH_RAW_RING1
57# define EM_EMULATE_SMSW
58#endif
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64/** @def EM_ASSERT_FAULT_RETURN
65 * Safety check.
66 *
67 * Could in theory misfire on a cross page boundary access...
68 *
69 * Currently disabled because the CSAM (+ PATM) patch monitoring occasionally
70 * turns up an alias page instead of the original faulting one and annoying the
71 * heck out of anyone running a debug build. See @bugref{2609} and @bugref{1931}.
72 */
73#if 0
74# define EM_ASSERT_FAULT_RETURN(expr, rc) AssertReturn(expr, rc)
75#else
76# define EM_ASSERT_FAULT_RETURN(expr, rc) do { } while (0)
77#endif
78
79
80/*********************************************************************************************************************************
81* Internal Functions *
82*********************************************************************************************************************************/
83#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
84DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
85 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize);
86#endif
87
88
89/*********************************************************************************************************************************
90* Global Variables *
91*********************************************************************************************************************************/
92#ifdef VBOX_COMPARE_IEM_AND_EM
93static const uint32_t g_fInterestingFFs = VMCPU_FF_TO_R3
94 | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_INHIBIT_INTERRUPTS
95 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
96 | VMCPU_FF_TLB_FLUSH | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL;
97static uint32_t g_fIncomingFFs;
98static CPUMCTX g_IncomingCtx;
99static bool g_fIgnoreRaxRdx = false;
100
101static uint32_t g_fEmFFs;
102static CPUMCTX g_EmCtx;
103static uint8_t g_abEmWrote[256];
104static size_t g_cbEmWrote;
105
106static uint32_t g_fIemFFs;
107static CPUMCTX g_IemCtx;
108extern uint8_t g_abIemWrote[256];
109#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
110extern size_t g_cbIemWrote;
111#else
112static size_t g_cbIemWrote;
113#endif
114#endif
115
116
117/**
118 * Get the current execution manager status.
119 *
120 * @returns Current status.
121 * @param pVCpu The cross context virtual CPU structure.
122 */
123VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
124{
125 return pVCpu->em.s.enmState;
126}
127
128
129/**
130 * Sets the current execution manager status. (use only when you know what you're doing!)
131 *
132 * @param pVCpu The cross context virtual CPU structure.
133 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
134 */
135VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
136{
137 /* Only allowed combination: */
138 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
139 pVCpu->em.s.enmState = enmNewState;
140}
141
142
143/**
144 * Sets the PC for which interrupts should be inhibited.
145 *
146 * @param pVCpu The cross context virtual CPU structure.
147 * @param PC The PC.
148 */
149VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
150{
151 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
152 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
153}
154
155
156/**
157 * Gets the PC for which interrupts should be inhibited.
158 *
159 * There are a few instructions which inhibits or delays interrupts
160 * for the instruction following them. These instructions are:
161 * - STI
162 * - MOV SS, r/m16
163 * - POP SS
164 *
165 * @returns The PC for which interrupts should be inhibited.
166 * @param pVCpu The cross context virtual CPU structure.
167 *
168 */
169VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
170{
171 return pVCpu->em.s.GCPtrInhibitInterrupts;
172}
173
174
175/**
176 * Enables / disable hypercall instructions.
177 *
178 * This interface is used by GIM to tell the execution monitors whether the
179 * hypercall instruction (VMMCALL & VMCALL) are allowed or should \#UD.
180 *
181 * @param pVCpu The cross context virtual CPU structure this applies to.
182 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
183 */
184VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
185{
186 pVCpu->em.s.fHypercallEnabled = fEnabled;
187}
188
189
190/**
191 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
192 *
193 * @returns true if enabled, false if not.
194 * @param pVCpu The cross context virtual CPU structure.
195 *
196 * @note If this call becomes a performance factor, we can make the data
197 * field available thru a read-only view in VMCPU. See VM::cpum.ro.
198 */
199VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
200{
201 return pVCpu->em.s.fHypercallEnabled;
202}
203
204
205/**
206 * Prepare an MWAIT - essentials of the MONITOR instruction.
207 *
208 * @returns VINF_SUCCESS
209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
210 * @param rax The content of RAX.
211 * @param rcx The content of RCX.
212 * @param rdx The content of RDX.
213 * @param GCPhys The physical address corresponding to rax.
214 */
215VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
216{
217 pVCpu->em.s.MWait.uMonitorRAX = rax;
218 pVCpu->em.s.MWait.uMonitorRCX = rcx;
219 pVCpu->em.s.MWait.uMonitorRDX = rdx;
220 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
221 /** @todo Make use of GCPhys. */
222 NOREF(GCPhys);
223 /** @todo Complete MONITOR implementation. */
224 return VINF_SUCCESS;
225}
226
227
228/**
229 * Checks if the monitor hardware is armed / active.
230 *
231 * @returns true if armed, false otherwise.
232 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
233 */
234VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
235{
236 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
237}
238
239
240/**
241 * Performs an MWAIT.
242 *
243 * @returns VINF_SUCCESS
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 * @param rax The content of RAX.
246 * @param rcx The content of RCX.
247 */
248VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
249{
250 pVCpu->em.s.MWait.uMWaitRAX = rax;
251 pVCpu->em.s.MWait.uMWaitRCX = rcx;
252 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
253 if (rcx)
254 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
255 else
256 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
257 /** @todo not completely correct?? */
258 return VINF_EM_HALT;
259}
260
261
262
263/**
264 * Determine if we should continue execution in HM after encountering an mwait
265 * instruction.
266 *
267 * Clears MWAIT flags if returning @c true.
268 *
269 * @returns true if we should continue, false if we should halt.
270 * @param pVCpu The cross context virtual CPU structure.
271 * @param pCtx Current CPU context.
272 */
273VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
274{
275 if ( pCtx->eflags.Bits.u1IF
276 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
277 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
278 {
279 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
280 {
281 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
282 return true;
283 }
284 }
285
286 return false;
287}
288
289
290/**
291 * Determine if we should continue execution in HM after encountering a hlt
292 * instruction.
293 *
294 * @returns true if we should continue, false if we should halt.
295 * @param pVCpu The cross context virtual CPU structure.
296 * @param pCtx Current CPU context.
297 */
298VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
299{
300 /** @todo Shouldn't we be checking GIF here? */
301 if (pCtx->eflags.Bits.u1IF)
302 return VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
303 return false;
304}
305
306
/**
 * Unhalts and wakes up the given CPU.
 *
 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up.  If
 * the CPU isn't currently in a halt, the next HLT instruction it executes will
 * be affected.
 *
 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpuDst    The cross context virtual CPU structure of the
 *                      CPU to unhalt and wake up.  This is usually not the
 *                      same as the caller.
 * @thread  EMT
 */
VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst)
{
    /*
     * Flag the current(/next) HLT to unhalt immediately.
     */
    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);

    /*
     * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
     * just do it here for now).  The wake-up path differs per context:
     * ring-0 calls the scheduler directly, ring-3 goes via a VMMR0 call,
     * and raw-mode context has nothing to do.
     */
#ifdef IN_RING0
    /* We might be here with preemption disabled or enabled (i.e. depending on
       thread-context hooks being used), so don't try obtaining the GVMMR0 used
       lock here. See @bugref{7270#c148}. */
    int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
    AssertRC(rc);

#elif defined(IN_RING3)
    int rc = SUPR3CallVMMR0(pVM->pVMR0, pVCpuDst->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
    AssertRC(rc);

#else
    /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
    Assert(pVM->cCpus == 1); NOREF(pVM);
    int rc = VINF_SUCCESS;
#endif
    return rc;
}
351
352#ifndef IN_RING3
353
/**
 * Makes an I/O port write pending for ring-3 processing.
 *
 * @returns VINF_EM_PENDING_R3_IOPORT_WRITE
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uPort   The I/O port.
 * @param   cbInstr The instruction length (for RIP updating).
 * @param   cbValue The write size.
 * @param   uValue  The value being written.
 * @sa      emR3ExecutePendingIoPortWrite
 *
 * @note    Must not be used when I/O port breakpoints are pending or when single stepping.
 */
VMMRZ_INT_DECL(VBOXSTRICTRC)
EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
{
    /* Only one access may be pending at a time (cbValue == 0 means "none"). */
    Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
    pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
    pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
    pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
    pVCpu->em.s.PendingIoPortAccess.uValue  = uValue;
    return VINF_EM_PENDING_R3_IOPORT_WRITE;
}
377
378
379/**
380 * Makes an I/O port read pending for ring-3 processing.
381 *
382 * @returns VINF_EM_PENDING_R3_IOPORT_READ
383 * @param pVCpu The cross context virtual CPU structure.
384 * @param uPort The I/O port.
385 * @param cbInstr The instruction length (for RIP updating).
386 * @param cbValue The read size.
387 * @sa emR3ExecutePendingIoPortRead
388 *
389 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
390 */
391VMMRZ_INT_DECL(VBOXSTRICTRC)
392EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
393{
394 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
395 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
396 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
397 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
398 pVCpu->em.s.PendingIoPortAccess.uValue = UINT32_C(0x52454144); /* 'READ' */
399 return VINF_EM_PENDING_R3_IOPORT_READ;
400}
401
402#endif /* IN_RING3 */
403
404
/**
 * Worker for EMHistoryExec that checks for ring-3 returns and flags
 * continuation of the EMHistoryExec run there.
 *
 * In ring-3 there is nothing to flag.  In ring-0/raw context, the status
 * codes which EMHandleRCTmpl.h resumes EMHistoryExec with cause the exit
 * record index to be stored so the run can be continued after the ring-3
 * round trip.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rcStrict    The status code returned by IEMExecForExits.
 * @param   pExitRec    The exit record for the current EMHistoryExec run.
 */
DECL_FORCE_INLINE(void) emHistoryExecSetContinueExitRecIdx(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, PCEMEXITREC pExitRec)
{
    /* Default: no continuation. */
    pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
#ifdef IN_RING3
    RT_NOREF_PV(rcStrict); RT_NOREF_PV(pExitRec);
#else
    switch (VBOXSTRICTRC_VAL(rcStrict))
    {
        case VINF_SUCCESS:
        default:
            break;

        /*
         * Only status codes that EMHandleRCTmpl.h will resume EMHistoryExec with.
         */
        case VINF_IOM_R3_IOPORT_READ:           /* -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_IOPORT_WRITE:          /* -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_IOPORT_COMMIT_WRITE:   /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_MMIO_READ:             /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_WRITE:            /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_READ_WRITE:       /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_COMMIT_WRITE:     /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
        case VINF_CPUM_R3_MSR_READ:             /* -> emR3ExecuteInstruction */
        case VINF_CPUM_R3_MSR_WRITE:            /* -> emR3ExecuteInstruction */
        case VINF_GIM_R3_HYPERCALL:             /* -> emR3ExecuteInstruction */
            /* Remember which record to resume with after the ring-3 excursion. */
            pVCpu->em.s.idxContinueExitRec = (uint16_t)(pExitRec - &pVCpu->em.s.aExitRecords[0]);
            break;
    }
#endif /* !IN_RING3 */
}
439
440#ifndef IN_RC
441
/**
 * Execute using history.
 *
 * This function will be called when EMHistoryAddExit() and friends returns a
 * non-NULL result.  This happens in response to probing or when probing has
 * uncovered adjacent exits which can more effectively be reached by using IEM
 * than restarting execution using the main execution engine and fielding an
 * regular exit.
 *
 * @returns VBox strict status code, see IEMExecForExits.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pExitRec    The exit record return by a previous history add
 *                      or update call.
 * @param   fWillExit   Flags indicating to IEM what will cause exits, TBD.
 */
VMM_INT_DECL(VBOXSTRICTRC) EMHistoryExec(PVMCPU pVCpu, PCEMEXITREC pExitRec, uint32_t fWillExit)
{
    Assert(pExitRec);
    VMCPU_ASSERT_EMT(pVCpu);
    IEMEXECFOREXITSTATS ExecStats;
    switch (pExitRec->enmAction)
    {
        /*
         * Executes multiple instruction stopping only when we've gone a given
         * number without perceived exits.
         */
        case EMEXITACTION_EXEC_WITH_MAX:
        {
            STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryExec, a);
            LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %RX64, max %u\n", pExitRec->uFlatPC, pExitRec->cMaxInstructionsWithoutExit));
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
                                                    pExitRec->cMaxInstructionsWithoutExit /* cMinInstructions*/,
                                                    pVCpu->em.s.cHistoryExecMaxInstructions,
                                                    pExitRec->cMaxInstructionsWithoutExit,
                                                    &ExecStats);
            LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
                     VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
            emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRec);
            /* Each exit beyond the first is one we did not have to field the slow way. */
            if (ExecStats.cExits > 1)
                STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecSavedExits, ExecStats.cExits - 1);
            STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecInstructions, ExecStats.cInstructions);
            STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryExec, a);
            return rcStrict;
        }

        /*
         * Probe a exit for close by exits.
         */
        case EMEXITACTION_EXEC_PROBE:
        {
            STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryProbe, b);
            LogFlow(("EMHistoryExec/EXEC_PROBE: %RX64\n", pExitRec->uFlatPC));
            /* The record is updated below based on the probe result. */
            PEMEXITREC   pExitRecUnconst = (PEMEXITREC)pExitRec;
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
                                                    pVCpu->em.s.cHistoryProbeMinInstructions,
                                                    pVCpu->em.s.cHistoryExecMaxInstructions,
                                                    pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit,
                                                    &ExecStats);
            LogFlow(("EMHistoryExec/EXEC_PROBE: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
                     VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
            emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRecUnconst);
            if (ExecStats.cExits >= 2)
            {
                /* Found adjacent exits: promote the record so future hits run
                   IEM directly with the observed exit distance as the limit. */
                Assert(ExecStats.cMaxExitDistance > 0 && ExecStats.cMaxExitDistance <= 32);
                pExitRecUnconst->cMaxInstructionsWithoutExit = ExecStats.cMaxExitDistance;
                pExitRecUnconst->enmAction = EMEXITACTION_EXEC_WITH_MAX;
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> EXEC_WITH_MAX %u\n", ExecStats.cMaxExitDistance));
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedExecWithMax);
            }
#ifndef IN_RING3
            else if (pVCpu->em.s.idxContinueExitRec != UINT16_MAX)
            {
                /* Probe interrupted by a ring-3 return; it will be resumed there. */
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedToRing3);
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> ring-3\n"));
            }
#endif
            else
            {
                /* Nothing interesting nearby: mark the record as probed so we
                   don't keep retrying it. */
                pExitRecUnconst->enmAction = EMEXITACTION_NORMAL_PROBED;
                pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> PROBED\n"));
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedNormal);
            }
            STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryProbeInstructions, ExecStats.cInstructions);
            STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryProbe, b);
            return rcStrict;
        }

        /* We shouldn't ever see these here! */
        case EMEXITACTION_FREE_RECORD:
        case EMEXITACTION_NORMAL:
        case EMEXITACTION_NORMAL_PROBED:
            break;

        /* No default case, want compiler warnings. */
    }
    AssertLogRelFailedReturn(VERR_EM_INTERNAL_ERROR);
}
540
541
542/**
543 * Worker for emHistoryAddOrUpdateRecord.
544 */
545DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInit(PEMEXITREC pExitRec, uint64_t uFlatPC, uint32_t uFlagsAndType, uint64_t uExitNo)
546{
547 pExitRec->uFlatPC = uFlatPC;
548 pExitRec->uFlagsAndType = uFlagsAndType;
549 pExitRec->enmAction = EMEXITACTION_NORMAL;
550 pExitRec->bUnused = 0;
551 pExitRec->cMaxInstructionsWithoutExit = 64;
552 pExitRec->uLastExitNo = uExitNo;
553 pExitRec->cHits = 1;
554 return NULL;
555}
556
557
/**
 * Worker for emHistoryAddOrUpdateRecord.
 *
 * Claims a previously free slot for a new exit record: links the history
 * entry to the slot, bumps the used-record count and initializes the record.
 *
 * @returns NULL (via emHistoryRecordInit).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pHistEntry      The exit history entry to link to the slot.
 * @param   idxSlot         The index of the free record slot.
 * @param   pExitRec        The (free) exit record to initialize.
 * @param   uFlatPC         The flattened program counter of the exit.
 * @param   uFlagsAndType   Combined exit flags and type.
 * @param   uExitNo         The sequence number of the current exit.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitNew(PVMCPU pVCpu, PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
                                                      PEMEXITREC pExitRec, uint64_t uFlatPC,
                                                      uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pHistEntry->idxSlot = (uint32_t)idxSlot;
    pVCpu->em.s.cExitRecordUsed++;
    LogFlow(("emHistoryRecordInitNew: [%#x] = %#07x %016RX64; (%u of %u used)\n", idxSlot, uFlagsAndType, uFlatPC,
             pVCpu->em.s.cExitRecordUsed, RT_ELEMENTS(pVCpu->em.s.aExitRecords) ));
    return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
}
571
572
/**
 * Worker for emHistoryAddOrUpdateRecord.
 *
 * Re-purposes an in-use slot for a different exit: links the history entry to
 * the slot, logs what is being evicted and re-initializes the record.  Unlike
 * emHistoryRecordInitNew, the used-record count stays the same.
 *
 * @returns NULL (via emHistoryRecordInit).
 * @param   pHistEntry      The exit history entry to link to the slot.
 * @param   idxSlot         The index of the record slot being replaced.
 * @param   pExitRec        The exit record being replaced.
 * @param   uFlatPC         The flattened program counter of the new exit.
 * @param   uFlagsAndType   Combined exit flags and type of the new exit.
 * @param   uExitNo         The sequence number of the current exit.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitReplacement(PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
                                                              PEMEXITREC pExitRec, uint64_t uFlatPC,
                                                              uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pHistEntry->idxSlot = (uint32_t)idxSlot;
    LogFlow(("emHistoryRecordInitReplacement: [%#x] = %#07x %016RX64 replacing %#07x %016RX64 with %u hits, %u exits old\n",
             idxSlot, uFlagsAndType, uFlatPC, pExitRec->uFlagsAndType, pExitRec->uFlatPC, pExitRec->cHits,
             uExitNo - pExitRec->uLastExitNo));
    return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
}
586
587
/**
 * Adds or updates the EMEXITREC for this PC/type and decide on an action.
 *
 * The record lives in a 1024-entry open-addressed hash table keyed on the
 * flattened PC.  On a collision up to 8 alternative slots are probed before
 * the least recently used of them is evicted.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type, EMEXIT_F_KIND_EM set and
 *                          both EMEXIT_F_CS_EIP and EMEXIT_F_UNFLATTENED_PC are clear.
 * @param   uFlatPC         The flattened program counter.
 * @param   pHistEntry      The exit history entry.
 * @param   uExitNo         The current exit number.
 */
static PCEMEXITREC emHistoryAddOrUpdateRecord(PVMCPU pVCpu, uint64_t uFlagsAndType, uint64_t uFlatPC,
                                              PEMEXITENTRY pHistEntry, uint64_t uExitNo)
{
# ifdef IN_RING0
    /* Disregard the hm flag. */
    uFlagsAndType &= ~EMEXIT_F_HM;
# endif

    /*
     * Work the hash table.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
# define EM_EXIT_RECORDS_IDX_MASK 0x3ff
    uintptr_t idxSlot  = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
    PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
    if (pExitRec->uFlatPC == uFlatPC)
    {
        /* Direct hit in the primary slot. */
        Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
        pHistEntry->idxSlot = (uint32_t)idxSlot;
        if (pExitRec->uFlagsAndType == uFlagsAndType)
        {
            pExitRec->uLastExitNo = uExitNo;
            STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[0]);
        }
        else
        {
            /* Same PC but different exit type: restart the record. */
            STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[0]);
            return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
        }
    }
    else if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
    {
        STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[0]);
        return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
    }
    else
    {
        /*
         * Collision. We calculate a new hash for stepping away from the first,
         * doing up to 8 steps away before replacing the least recently used record.
         */
        uintptr_t idxOldest     = idxSlot;
        uint64_t  uOldestExitNo = pExitRec->uLastExitNo;
        unsigned  iOldestStep   = 0;
        unsigned  iStep         = 1;
        uintptr_t const idxAdd  = (uintptr_t)(uFlatPC >> 11) & (EM_EXIT_RECORDS_IDX_MASK / 4);
        for (;;)
        {
            Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecNew)         == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced)    == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecTypeChanged) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));

            /* Step to the next slot. */
            idxSlot += idxAdd;
            idxSlot &= EM_EXIT_RECORDS_IDX_MASK;
            pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];

            /* Does it match? */
            if (pExitRec->uFlatPC == uFlatPC)
            {
                Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
                pHistEntry->idxSlot = (uint32_t)idxSlot;
                if (pExitRec->uFlagsAndType == uFlagsAndType)
                {
                    pExitRec->uLastExitNo = uExitNo;
                    STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[iStep]);
                    break;
                }
                /* Same PC, different type: restart the record. */
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[iStep]);
                return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }

            /* Is it free? */
            if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
            {
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[iStep]);
                return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }

            /* Is it the least recently used one? */
            if (pExitRec->uLastExitNo < uOldestExitNo)
            {
                uOldestExitNo = pExitRec->uLastExitNo;
                idxOldest     = idxSlot;
                iOldestStep   = iStep;
            }

            /* Next iteration? */
            iStep++;
            Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced));
            if (RT_LIKELY(iStep < 8 + 1))
            { /* likely */ }
            else
            {
                /* Replace the least recently used slot. */
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecReplaced[iOldestStep]);
                pExitRec = &pVCpu->em.s.aExitRecords[idxOldest];
                return emHistoryRecordInitReplacement(pHistEntry, idxOldest, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }
        }
    }

    /*
     * Found an existing record.
     */
    switch (pExitRec->enmAction)
    {
        case EMEXITACTION_NORMAL:
        {
            /* Promote to probing after 256 hits on the same PC/type. */
            uint64_t const cHits = ++pExitRec->cHits;
            if (cHits < 256)
                return NULL;
            LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> EXEC_PROBE\n", idxSlot, uFlagsAndType, uFlatPC));
            pExitRec->enmAction = EMEXITACTION_EXEC_PROBE;
            return pExitRec;
        }

        case EMEXITACTION_NORMAL_PROBED:
            pExitRec->cHits += 1;
            return NULL;

        default:
            pExitRec->cHits += 1;
            return pExitRec;

        /* This will happen if the caller ignores or cannot serve the probe
           request (forced to ring-3, whatever).  We retry this 256 times. */
        case EMEXITACTION_EXEC_PROBE:
        {
            uint64_t const cHits = ++pExitRec->cHits;
            if (cHits < 512)
                return pExitRec;
            pExitRec->enmAction = EMEXITACTION_NORMAL_PROBED;
            LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> PROBED\n", idxSlot, uFlagsAndType, uFlatPC));
            return NULL;
        }
    }
}
740
741#endif /* !IN_RC */
742
/**
 * Adds an exit to the history for this CPU.
 *
 * The exit is always recorded in the 256-entry ring buffer; when exit
 * optimization is enabled and the exit is of the common EM kind with a valid
 * flat PC, it is additionally fed into the exit record hash table which may
 * call for special handling via EMHistoryExec().
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @param   uFlatPC         The flattened program counter (RIP).  UINT64_MAX if not available.
 * @param   uTimestamp      The TSC value for the exit, 0 if not available.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryAddExit(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Add the exit history entry.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t uExitNo = pVCpu->em.s.iNextExit++;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlatPC       = uFlatPC;
    pHistEntry->uTimestamp    = uTimestamp;
    pHistEntry->uFlagsAndType = uFlagsAndType;
    pHistEntry->idxSlot       = UINT32_MAX;

#ifndef IN_RC
    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
# ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
# else
        && pVCpu->em.s.fExitOptimizationEnabled
# endif
        && uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
#endif
    return NULL;
}
787
788
789#ifdef IN_RC
790/**
791 * Special raw-mode interface for adding an exit to the history.
792 *
793 * Currently this is only for recording, not optimizing, so no return value. If
794 * we start seriously caring about raw-mode again, we may extend it.
795 *
796 * @param pVCpu The cross context virtual CPU structure.
797 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
798 * @param uCs The CS.
799 * @param uEip The EIP.
800 * @param uTimestamp The TSC value for the exit, 0 if not available.
801 * @thread EMT(0)
802 */
803VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
804{
805 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
806 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
807 pHistEntry->uFlatPC = ((uint64_t)uCs << 32) | uEip;
808 pHistEntry->uTimestamp = uTimestamp;
809 pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
810 pHistEntry->idxSlot = UINT32_MAX;
811}
812#endif
813
814
815#ifdef IN_RING0
816/**
817 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
818 *
819 * @param pVCpu The cross context virtual CPU structure.
820 * @param uFlatPC The flattened program counter (RIP).
821 * @param fFlattened Set if RIP was subjected to CS.BASE, clear if not.
822 */
823VMMR0_INT_DECL(void) EMR0HistoryUpdatePC(PVMCPU pVCpu, uint64_t uFlatPC, bool fFlattened)
824{
825 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
826 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
827 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
828 pHistEntry->uFlatPC = uFlatPC;
829 if (fFlattened)
830 pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
831 else
832 pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
833}
834#endif
835
836
/**
 * Interface for converting an engine specific exit to a generic one and get guidance.
 *
 * Updates the flags/type of the most recent history entry (preserving the
 * CS:EIP and unflattened-PC markers) and, when exit optimization applies,
 * feeds it into the exit record hash table.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndType(PVMCPU pVCpu, uint32_t uFlagsAndType)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Do the updating.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t uExitNo = pVCpu->em.s.iNextExit - 1; /* iNextExit already advanced by the add call. */
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));

#ifndef IN_RC
    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
# ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
# else
        && pVCpu->em.s.fExitOptimizationEnabled
# endif
        && pHistEntry->uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
#endif
    return NULL;
}
876
877
878/**
879 * Interface for convering a engine specific exit to a generic one and get
880 * guidance, supplying flattened PC too.
881 *
882 * @returns Pointer to an exit record if special action should be taken using
883 * EMHistoryExec(). Take normal exit action when NULL.
884 *
885 * @param pVCpu The cross context virtual CPU structure.
886 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
887 * @param uFlatPC The flattened program counter (RIP).
888 * @thread EMT(pVCpu)
889 */
890VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
891{
892 VMCPU_ASSERT_EMT(pVCpu);
893 Assert(uFlatPC != UINT64_MAX);
894
895 /*
896 * Do the updating.
897 */
898 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
899 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
900 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
901 pHistEntry->uFlagsAndType = uFlagsAndType;
902 pHistEntry->uFlatPC = uFlatPC;
903
904#ifndef IN_RC
905 /*
906 * If common exit type, we will insert/update the exit into the exit record hash table.
907 */
908 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
909# ifdef IN_RING0
910 && pVCpu->em.s.fExitOptimizationEnabledR0
911 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
912# else
913 && pVCpu->em.s.fExitOptimizationEnabled
914# endif
915 )
916 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
917#endif
918 return NULL;
919}
920
921
922/**
923 * Locks REM execution to a single VCPU.
924 *
925 * @param pVM The cross context VM structure.
926 */
927VMMDECL(void) EMRemLock(PVM pVM)
928{
929#ifdef VBOX_WITH_REM
930 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
931 return; /* early init */
932
933 Assert(!PGMIsLockOwner(pVM));
934 Assert(!IOMIsLockWriteOwner(pVM));
935 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
936 AssertRCSuccess(rc);
937#else
938 RT_NOREF(pVM);
939#endif
940}
941
942
943/**
944 * Unlocks REM execution
945 *
946 * @param pVM The cross context VM structure.
947 */
948VMMDECL(void) EMRemUnlock(PVM pVM)
949{
950#ifdef VBOX_WITH_REM
951 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
952 return; /* early init */
953
954 PDMCritSectLeave(&pVM->em.s.CritSectREM);
955#else
956 RT_NOREF(pVM);
957#endif
958}
959
960
961/**
962 * Check if this VCPU currently owns the REM lock.
963 *
964 * @returns bool owner/not owner
965 * @param pVM The cross context VM structure.
966 */
967VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
968{
969#ifdef VBOX_WITH_REM
970 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
971 return true; /* early init */
972
973 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
974#else
975 RT_NOREF(pVM);
976 return true;
977#endif
978}
979
980
981/**
982 * Try to acquire the REM lock.
983 *
984 * @returns VBox status code
985 * @param pVM The cross context VM structure.
986 */
987VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
988{
989#ifdef VBOX_WITH_REM
990 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
991 return VINF_SUCCESS; /* early init */
992
993 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
994#else
995 RT_NOREF(pVM);
996 return VINF_SUCCESS;
997#endif
998}
999
1000
1001/**
1002 * @callback_method_impl{FNDISREADBYTES}
1003 */
1004static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
1005{
1006 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
1007#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
1008 PVM pVM = pVCpu->CTX_SUFF(pVM);
1009#endif
1010 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
1011 int rc;
1012
1013 /*
1014 * Figure how much we can or must read.
1015 */
1016 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
1017 if (cbToRead > cbMaxRead)
1018 cbToRead = cbMaxRead;
1019 else if (cbToRead < cbMinRead)
1020 cbToRead = cbMinRead;
1021
1022#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
1023 /*
1024 * We might be called upon to interpret an instruction in a patch.
1025 */
1026 if (PATMIsPatchGCAddr(pVM, uSrcAddr))
1027 {
1028# ifdef IN_RC
1029 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
1030# else
1031 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVM, uSrcAddr), cbToRead);
1032# endif
1033 rc = VINF_SUCCESS;
1034 }
1035 else
1036#endif
1037 {
1038# ifdef IN_RC
1039 /*
1040 * Try access it thru the shadow page tables first. Fall back on the
1041 * slower PGM method if it fails because the TLB or page table was
1042 * modified recently.
1043 */
1044 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
1045 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
1046 {
1047 cbToRead = cbMinRead;
1048 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
1049 }
1050 if (rc == VERR_ACCESS_DENIED)
1051#endif
1052 {
1053 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
1054 if (RT_FAILURE(rc))
1055 {
1056 if (cbToRead > cbMinRead)
1057 {
1058 cbToRead = cbMinRead;
1059 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
1060 }
1061 if (RT_FAILURE(rc))
1062 {
1063#ifndef IN_RC
1064 /*
1065 * If we fail to find the page via the guest's page tables
1066 * we invalidate the page in the host TLB (pertaining to
1067 * the guest in the NestedPaging case). See @bugref{6043}.
1068 */
1069 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
1070 {
1071 HMInvalidatePage(pVCpu, uSrcAddr);
1072 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
1073 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
1074 }
1075#endif
1076 }
1077 }
1078 }
1079 }
1080
1081 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
1082 return rc;
1083}
1084
1085
#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
/**
 * Disassembles one instruction at @a InstrGC using the emReadBytes reader.
 *
 * @returns VBox status code (see DISInstrWithReader).
 * @param   pVM         The cross context VM structure (unused).
 * @param   pVCpu       The cross context virtual CPU structure (reader user argument).
 * @param   pDis        The disassembler state; caller must have set uCpuMode.
 * @param   InstrGC     Flat guest address of the instruction.
 * @param   pOpsize     Where to return the instruction size.
 */
DECLINLINE(int) emDisCoreOne(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
{
    NOREF(pVM);
    return DISInstrWithReader(InstrGC, (DISCPUMODE)pDis->uCpuMode, emReadBytes, pVCpu, pDis, pOpsize);
}
#endif
1093
1094
1095/**
1096 * Disassembles the current instruction.
1097 *
1098 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
1099 * details.
1100 *
1101 * @param pVM The cross context VM structure.
1102 * @param pVCpu The cross context virtual CPU structure.
1103 * @param pDis Where to return the parsed instruction info.
1104 * @param pcbInstr Where to return the instruction size. (optional)
1105 */
1106VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
1107{
1108 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
1109 RTGCPTR GCPtrInstr;
1110#if 0
1111 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
1112#else
1113/** @todo Get the CPU mode as well while we're at it! */
1114 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
1115 pCtxCore->rip, &GCPtrInstr);
1116#endif
1117 if (RT_FAILURE(rc))
1118 {
1119 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
1120 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
1121 return rc;
1122 }
1123 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
1124}
1125
1126
1127/**
1128 * Disassembles one instruction.
1129 *
1130 * This is used by internally by the interpreter and by trap/access handlers.
1131 *
1132 * @returns VBox status code.
1133 *
1134 * @param pVM The cross context VM structure.
1135 * @param pVCpu The cross context virtual CPU structure.
1136 * @param GCPtrInstr The flat address of the instruction.
1137 * @param pCtxCore The context core (used to determine the cpu mode).
1138 * @param pDis Where to return the parsed instruction info.
1139 * @param pcbInstr Where to return the instruction size. (optional)
1140 */
1141VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
1142 PDISCPUSTATE pDis, unsigned *pcbInstr)
1143{
1144 NOREF(pVM);
1145 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
1146 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
1147 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
1148 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
1149 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
1150 if (RT_SUCCESS(rc))
1151 return VINF_SUCCESS;
1152 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
1153 return rc;
1154}
1155
1156
#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
/**
 * Compares the CPU state and results produced by the EM interpreter with those
 * produced by IEM, logging any differences (debug aid for the
 * VBOX_COMPARE_IEM_AND_EM build mode).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pEmCtx  The guest context after the EM interpretation.
 * @param   pIemCtx The guest context after the IEM execution.
 * @param   rcEm    The EM status code.
 * @param   rcIem   The IEM status code.
 * @param   cbEm    Bytes written by EM (zero when not applicable).
 * @param   cbIem   Bytes written by IEM (zero when not applicable).
 */
static void emCompareWithIem(PVMCPU pVCpu, PCCPUMCTX pEmCtx, PCCPUMCTX pIemCtx,
                             VBOXSTRICTRC rcEm, VBOXSTRICTRC rcIem,
                             uint32_t cbEm, uint32_t cbIem)
{
    /* Quick compare. */
    if (   rcEm == rcIem
        && cbEm == cbIem
        && g_cbEmWrote == g_cbIemWrote
        && memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote) == 0
        && memcmp(pIemCtx, pEmCtx, sizeof(*pIemCtx)) == 0
        && (g_fEmFFs & g_fInterestingFFs) == (g_fIemFFs & g_fInterestingFFs)
       )
        return;

    /* Report exact differences. */
    RTLogPrintf("! EM and IEM differs at %04x:%08RGv !\n", g_IncomingCtx.cs.Sel, g_IncomingCtx.rip);
    if (rcEm != rcIem)
        RTLogPrintf(" * rcIem=%Rrc rcEm=%Rrc\n", VBOXSTRICTRC_VAL(rcIem), VBOXSTRICTRC_VAL(rcEm));
    else if (cbEm != cbIem)
        RTLogPrintf(" * cbIem=%#x cbEm=%#x\n", cbIem, cbEm);

    if (RT_SUCCESS(rcEm) && RT_SUCCESS(rcIem))
    {
        if (g_cbIemWrote != g_cbEmWrote)
            RTLogPrintf("!! g_cbIemWrote=%#x g_cbEmWrote=%#x\n", g_cbIemWrote, g_cbEmWrote);
        else if (memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote))
        {
            RTLogPrintf("!! IemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
            /* Fixed copy & paste bug: this used to dump g_abIemWrote a second time. */
            RTLogPrintf("!! EmWrote  %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbEmWrote), 64), g_abEmWrote);
        }

        if ((g_fEmFFs & g_fInterestingFFs) != (g_fIemFFs & g_fInterestingFFs))
            RTLogPrintf("!! g_fIemFFs=%#x g_fEmFFs=%#x (diff=%#x)\n", g_fIemFFs & g_fInterestingFFs,
                        g_fEmFFs & g_fInterestingFFs, (g_fIemFFs ^ g_fEmFFs) & g_fInterestingFFs);

# define CHECK_FIELD(a_Field) \
    do \
    { \
        if (pEmCtx->a_Field != pIemCtx->a_Field) \
        { \
            switch (sizeof(pEmCtx->a_Field)) \
            { \
                case 1: RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
                case 2: RTLogPrintf("!! %8s differs - iem=%04x - em=%04x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
                case 4: RTLogPrintf("!! %8s differs - iem=%08x - em=%08x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
                case 8: RTLogPrintf("!! %8s differs - iem=%016llx - em=%016llx\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
                default: RTLogPrintf("!! %8s differs\n", #a_Field); break; \
            } \
            cDiffs++; \
        } \
    } while (0)

# define CHECK_BIT_FIELD(a_Field) \
    do \
    { \
        if (pEmCtx->a_Field != pIemCtx->a_Field) \
        { \
            RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); \
            cDiffs++; \
        } \
    } while (0)

# define CHECK_SEL(a_Sel) \
    do \
    { \
        CHECK_FIELD(a_Sel.Sel); \
        CHECK_FIELD(a_Sel.Attr.u); \
        CHECK_FIELD(a_Sel.u64Base); \
        CHECK_FIELD(a_Sel.u32Limit); \
        CHECK_FIELD(a_Sel.fFlags); \
    } while (0)

        unsigned cDiffs = 0;
        if (memcmp(&pEmCtx->fpu, &pIemCtx->fpu, sizeof(pIemCtx->fpu)))
        {
            RTLogPrintf("  the FPU state differs\n");
            cDiffs++;
            CHECK_FIELD(fpu.FCW);
            CHECK_FIELD(fpu.FSW);
            CHECK_FIELD(fpu.FTW);
            CHECK_FIELD(fpu.FOP);
            CHECK_FIELD(fpu.FPUIP);
            CHECK_FIELD(fpu.CS);
            CHECK_FIELD(fpu.Rsrvd1);
            CHECK_FIELD(fpu.FPUDP);
            CHECK_FIELD(fpu.DS);
            CHECK_FIELD(fpu.Rsrvd2);
            CHECK_FIELD(fpu.MXCSR);
            CHECK_FIELD(fpu.MXCSR_MASK);
            CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
            CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
            CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
            CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
            CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
            CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
            CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
            CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
            CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
            CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
            CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
            CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
            CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
            CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
            CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
            for (unsigned i = 0; i < RT_ELEMENTS(pEmCtx->fpu.au32RsrvdRest); i++)
                CHECK_FIELD(fpu.au32RsrvdRest[i]);
        }
        CHECK_FIELD(rip);
        if (pEmCtx->rflags.u != pIemCtx->rflags.u)
        {
            RTLogPrintf("!! rflags differs - iem=%08llx em=%08llx\n", pIemCtx->rflags.u, pEmCtx->rflags.u);
            CHECK_BIT_FIELD(rflags.Bits.u1CF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
            CHECK_BIT_FIELD(rflags.Bits.u1PF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
            CHECK_BIT_FIELD(rflags.Bits.u1AF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
            CHECK_BIT_FIELD(rflags.Bits.u1ZF);
            CHECK_BIT_FIELD(rflags.Bits.u1SF);
            CHECK_BIT_FIELD(rflags.Bits.u1TF);
            CHECK_BIT_FIELD(rflags.Bits.u1IF);
            CHECK_BIT_FIELD(rflags.Bits.u1DF);
            CHECK_BIT_FIELD(rflags.Bits.u1OF);
            CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
            CHECK_BIT_FIELD(rflags.Bits.u1NT);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
            CHECK_BIT_FIELD(rflags.Bits.u1RF);
            CHECK_BIT_FIELD(rflags.Bits.u1VM);
            CHECK_BIT_FIELD(rflags.Bits.u1AC);
            CHECK_BIT_FIELD(rflags.Bits.u1VIF);
            CHECK_BIT_FIELD(rflags.Bits.u1VIP);
            CHECK_BIT_FIELD(rflags.Bits.u1ID);
        }

        if (!g_fIgnoreRaxRdx)
            CHECK_FIELD(rax);
        CHECK_FIELD(rcx);
        if (!g_fIgnoreRaxRdx)
            CHECK_FIELD(rdx);
        CHECK_FIELD(rbx);
        CHECK_FIELD(rsp);
        CHECK_FIELD(rbp);
        CHECK_FIELD(rsi);
        CHECK_FIELD(rdi);
        CHECK_FIELD(r8);
        CHECK_FIELD(r9);
        CHECK_FIELD(r10);
        CHECK_FIELD(r11);
        CHECK_FIELD(r12);
        CHECK_FIELD(r13);
        CHECK_FIELD(r14); /* Added: r14/r15 were missing from the comparison. */
        CHECK_FIELD(r15);
        CHECK_SEL(cs);
        CHECK_SEL(ss);
        CHECK_SEL(ds);
        CHECK_SEL(es);
        CHECK_SEL(fs);
        CHECK_SEL(gs);
        CHECK_FIELD(cr0);
        CHECK_FIELD(cr2);
        CHECK_FIELD(cr3);
        CHECK_FIELD(cr4);
        CHECK_FIELD(dr[0]);
        CHECK_FIELD(dr[1]);
        CHECK_FIELD(dr[2]);
        CHECK_FIELD(dr[3]);
        CHECK_FIELD(dr[6]);
        CHECK_FIELD(dr[7]);
        CHECK_FIELD(gdtr.cbGdt);
        CHECK_FIELD(gdtr.pGdt);
        CHECK_FIELD(idtr.cbIdt);
        CHECK_FIELD(idtr.pIdt);
        CHECK_SEL(ldtr);
        CHECK_SEL(tr);
        CHECK_FIELD(SysEnter.cs);
        CHECK_FIELD(SysEnter.eip);
        CHECK_FIELD(SysEnter.esp);
        CHECK_FIELD(msrEFER);
        CHECK_FIELD(msrSTAR);
        CHECK_FIELD(msrPAT);
        CHECK_FIELD(msrLSTAR);
        CHECK_FIELD(msrCSTAR);
        CHECK_FIELD(msrSFMASK);
        CHECK_FIELD(msrKERNELGSBASE);

# undef CHECK_FIELD
# undef CHECK_BIT_FIELD
    }
}
#endif /* VBOX_COMPARE_IEM_FIRST || VBOX_COMPARE_IEM_LAST */
1355
1356
1357/**
1358 * Interprets the current instruction.
1359 *
1360 * @returns VBox status code.
1361 * @retval VINF_* Scheduling instructions.
1362 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1363 * @retval VERR_* Fatal errors.
1364 *
1365 * @param pVCpu The cross context virtual CPU structure.
1366 * @param pRegFrame The register frame.
1367 * Updates the EIP if an instruction was executed successfully.
1368 * @param pvFault The fault address (CR2).
1369 *
1370 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1371 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1372 * to worry about e.g. invalid modrm combinations (!)
1373 */
1374VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1375{
1376 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1377 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1378#ifdef VBOX_WITH_IEM
1379 NOREF(pvFault);
1380
1381# ifdef VBOX_COMPARE_IEM_AND_EM
1382 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1383 g_IncomingCtx = *pCtx;
1384 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1385 g_cbEmWrote = g_cbIemWrote = 0;
1386
1387# ifdef VBOX_COMPARE_IEM_FIRST
1388 /* IEM */
1389 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1390 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1391 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1392 rcIem = VERR_EM_INTERPRETER;
1393 g_IemCtx = *pCtx;
1394 g_fIemFFs = pVCpu->fLocalForcedActions;
1395 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1396 *pCtx = g_IncomingCtx;
1397# endif
1398
1399 /* EM */
1400 RTGCPTR pbCode;
1401 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1402 if (RT_SUCCESS(rcEm))
1403 {
1404 uint32_t cbOp;
1405 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1406 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1407 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1408 if (RT_SUCCESS(rcEm))
1409 {
1410 Assert(cbOp == pDis->cbInstr);
1411 uint32_t cbIgnored;
1412 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
1413 if (RT_SUCCESS(rcEm))
1414 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1415
1416 }
1417 rcEm = VERR_EM_INTERPRETER;
1418 }
1419 else
1420 rcEm = VERR_EM_INTERPRETER;
1421# ifdef VBOX_SAME_AS_EM
1422 if (rcEm == VERR_EM_INTERPRETER)
1423 {
1424 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1425 return rcEm;
1426 }
1427# endif
1428 g_EmCtx = *pCtx;
1429 g_fEmFFs = pVCpu->fLocalForcedActions;
1430 VBOXSTRICTRC rc = rcEm;
1431
1432# ifdef VBOX_COMPARE_IEM_LAST
1433 /* IEM */
1434 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1435 *pCtx = g_IncomingCtx;
1436 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1437 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1438 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1439 rcIem = VERR_EM_INTERPRETER;
1440 g_IemCtx = *pCtx;
1441 g_fIemFFs = pVCpu->fLocalForcedActions;
1442 rc = rcIem;
1443# endif
1444
1445# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1446 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1447# endif
1448
1449# else
1450 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1451 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1452 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1453 rc = VERR_EM_INTERPRETER;
1454# endif
1455 if (rc != VINF_SUCCESS)
1456 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1457
1458 return rc;
1459#else
1460 RTGCPTR pbCode;
1461 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1462 if (RT_SUCCESS(rc))
1463 {
1464 uint32_t cbOp;
1465 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1466 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1467 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1468 if (RT_SUCCESS(rc))
1469 {
1470 Assert(cbOp == pDis->cbInstr);
1471 uint32_t cbIgnored;
1472 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
1473 if (RT_SUCCESS(rc))
1474 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1475
1476 return rc;
1477 }
1478 }
1479 return VERR_EM_INTERPRETER;
1480#endif
1481}
1482
1483
1484/**
1485 * Interprets the current instruction.
1486 *
1487 * @returns VBox status code.
1488 * @retval VINF_* Scheduling instructions.
1489 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1490 * @retval VERR_* Fatal errors.
1491 *
1492 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1493 * @param pRegFrame The register frame.
1494 * Updates the EIP if an instruction was executed successfully.
1495 * @param pvFault The fault address (CR2).
1496 * @param pcbWritten Size of the write (if applicable).
1497 *
1498 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1499 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1500 * to worry about e.g. invalid modrm combinations (!)
1501 */
1502VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
1503{
1504 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1505 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1506#ifdef VBOX_WITH_IEM
1507 NOREF(pvFault);
1508
1509# ifdef VBOX_COMPARE_IEM_AND_EM
1510 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1511 g_IncomingCtx = *pCtx;
1512 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1513 g_cbEmWrote = g_cbIemWrote = 0;
1514
1515# ifdef VBOX_COMPARE_IEM_FIRST
1516 /* IEM */
1517 uint32_t cbIemWritten = 0;
1518 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
1519 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1520 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1521 rcIem = VERR_EM_INTERPRETER;
1522 g_IemCtx = *pCtx;
1523 g_fIemFFs = pVCpu->fLocalForcedActions;
1524 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1525 *pCtx = g_IncomingCtx;
1526# endif
1527
1528 /* EM */
1529 uint32_t cbEmWritten = 0;
1530 RTGCPTR pbCode;
1531 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1532 if (RT_SUCCESS(rcEm))
1533 {
1534 uint32_t cbOp;
1535 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1536 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1537 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1538 if (RT_SUCCESS(rcEm))
1539 {
1540 Assert(cbOp == pDis->cbInstr);
1541 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbEmWritten);
1542 if (RT_SUCCESS(rcEm))
1543 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1544
1545 }
1546 else
1547 rcEm = VERR_EM_INTERPRETER;
1548 }
1549 else
1550 rcEm = VERR_EM_INTERPRETER;
1551# ifdef VBOX_SAME_AS_EM
1552 if (rcEm == VERR_EM_INTERPRETER)
1553 {
1554 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1555 return rcEm;
1556 }
1557# endif
1558 g_EmCtx = *pCtx;
1559 g_fEmFFs = pVCpu->fLocalForcedActions;
1560 *pcbWritten = cbEmWritten;
1561 VBOXSTRICTRC rc = rcEm;
1562
1563# ifdef VBOX_COMPARE_IEM_LAST
1564 /* IEM */
1565 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1566 *pCtx = g_IncomingCtx;
1567 uint32_t cbIemWritten = 0;
1568 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
1569 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1570 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1571 rcIem = VERR_EM_INTERPRETER;
1572 g_IemCtx = *pCtx;
1573 g_fIemFFs = pVCpu->fLocalForcedActions;
1574 *pcbWritten = cbIemWritten;
1575 rc = rcIem;
1576# endif
1577
1578# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1579 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, cbEmWritten, cbIemWritten);
1580# endif
1581
1582# else
1583 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
1584 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1585 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1586 rc = VERR_EM_INTERPRETER;
1587# endif
1588 if (rc != VINF_SUCCESS)
1589 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1590
1591 return rc;
1592#else
1593 RTGCPTR pbCode;
1594 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
1595 if (RT_SUCCESS(rc))
1596 {
1597 uint32_t cbOp;
1598 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
1599 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
1600 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
1601 if (RT_SUCCESS(rc))
1602 {
1603 Assert(cbOp == pDis->cbInstr);
1604 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, pcbWritten);
1605 if (RT_SUCCESS(rc))
1606 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1607
1608 return rc;
1609 }
1610 }
1611 return VERR_EM_INTERPRETER;
1612#endif
1613}
1614
1615
1616/**
1617 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1618 *
1619 * IP/EIP/RIP *IS* updated!
1620 *
1621 * @returns VBox strict status code.
1622 * @retval VINF_* Scheduling instructions. When these are returned, it
1623 * starts to get a bit tricky to know whether code was
1624 * executed or not... We'll address this when it becomes a problem.
1625 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1626 * @retval VERR_* Fatal errors.
1627 *
1628 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1629 * @param pDis The disassembler cpu state for the instruction to be
1630 * interpreted.
1631 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1632 * @param pvFault The fault address (CR2).
1633 * @param enmCodeType Code type (user/supervisor)
1634 *
1635 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1636 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1637 * to worry about e.g. invalid modrm combinations (!)
1638 *
1639 * @todo At this time we do NOT check if the instruction overwrites vital information.
1640 * Make sure this can't happen!! (will add some assertions/checks later)
1641 */
1642VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
1643 RTGCPTR pvFault, EMCODETYPE enmCodeType)
1644{
1645 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1646 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1647#ifdef VBOX_WITH_IEM
1648 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
1649
1650# ifdef VBOX_COMPARE_IEM_AND_EM
1651 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1652 g_IncomingCtx = *pCtx;
1653 g_fIncomingFFs = pVCpu->fLocalForcedActions;
1654 g_cbEmWrote = g_cbIemWrote = 0;
1655
1656# ifdef VBOX_COMPARE_IEM_FIRST
1657 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1658 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1659 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1660 rcIem = VERR_EM_INTERPRETER;
1661 g_IemCtx = *pCtx;
1662 g_fIemFFs = pVCpu->fLocalForcedActions;
1663 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1664 *pCtx = g_IncomingCtx;
1665# endif
1666
1667 /* EM */
1668 uint32_t cbIgnored;
1669 VBOXSTRICTRC rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1670 if (RT_SUCCESS(rcEm))
1671 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1672# ifdef VBOX_SAME_AS_EM
1673 if (rcEm == VERR_EM_INTERPRETER)
1674 {
1675 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1676 return rcEm;
1677 }
1678# endif
1679 g_EmCtx = *pCtx;
1680 g_fEmFFs = pVCpu->fLocalForcedActions;
1681 VBOXSTRICTRC rc = rcEm;
1682
1683# ifdef VBOX_COMPARE_IEM_LAST
1684 /* IEM */
1685 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1686 *pCtx = g_IncomingCtx;
1687 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1688 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1689 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1690 rcIem = VERR_EM_INTERPRETER;
1691 g_IemCtx = *pCtx;
1692 g_fIemFFs = pVCpu->fLocalForcedActions;
1693 rc = rcIem;
1694# endif
1695
1696# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1697 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1698# endif
1699
1700# else
1701 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1702 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1703 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1704 rc = VERR_EM_INTERPRETER;
1705# endif
1706
1707 if (rc != VINF_SUCCESS)
1708 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1709
1710 return rc;
1711#else
1712 uint32_t cbIgnored;
1713 VBOXSTRICTRC rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1714 if (RT_SUCCESS(rc))
1715 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1716 return rc;
1717#endif
1718}
1719
1720#ifdef IN_RC
1721
1722DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1723{
1724 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1725 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1726 return rc;
1727 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1728}
1729
1730
1731/**
1732 * Interpret IRET (currently only to V86 code) - PATM only.
1733 *
1734 * @returns VBox status code.
1735 * @param pVM The cross context VM structure.
1736 * @param pVCpu The cross context virtual CPU structure.
1737 * @param pRegFrame The register frame.
1738 *
1739 */
1740VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1741{
1742 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1743 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1744 int rc;
1745
1746 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1747 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1748 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1749 * this function. Fear that it may guru on us, thus not converted to
1750 * IEM. */
1751
1752 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1753 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1754 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1755 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1756 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1757
1758 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1759 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1760 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1761 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1762 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1763 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1764 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1765
1766 pRegFrame->eip = eip & 0xffff;
1767 pRegFrame->cs.Sel = cs;
1768
1769 /* Mask away all reserved bits */
1770 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1771 eflags &= uMask;
1772
1773 CPUMRawSetEFlags(pVCpu, eflags);
1774 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1775
1776 pRegFrame->esp = esp;
1777 pRegFrame->ss.Sel = ss;
1778 pRegFrame->ds.Sel = ds;
1779 pRegFrame->es.Sel = es;
1780 pRegFrame->fs.Sel = fs;
1781 pRegFrame->gs.Sel = gs;
1782
1783 return VINF_SUCCESS;
1784}
1785
1786# ifndef VBOX_WITH_IEM
1787/**
1788 * IRET Emulation.
1789 */
1790static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1791{
1792#ifdef VBOX_WITH_RAW_RING1
1793 NOREF(pvFault); NOREF(pcbSize); NOREF(pDis);
1794 if (EMIsRawRing1Enabled(pVM))
1795 {
1796 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1797 RTGCUINTPTR eip, cs, esp, ss, eflags, uMask;
1798 int rc;
1799 uint32_t cpl, rpl;
1800
1801 /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */
1802 /** @todo we don't verify all the edge cases that generate #GP faults */
1803
1804 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1805 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1806 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1807 * this function. Fear that it may guru on us, thus not converted to
1808 * IEM. */
1809
1810 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1811 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1812 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1813 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1814 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1815
1816 /* Deal with V86 above. */
1817 if (eflags & X86_EFL_VM)
1818 return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
1819
1820 cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
1821 rpl = cs & X86_SEL_RPL;
1822
1823 Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags));
1824 if (rpl != cpl)
1825 {
1826 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1827 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1828 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1829 Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl));
1830 Log(("emInterpretIret: SS:ESP=%04x:%08x\n", ss, esp));
1831 pRegFrame->ss.Sel = ss;
1832 pRegFrame->esp = esp;
1833 }
1834 pRegFrame->cs.Sel = cs;
1835 pRegFrame->eip = eip;
1836
1837 /* Adjust CS & SS as required. */
1838 CPUMRCRecheckRawState(pVCpu, pRegFrame);
1839
1840 /* Mask away all reserved bits */
1841 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1842 eflags &= uMask;
1843
1844 CPUMRawSetEFlags(pVCpu, eflags);
1845 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1846 return VINF_SUCCESS;
1847 }
1848#else
1849 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
1850#endif
1851 return VERR_EM_INTERPRETER;
1852}
1853# endif /* !VBOX_WITH_IEM */
1854
1855#endif /* IN_RC */
1856
1857
1858
1859/*
1860 *
1861 * Old interpreter primitives used by HM, move/eliminate later.
1862 * Old interpreter primitives used by HM, move/eliminate later.
1863 * Old interpreter primitives used by HM, move/eliminate later.
1864 * Old interpreter primitives used by HM, move/eliminate later.
1865 * Old interpreter primitives used by HM, move/eliminate later.
1866 *
1867 */
1868
1869
1870/**
1871 * Interpret CPUID given the parameters in the CPU context.
1872 *
1873 * @returns VBox status code.
1874 * @param pVM The cross context VM structure.
1875 * @param pVCpu The cross context virtual CPU structure.
1876 * @param pRegFrame The register frame.
1877 *
1878 */
1879VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1880{
1881 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1882 uint32_t iLeaf = pRegFrame->eax;
1883 uint32_t iSubLeaf = pRegFrame->ecx;
1884 NOREF(pVM);
1885
1886 /* cpuid clears the high dwords of the affected 64 bits registers. */
1887 pRegFrame->rax = 0;
1888 pRegFrame->rbx = 0;
1889 pRegFrame->rcx = 0;
1890 pRegFrame->rdx = 0;
1891
1892 /* Note: operates the same in 64 and non-64 bits mode. */
1893 CPUMGetGuestCpuId(pVCpu, iLeaf, iSubLeaf, &pRegFrame->eax, &pRegFrame->ebx, &pRegFrame->ecx, &pRegFrame->edx);
1894 Log(("Emulate: CPUID %x/%x -> %08x %08x %08x %08x\n", iLeaf, iSubLeaf, pRegFrame->eax, pRegFrame->ebx, pRegFrame->ecx, pRegFrame->edx));
1895 return VINF_SUCCESS;
1896}
1897
1898
1899/**
1900 * Interpret RDPMC.
1901 *
1902 * @returns VBox status code.
1903 * @param pVM The cross context VM structure.
1904 * @param pVCpu The cross context virtual CPU structure.
1905 * @param pRegFrame The register frame.
1906 *
1907 */
1908VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1909{
1910 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1911 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1912
1913 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1914 if ( !(uCR4 & X86_CR4_PCE)
1915 && CPUMGetGuestCPL(pVCpu) != 0)
1916 {
1917 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1918 return VERR_EM_INTERPRETER; /* genuine #GP */
1919 }
1920
1921 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1922 pRegFrame->rax = 0;
1923 pRegFrame->rdx = 0;
1924 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1925 * ecx but see @bugref{3472}! */
1926
1927 NOREF(pVM);
1928 return VINF_SUCCESS;
1929}
1930
1931
1932/**
1933 * MWAIT Emulation.
1934 */
1935VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1936{
1937 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1938 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1939 NOREF(pVM);
1940
1941 /* Get the current privilege level. */
1942 cpl = CPUMGetGuestCPL(pVCpu);
1943 if (cpl != 0)
1944 return VERR_EM_INTERPRETER; /* supervisor only */
1945
1946 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1947 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1948 return VERR_EM_INTERPRETER; /* not supported */
1949
1950 /*
1951 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1952 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1953 */
1954 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1955 if (pRegFrame->ecx > 1)
1956 {
1957 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1958 return VERR_EM_INTERPRETER; /* illegal value. */
1959 }
1960
1961 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1962 {
1963 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1964 return VERR_EM_INTERPRETER; /* illegal value. */
1965 }
1966
1967 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1968}
1969
1970
1971/**
1972 * MONITOR Emulation.
1973 */
1974VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1975{
1976 uint32_t u32Dummy, u32ExtFeatures, cpl;
1977 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1978 NOREF(pVM);
1979
1980 if (pRegFrame->ecx != 0)
1981 {
1982 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1983 return VERR_EM_INTERPRETER; /* illegal value. */
1984 }
1985
1986 /* Get the current privilege level. */
1987 cpl = CPUMGetGuestCPL(pVCpu);
1988 if (cpl != 0)
1989 return VERR_EM_INTERPRETER; /* supervisor only */
1990
1991 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1992 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1993 return VERR_EM_INTERPRETER; /* not supported */
1994
1995 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1996 return VINF_SUCCESS;
1997}
1998
1999
2000/* VT-x only: */
2001
2002/**
2003 * Interpret INVLPG.
2004 *
2005 * @returns VBox status code.
2006 * @param pVM The cross context VM structure.
2007 * @param pVCpu The cross context virtual CPU structure.
2008 * @param pRegFrame The register frame.
2009 * @param pAddrGC Operand address.
2010 *
2011 */
2012VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC)
2013{
2014 /** @todo is addr always a flat linear address or ds based
2015 * (in absence of segment override prefixes)????
2016 */
2017 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
2018 NOREF(pVM); NOREF(pRegFrame);
2019#ifdef IN_RC
2020 LogFlow(("RC: EMULATE: invlpg %RGv\n", pAddrGC));
2021#endif
2022 VBOXSTRICTRC rc = PGMInvalidatePage(pVCpu, pAddrGC);
2023 if ( rc == VINF_SUCCESS
2024 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
2025 return VINF_SUCCESS;
2026 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
2027 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), pAddrGC),
2028 VERR_EM_INTERPRETER);
2029 return rc;
2030}
2031
2032
2033#ifdef LOG_ENABLED
2034static const char *emMSRtoString(uint32_t uMsr)
2035{
2036 switch (uMsr)
2037 {
2038 case MSR_IA32_APICBASE: return "MSR_IA32_APICBASE";
2039 case MSR_IA32_CR_PAT: return "MSR_IA32_CR_PAT";
2040 case MSR_IA32_SYSENTER_CS: return "MSR_IA32_SYSENTER_CS";
2041 case MSR_IA32_SYSENTER_EIP: return "MSR_IA32_SYSENTER_EIP";
2042 case MSR_IA32_SYSENTER_ESP: return "MSR_IA32_SYSENTER_ESP";
2043 case MSR_K6_EFER: return "MSR_K6_EFER";
2044 case MSR_K8_SF_MASK: return "MSR_K8_SF_MASK";
2045 case MSR_K6_STAR: return "MSR_K6_STAR";
2046 case MSR_K8_LSTAR: return "MSR_K8_LSTAR";
2047 case MSR_K8_CSTAR: return "MSR_K8_CSTAR";
2048 case MSR_K8_FS_BASE: return "MSR_K8_FS_BASE";
2049 case MSR_K8_GS_BASE: return "MSR_K8_GS_BASE";
2050 case MSR_K8_KERNEL_GS_BASE: return "MSR_K8_KERNEL_GS_BASE";
2051 case MSR_K8_TSC_AUX: return "MSR_K8_TSC_AUX";
2052 case MSR_IA32_BIOS_SIGN_ID: return "Unsupported MSR_IA32_BIOS_SIGN_ID";
2053 case MSR_IA32_PLATFORM_ID: return "Unsupported MSR_IA32_PLATFORM_ID";
2054 case MSR_IA32_BIOS_UPDT_TRIG: return "Unsupported MSR_IA32_BIOS_UPDT_TRIG";
2055 case MSR_IA32_TSC: return "MSR_IA32_TSC";
2056 case MSR_IA32_MISC_ENABLE: return "MSR_IA32_MISC_ENABLE";
2057 case MSR_IA32_MTRR_CAP: return "MSR_IA32_MTRR_CAP";
2058 case MSR_IA32_MCG_CAP: return "Unsupported MSR_IA32_MCG_CAP";
2059 case MSR_IA32_MCG_STATUS: return "Unsupported MSR_IA32_MCG_STATUS";
2060 case MSR_IA32_MCG_CTRL: return "Unsupported MSR_IA32_MCG_CTRL";
2061 case MSR_IA32_MTRR_DEF_TYPE: return "MSR_IA32_MTRR_DEF_TYPE";
2062 case MSR_K7_EVNTSEL0: return "Unsupported MSR_K7_EVNTSEL0";
2063 case MSR_K7_EVNTSEL1: return "Unsupported MSR_K7_EVNTSEL1";
2064 case MSR_K7_EVNTSEL2: return "Unsupported MSR_K7_EVNTSEL2";
2065 case MSR_K7_EVNTSEL3: return "Unsupported MSR_K7_EVNTSEL3";
2066 case MSR_IA32_MC0_CTL: return "Unsupported MSR_IA32_MC0_CTL";
2067 case MSR_IA32_MC0_STATUS: return "Unsupported MSR_IA32_MC0_STATUS";
2068 case MSR_IA32_PERFEVTSEL0: return "Unsupported MSR_IA32_PERFEVTSEL0";
2069 case MSR_IA32_PERFEVTSEL1: return "Unsupported MSR_IA32_PERFEVTSEL1";
2070 case MSR_IA32_PERF_STATUS: return "MSR_IA32_PERF_STATUS";
2071 case MSR_IA32_PLATFORM_INFO: return "MSR_IA32_PLATFORM_INFO";
2072 case MSR_IA32_PERF_CTL: return "Unsupported MSR_IA32_PERF_CTL";
2073 case MSR_K7_PERFCTR0: return "Unsupported MSR_K7_PERFCTR0";
2074 case MSR_K7_PERFCTR1: return "Unsupported MSR_K7_PERFCTR1";
2075 case MSR_K7_PERFCTR2: return "Unsupported MSR_K7_PERFCTR2";
2076 case MSR_K7_PERFCTR3: return "Unsupported MSR_K7_PERFCTR3";
2077 case MSR_IA32_PMC0: return "Unsupported MSR_IA32_PMC0";
2078 case MSR_IA32_PMC1: return "Unsupported MSR_IA32_PMC1";
2079 case MSR_IA32_PMC2: return "Unsupported MSR_IA32_PMC2";
2080 case MSR_IA32_PMC3: return "Unsupported MSR_IA32_PMC3";
2081 }
2082 return "Unknown MSR";
2083}
2084#endif /* LOG_ENABLED */
2085
2086
2087/**
2088 * Interpret RDMSR
2089 *
2090 * @returns VBox status code.
2091 * @param pVM The cross context VM structure.
2092 * @param pVCpu The cross context virtual CPU structure.
2093 * @param pRegFrame The register frame.
2094 */
2095VMM_INT_DECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
2096{
2097 NOREF(pVM);
2098
2099 /* Get the current privilege level. */
2100 if (CPUMGetGuestCPL(pVCpu) != 0)
2101 {
2102 Log4(("EM: Refuse RDMSR: CPL != 0\n"));
2103 return VERR_EM_INTERPRETER; /* supervisor only */
2104 }
2105
2106 uint64_t uValue;
2107 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
2108 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2109 {
2110 Log4(("EM: Refuse RDMSR: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2111 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_READ);
2112 return VERR_EM_INTERPRETER;
2113 }
2114 pRegFrame->rax = RT_LO_U32(uValue);
2115 pRegFrame->rdx = RT_HI_U32(uValue);
2116 LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
2117 return VINF_SUCCESS;
2118}
2119
2120
2121/**
2122 * Interpret WRMSR
2123 *
2124 * @returns VBox status code.
2125 * @param pVM The cross context VM structure.
2126 * @param pVCpu The cross context virtual CPU structure.
2127 * @param pRegFrame The register frame.
2128 */
2129VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
2130{
2131 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
2132
2133 /* Check the current privilege level, this instruction is supervisor only. */
2134 if (CPUMGetGuestCPL(pVCpu) != 0)
2135 {
2136 Log4(("EM: Refuse WRMSR: CPL != 0\n"));
2137 return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
2138 }
2139
2140 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
2141 if (rcStrict != VINF_SUCCESS)
2142 {
2143 Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2144 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
2145 return VERR_EM_INTERPRETER;
2146 }
2147 LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
2148 RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
2149 NOREF(pVM);
2150 return VINF_SUCCESS;
2151}
2152
2153
2154/**
2155 * Interpret DRx write.
2156 *
2157 * @returns VBox status code.
2158 * @param pVM The cross context VM structure.
2159 * @param pVCpu The cross context virtual CPU structure.
2160 * @param pRegFrame The register frame.
2161 * @param DestRegDrx DRx register index (USE_REG_DR*)
2162 * @param SrcRegGen General purpose register index (USE_REG_E**))
2163 *
2164 */
2165VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
2166{
2167 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
2168 uint64_t uNewDrX;
2169 int rc;
2170 NOREF(pVM);
2171
2172 if (CPUMIsGuestIn64BitCode(pVCpu))
2173 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
2174 else
2175 {
2176 uint32_t val32;
2177 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
2178 uNewDrX = val32;
2179 }
2180
2181 if (RT_SUCCESS(rc))
2182 {
2183 if (DestRegDrx == 6)
2184 {
2185 uNewDrX |= X86_DR6_RA1_MASK;
2186 uNewDrX &= ~X86_DR6_RAZ_MASK;
2187 }
2188 else if (DestRegDrx == 7)
2189 {
2190 uNewDrX |= X86_DR7_RA1_MASK;
2191 uNewDrX &= ~X86_DR7_RAZ_MASK;
2192 }
2193
2194 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
2195 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
2196 if (RT_SUCCESS(rc))
2197 return rc;
2198 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
2199 }
2200 return VERR_EM_INTERPRETER;
2201}
2202
2203
2204/**
2205 * Interpret DRx read.
2206 *
2207 * @returns VBox status code.
2208 * @param pVM The cross context VM structure.
2209 * @param pVCpu The cross context virtual CPU structure.
2210 * @param pRegFrame The register frame.
2211 * @param DestRegGen General purpose register index (USE_REG_E**))
2212 * @param SrcRegDrx DRx register index (USE_REG_DR*)
2213 */
2214VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
2215{
2216 uint64_t val64;
2217 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
2218 NOREF(pVM);
2219
2220 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
2221 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
2222 if (CPUMIsGuestIn64BitCode(pVCpu))
2223 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
2224 else
2225 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
2226
2227 if (RT_SUCCESS(rc))
2228 return VINF_SUCCESS;
2229
2230 return VERR_EM_INTERPRETER;
2231}
2232
2233
2234#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
2235
2236
2237
2238
2239
2240
2241/*
2242 *
2243 * The old interpreter.
2244 * The old interpreter.
2245 * The old interpreter.
2246 * The old interpreter.
2247 * The old interpreter.
2248 *
2249 */
2250
2251DECLINLINE(int) emRamRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
2252{
2253#ifdef IN_RC
2254 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
2255 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
2256 return rc;
2257 /*
2258 * The page pool cache may end up here in some cases because it
2259 * flushed one of the shadow mappings used by the trapping
2260 * instruction and it either flushed the TLB or the CPU reused it.
2261 */
2262#else
2263 NOREF(pVM);
2264#endif
2265 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
2266}
2267
2268
2269DECLINLINE(int) emRamWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, uint32_t cb)
2270{
2271 /* Don't use MMGCRamWrite here as it does not respect zero pages, shared
2272 pages or write monitored pages. */
2273 NOREF(pVM);
2274#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
2275 int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, /*fMayTrap*/ false);
2276#else
2277 int rc = VINF_SUCCESS;
2278#endif
2279#ifdef VBOX_COMPARE_IEM_AND_EM
2280 Log(("EM Wrote: %RGv %.*Rhxs rc=%Rrc\n", GCPtrDst, RT_MAX(RT_MIN(cb, 64), 1), pvSrc, rc));
2281 g_cbEmWrote = cb;
2282 memcpy(g_abEmWrote, pvSrc, RT_MIN(cb, sizeof(g_abEmWrote)));
2283#endif
2284 return rc;
2285}
2286
2287
2288/** Convert sel:addr to a flat GC address. */
2289DECLINLINE(RTGCPTR) emConvertToFlatAddr(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, PDISOPPARAM pParam, RTGCPTR pvAddr)
2290{
2291 DISSELREG enmPrefixSeg = DISDetectSegReg(pDis, pParam);
2292 return SELMToFlat(pVM, enmPrefixSeg, pRegFrame, pvAddr);
2293}
2294
2295
2296#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
2297/**
2298 * Get the mnemonic for the disassembled instruction.
2299 *
2300 * GC/R0 doesn't include the strings in the DIS tables because
2301 * of limited space.
2302 */
2303static const char *emGetMnemonic(PDISCPUSTATE pDis)
2304{
2305 switch (pDis->pCurInstr->uOpcode)
2306 {
2307 case OP_XCHG: return "Xchg";
2308 case OP_DEC: return "Dec";
2309 case OP_INC: return "Inc";
2310 case OP_POP: return "Pop";
2311 case OP_OR: return "Or";
2312 case OP_AND: return "And";
2313 case OP_MOV: return "Mov";
2314 case OP_INVLPG: return "InvlPg";
2315 case OP_CPUID: return "CpuId";
2316 case OP_MOV_CR: return "MovCRx";
2317 case OP_MOV_DR: return "MovDRx";
2318 case OP_LLDT: return "LLdt";
2319 case OP_LGDT: return "LGdt";
2320 case OP_LIDT: return "LIdt";
2321 case OP_CLTS: return "Clts";
2322 case OP_MONITOR: return "Monitor";
2323 case OP_MWAIT: return "MWait";
2324 case OP_RDMSR: return "Rdmsr";
2325 case OP_WRMSR: return "Wrmsr";
2326 case OP_ADD: return "Add";
2327 case OP_ADC: return "Adc";
2328 case OP_SUB: return "Sub";
2329 case OP_SBB: return "Sbb";
2330 case OP_RDTSC: return "Rdtsc";
2331 case OP_STI: return "Sti";
2332 case OP_CLI: return "Cli";
2333 case OP_XADD: return "XAdd";
2334 case OP_HLT: return "Hlt";
2335 case OP_IRET: return "Iret";
2336 case OP_MOVNTPS: return "MovNTPS";
2337 case OP_STOSWD: return "StosWD";
2338 case OP_WBINVD: return "WbInvd";
2339 case OP_XOR: return "Xor";
2340 case OP_BTR: return "Btr";
2341 case OP_BTS: return "Bts";
2342 case OP_BTC: return "Btc";
2343 case OP_LMSW: return "Lmsw";
2344 case OP_SMSW: return "Smsw";
2345 case OP_CMPXCHG: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg" : "CmpXchg";
2346 case OP_CMPXCHG8B: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg8b" : "CmpXchg8b";
2347
2348 default:
2349 Log(("Unknown opcode %d\n", pDis->pCurInstr->uOpcode));
2350 return "???";
2351 }
2352}
2353#endif /* VBOX_STRICT || LOG_ENABLED */
2354
2355
2356/**
2357 * XCHG instruction emulation.
2358 */
2359static int emInterpretXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2360{
2361 DISQPVPARAMVAL param1, param2;
2362 NOREF(pvFault);
2363
2364 /* Source to make DISQueryParamVal read the register value - ugly hack */
2365 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2366 if(RT_FAILURE(rc))
2367 return VERR_EM_INTERPRETER;
2368
2369 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2370 if(RT_FAILURE(rc))
2371 return VERR_EM_INTERPRETER;
2372
2373#ifdef IN_RC
2374 if (TRPMHasTrap(pVCpu))
2375 {
2376 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2377 {
2378#endif
2379 RTGCPTR pParam1 = 0, pParam2 = 0;
2380 uint64_t valpar1, valpar2;
2381
2382 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
2383 switch(param1.type)
2384 {
2385 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
2386 valpar1 = param1.val.val64;
2387 break;
2388
2389 case DISQPV_TYPE_ADDRESS:
2390 pParam1 = (RTGCPTR)param1.val.val64;
2391 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2392 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2393 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2394 if (RT_FAILURE(rc))
2395 {
2396 AssertMsgFailed(("MMGCRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2397 return VERR_EM_INTERPRETER;
2398 }
2399 break;
2400
2401 default:
2402 AssertFailed();
2403 return VERR_EM_INTERPRETER;
2404 }
2405
2406 switch(param2.type)
2407 {
2408 case DISQPV_TYPE_ADDRESS:
2409 pParam2 = (RTGCPTR)param2.val.val64;
2410 pParam2 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pParam2);
2411 EM_ASSERT_FAULT_RETURN(pParam2 == pvFault, VERR_EM_INTERPRETER);
2412 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar2, pParam2, param2.size);
2413 if (RT_FAILURE(rc))
2414 {
2415 AssertMsgFailed(("MMGCRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2416 }
2417 break;
2418
2419 case DISQPV_TYPE_IMMEDIATE:
2420 valpar2 = param2.val.val64;
2421 break;
2422
2423 default:
2424 AssertFailed();
2425 return VERR_EM_INTERPRETER;
2426 }
2427
2428 /* Write value of parameter 2 to parameter 1 (reg or memory address) */
2429 if (pParam1 == 0)
2430 {
2431 Assert(param1.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
2432 switch(param1.size)
2433 {
2434 case 1: //special case for AH etc
2435 rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t )valpar2); break;
2436 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)valpar2); break;
2437 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)valpar2); break;
2438 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, valpar2); break;
2439 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2440 }
2441 if (RT_FAILURE(rc))
2442 return VERR_EM_INTERPRETER;
2443 }
2444 else
2445 {
2446 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar2, param1.size);
2447 if (RT_FAILURE(rc))
2448 {
2449 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2450 return VERR_EM_INTERPRETER;
2451 }
2452 }
2453
2454 /* Write value of parameter 1 to parameter 2 (reg or memory address) */
2455 if (pParam2 == 0)
2456 {
2457 Assert(param2.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
2458 switch(param2.size)
2459 {
2460 case 1: //special case for AH etc
2461 rc = DISWriteReg8(pRegFrame, pDis->Param2.Base.idxGenReg, (uint8_t )valpar1); break;
2462 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param2.Base.idxGenReg, (uint16_t)valpar1); break;
2463 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param2.Base.idxGenReg, (uint32_t)valpar1); break;
2464 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param2.Base.idxGenReg, valpar1); break;
2465 default: AssertFailedReturn(VERR_EM_INTERPRETER);
2466 }
2467 if (RT_FAILURE(rc))
2468 return VERR_EM_INTERPRETER;
2469 }
2470 else
2471 {
2472 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam2, &valpar1, param2.size);
2473 if (RT_FAILURE(rc))
2474 {
2475 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2476 return VERR_EM_INTERPRETER;
2477 }
2478 }
2479
2480 *pcbSize = param2.size;
2481 return VINF_SUCCESS;
2482#ifdef IN_RC
2483 }
2484 }
2485 return VERR_EM_INTERPRETER;
2486#endif
2487}
2488
2489
/**
 * INC and DEC emulation.
 *
 * Interprets INC/DEC with a memory destination: the operand is read from
 * guest memory, modified by the @a pfnEmulate assembly helper, written back,
 * and the resulting arithmetic status flags are merged into guest EFLAGS.
 *
 * @returns VBox status code (VERR_EM_INTERPRETER = fall back to another
 *          execution method).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembler state for the instruction.
 * @param   pRegFrame   The register frame.
 * @param   pvFault     The fault address (used for sanity checking in RC).
 * @param   pcbSize     Where to return the operand size on success.
 * @param   pfnEmulate  Helper performing the actual INC or DEC operation.
 */
static int emInterpretIncDec(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
                             PFNEMULATEPARAM2 pfnEmulate)
{
    DISQPVPARAMVAL param1;
    NOREF(pvFault);

    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

#ifdef IN_RC
    /* In RC we only interpret this when tripped over by a write page fault. */
    if (TRPMHasTrap(pVCpu))
    {
        if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
        {
#endif
            RTGCPTR pParam1 = 0;
            uint64_t valpar1;

            /* The destination must be a memory operand; registers never fault. */
            if (param1.type == DISQPV_TYPE_ADDRESS)
            {
                pParam1 = (RTGCPTR)param1.val.val64;
                pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
#ifdef IN_RC
                /* Safety check (in theory it could cross a page boundary and fault there though) */
                EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
#endif
                rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
                    return VERR_EM_INTERPRETER;
                }
            }
            else
            {
                AssertFailed();
                return VERR_EM_INTERPRETER;
            }

            uint32_t eflags;

            /* Perform the INC/DEC via the assembly helper; it returns the status flags. */
            eflags = pfnEmulate(&valpar1, param1.size);

            /* Write result back */
            rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
                return VERR_EM_INTERPRETER;
            }

            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags                &  (X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));

            /* All done! */
            *pcbSize = param1.size;
            return VINF_SUCCESS;
#ifdef IN_RC
        }
    }
    return VERR_EM_INTERPRETER;
#endif
}
2558
2559
/**
 * POP Emulation.
 *
 * Interprets POP with a memory destination: reads the value off the guest
 * stack, writes it to the destination operand and finally adjusts ESP.
 *
 * @returns VBox status code (VERR_EM_INTERPRETER = fall back to another
 *          execution method).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        The disassembler state for the instruction.
 * @param   pRegFrame   The register frame.
 * @param   pvFault     The fault address (used for sanity checking in RC).
 * @param   pcbSize     Where to return the operand size on success.
 */
static int emInterpretPop(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
{
    Assert(pDis->uCpuMode != DISCPUMODE_64BIT);    /** @todo check */
    DISQPVPARAMVAL param1;
    NOREF(pvFault);

    int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
    if(RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

#ifdef IN_RC
    /* In RC we only interpret this when tripped over by a write page fault. */
    if (TRPMHasTrap(pVCpu))
    {
        if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
        {
#endif
            RTGCPTR pParam1 = 0;
            uint32_t valpar1;
            RTGCPTR pStackVal;

            /* Read stack value first */
            if (CPUMGetGuestCodeBits(pVCpu) == 16)
                return VERR_EM_INTERPRETER; /* No legacy 16 bits stuff here, please. */

            /* Convert address; don't bother checking limits etc, as we only read here */
            pStackVal = SELMToFlat(pVM, DISSELREG_SS, pRegFrame, (RTGCPTR)pRegFrame->esp);
            if (pStackVal == 0)
                return VERR_EM_INTERPRETER;

            rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pStackVal, param1.size);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
                return VERR_EM_INTERPRETER;
            }

            /* The destination must be a memory operand; register POPs never fault. */
            if (param1.type == DISQPV_TYPE_ADDRESS)
            {
                pParam1 = (RTGCPTR)param1.val.val64;

                /* pop [esp+xx] uses esp after the actual pop! */
                AssertCompile(DISGREG_ESP == DISGREG_SP);
                if (    (pDis->Param1.fUse & DISUSE_BASE)
                    &&  (pDis->Param1.fUse & (DISUSE_REG_GEN16|DISUSE_REG_GEN32))
                    &&  pDis->Param1.Base.idxGenReg == DISGREG_ESP
                   )
                   pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + param1.size);

                pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
                /* The fault may be on either the destination write or the stack read. */
                EM_ASSERT_FAULT_RETURN(pParam1 == pvFault || (RTGCPTR)pRegFrame->esp == pvFault, VERR_EM_INTERPRETER);
                rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
                    return VERR_EM_INTERPRETER;
                }

                /* Update ESP as the last step */
                pRegFrame->esp += param1.size;
            }
            else
            {
#ifndef DEBUG_bird // annoying assertion.
                AssertFailed();
#endif
                return VERR_EM_INTERPRETER;
            }

            /* All done! */
            *pcbSize = param1.size;
            return VINF_SUCCESS;
#ifdef IN_RC
        }
    }
    return VERR_EM_INTERPRETER;
#endif
}
2640
2641
2642/**
2643 * XOR/OR/AND Emulation.
2644 */
2645static int emInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2646 PFNEMULATEPARAM3 pfnEmulate)
2647{
2648 DISQPVPARAMVAL param1, param2;
2649 NOREF(pvFault);
2650
2651 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2652 if(RT_FAILURE(rc))
2653 return VERR_EM_INTERPRETER;
2654
2655 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2656 if(RT_FAILURE(rc))
2657 return VERR_EM_INTERPRETER;
2658
2659#ifdef IN_RC
2660 if (TRPMHasTrap(pVCpu))
2661 {
2662 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2663 {
2664#endif
2665 RTGCPTR pParam1;
2666 uint64_t valpar1, valpar2;
2667
2668 if (pDis->Param1.cb != pDis->Param2.cb)
2669 {
2670 if (pDis->Param1.cb < pDis->Param2.cb)
2671 {
2672 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2673 return VERR_EM_INTERPRETER;
2674 }
2675 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2676 pDis->Param2.cb = pDis->Param1.cb;
2677 param2.size = param1.size;
2678 }
2679
2680 /* The destination is always a virtual address */
2681 if (param1.type == DISQPV_TYPE_ADDRESS)
2682 {
2683 pParam1 = (RTGCPTR)param1.val.val64;
2684 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2685 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2686 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2687 if (RT_FAILURE(rc))
2688 {
2689 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2690 return VERR_EM_INTERPRETER;
2691 }
2692 }
2693 else
2694 {
2695 AssertFailed();
2696 return VERR_EM_INTERPRETER;
2697 }
2698
2699 /* Register or immediate data */
2700 switch(param2.type)
2701 {
2702 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2703 valpar2 = param2.val.val64;
2704 break;
2705
2706 default:
2707 AssertFailed();
2708 return VERR_EM_INTERPRETER;
2709 }
2710
2711 LogFlow(("emInterpretOrXorAnd %s %RGv %RX64 - %RX64 size %d (%d)\n", emGetMnemonic(pDis), pParam1, valpar1, valpar2, param2.size, param1.size));
2712
2713 /* Data read, emulate instruction. */
2714 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2715
2716 LogFlow(("emInterpretOrXorAnd %s result %RX64\n", emGetMnemonic(pDis), valpar1));
2717
2718 /* Update guest's eflags and finish. */
2719 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2720 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2721
2722 /* And write it back */
2723 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2724 if (RT_SUCCESS(rc))
2725 {
2726 /* All done! */
2727 *pcbSize = param2.size;
2728 return VINF_SUCCESS;
2729 }
2730#ifdef IN_RC
2731 }
2732 }
2733#endif
2734 return VERR_EM_INTERPRETER;
2735}
2736
2737
2738#ifndef VBOX_COMPARE_IEM_AND_EM
2739/**
2740 * LOCK XOR/OR/AND Emulation.
2741 */
2742static int emInterpretLockOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2743 uint32_t *pcbSize, PFNEMULATELOCKPARAM3 pfnEmulate)
2744{
2745 void *pvParam1;
2746 DISQPVPARAMVAL param1, param2;
2747 NOREF(pvFault);
2748
2749#if HC_ARCH_BITS == 32
2750 Assert(pDis->Param1.cb <= 4);
2751#endif
2752
2753 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2754 if(RT_FAILURE(rc))
2755 return VERR_EM_INTERPRETER;
2756
2757 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2758 if(RT_FAILURE(rc))
2759 return VERR_EM_INTERPRETER;
2760
2761 if (pDis->Param1.cb != pDis->Param2.cb)
2762 {
2763 AssertMsgReturn(pDis->Param1.cb >= pDis->Param2.cb, /* should never happen! */
2764 ("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb),
2765 VERR_EM_INTERPRETER);
2766
2767 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2768 pDis->Param2.cb = pDis->Param1.cb;
2769 param2.size = param1.size;
2770 }
2771
2772#ifdef IN_RC
2773 /* Safety check (in theory it could cross a page boundary and fault there though) */
2774 Assert( TRPMHasTrap(pVCpu)
2775 && (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW));
2776 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
2777#endif
2778
2779 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2780 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2781 RTGCUINTREG ValPar2 = param2.val.val64;
2782
2783 /* The destination is always a virtual address */
2784 AssertReturn(param1.type == DISQPV_TYPE_ADDRESS, VERR_EM_INTERPRETER);
2785
2786 RTGCPTR GCPtrPar1 = param1.val.val64;
2787 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2788 PGMPAGEMAPLOCK Lock;
2789 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2790 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2791
2792 /* Try emulate it with a one-shot #PF handler in place. (RC) */
2793 Log2(("%s %RGv imm%d=%RX64\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2794
2795 RTGCUINTREG32 eflags = 0;
2796 rc = pfnEmulate(pvParam1, ValPar2, pDis->Param2.cb, &eflags);
2797 PGMPhysReleasePageMappingLock(pVM, &Lock);
2798 if (RT_FAILURE(rc))
2799 {
2800 Log(("%s %RGv imm%d=%RX64-> emulation failed due to page fault!\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2801 return VERR_EM_INTERPRETER;
2802 }
2803
2804 /* Update guest's eflags and finish. */
2805 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2806 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2807
2808 *pcbSize = param2.size;
2809 return VINF_SUCCESS;
2810}
2811#endif /* !VBOX_COMPARE_IEM_AND_EM */
2812
2813
2814/**
2815 * ADD, ADC & SUB Emulation.
2816 */
2817static int emInterpretAddSub(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2818 PFNEMULATEPARAM3 pfnEmulate)
2819{
2820 NOREF(pvFault);
2821 DISQPVPARAMVAL param1, param2;
2822 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2823 if(RT_FAILURE(rc))
2824 return VERR_EM_INTERPRETER;
2825
2826 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2827 if(RT_FAILURE(rc))
2828 return VERR_EM_INTERPRETER;
2829
2830#ifdef IN_RC
2831 if (TRPMHasTrap(pVCpu))
2832 {
2833 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2834 {
2835#endif
2836 RTGCPTR pParam1;
2837 uint64_t valpar1, valpar2;
2838
2839 if (pDis->Param1.cb != pDis->Param2.cb)
2840 {
2841 if (pDis->Param1.cb < pDis->Param2.cb)
2842 {
2843 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2844 return VERR_EM_INTERPRETER;
2845 }
2846 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2847 pDis->Param2.cb = pDis->Param1.cb;
2848 param2.size = param1.size;
2849 }
2850
2851 /* The destination is always a virtual address */
2852 if (param1.type == DISQPV_TYPE_ADDRESS)
2853 {
2854 pParam1 = (RTGCPTR)param1.val.val64;
2855 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2856 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2857 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2858 if (RT_FAILURE(rc))
2859 {
2860 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2861 return VERR_EM_INTERPRETER;
2862 }
2863 }
2864 else
2865 {
2866#ifndef DEBUG_bird
2867 AssertFailed();
2868#endif
2869 return VERR_EM_INTERPRETER;
2870 }
2871
2872 /* Register or immediate data */
2873 switch(param2.type)
2874 {
2875 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2876 valpar2 = param2.val.val64;
2877 break;
2878
2879 default:
2880 AssertFailed();
2881 return VERR_EM_INTERPRETER;
2882 }
2883
2884 /* Data read, emulate instruction. */
2885 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2886
2887 /* Update guest's eflags and finish. */
2888 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2889 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2890
2891 /* And write it back */
2892 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2893 if (RT_SUCCESS(rc))
2894 {
2895 /* All done! */
2896 *pcbSize = param2.size;
2897 return VINF_SUCCESS;
2898 }
2899#ifdef IN_RC
2900 }
2901 }
2902#endif
2903 return VERR_EM_INTERPRETER;
2904}
2905
2906
2907/**
2908 * ADC Emulation.
2909 */
2910static int emInterpretAdc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2911{
2912 if (pRegFrame->eflags.Bits.u1CF)
2913 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdcWithCarrySet);
2914 else
2915 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdd);
2916}
2917
2918
2919/**
2920 * BTR/C/S Emulation.
2921 */
2922static int emInterpretBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2923 PFNEMULATEPARAM2UINT32 pfnEmulate)
2924{
2925 DISQPVPARAMVAL param1, param2;
2926 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2927 if(RT_FAILURE(rc))
2928 return VERR_EM_INTERPRETER;
2929
2930 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2931 if(RT_FAILURE(rc))
2932 return VERR_EM_INTERPRETER;
2933
2934#ifdef IN_RC
2935 if (TRPMHasTrap(pVCpu))
2936 {
2937 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2938 {
2939#endif
2940 RTGCPTR pParam1;
2941 uint64_t valpar1 = 0, valpar2;
2942 uint32_t eflags;
2943
2944 /* The destination is always a virtual address */
2945 if (param1.type != DISQPV_TYPE_ADDRESS)
2946 return VERR_EM_INTERPRETER;
2947
2948 pParam1 = (RTGCPTR)param1.val.val64;
2949 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2950
2951 /* Register or immediate data */
2952 switch(param2.type)
2953 {
2954 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2955 valpar2 = param2.val.val64;
2956 break;
2957
2958 default:
2959 AssertFailed();
2960 return VERR_EM_INTERPRETER;
2961 }
2962
2963 Log2(("emInterpret%s: pvFault=%RGv pParam1=%RGv val2=%x\n", emGetMnemonic(pDis), pvFault, pParam1, valpar2));
2964 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + valpar2/8);
2965 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)pParam1 & ~3) == pvFault, VERR_EM_INTERPRETER); NOREF(pvFault);
2966 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, 1);
2967 if (RT_FAILURE(rc))
2968 {
2969 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2970 return VERR_EM_INTERPRETER;
2971 }
2972
2973 Log2(("emInterpretBtx: val=%x\n", valpar1));
2974 /* Data read, emulate bit test instruction. */
2975 eflags = pfnEmulate(&valpar1, valpar2 & 0x7);
2976
2977 Log2(("emInterpretBtx: val=%x CF=%d\n", valpar1, !!(eflags & X86_EFL_CF)));
2978
2979 /* Update guest's eflags and finish. */
2980 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2981 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2982
2983 /* And write it back */
2984 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, 1);
2985 if (RT_SUCCESS(rc))
2986 {
2987 /* All done! */
2988 *pcbSize = 1;
2989 return VINF_SUCCESS;
2990 }
2991#ifdef IN_RC
2992 }
2993 }
2994#endif
2995 return VERR_EM_INTERPRETER;
2996}
2997
2998
2999#ifndef VBOX_COMPARE_IEM_AND_EM
3000/**
3001 * LOCK BTR/C/S Emulation.
3002 */
3003static int emInterpretLockBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
3004 uint32_t *pcbSize, PFNEMULATELOCKPARAM2 pfnEmulate)
3005{
3006 void *pvParam1;
3007
3008 DISQPVPARAMVAL param1, param2;
3009 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
3010 if(RT_FAILURE(rc))
3011 return VERR_EM_INTERPRETER;
3012
3013 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
3014 if(RT_FAILURE(rc))
3015 return VERR_EM_INTERPRETER;
3016
3017 /* The destination is always a virtual address */
3018 if (param1.type != DISQPV_TYPE_ADDRESS)
3019 return VERR_EM_INTERPRETER;
3020
3021 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
3022 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
3023 uint64_t ValPar2 = param2.val.val64;
3024
3025 /* Adjust the parameters so what we're dealing with is a bit within the byte pointed to. */
3026 RTGCPTR GCPtrPar1 = param1.val.val64;
3027 GCPtrPar1 = (GCPtrPar1 + ValPar2 / 8);
3028 ValPar2 &= 7;
3029
3030 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
3031#ifdef IN_RC
3032 Assert(TRPMHasTrap(pVCpu));
3033 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)GCPtrPar1 & ~(RTGCUINTPTR)3) == pvFault, VERR_EM_INTERPRETER);
3034#endif
3035
3036 PGMPAGEMAPLOCK Lock;
3037 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3038 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3039
3040 Log2(("emInterpretLockBitTest %s: pvFault=%RGv GCPtrPar1=%RGv imm=%RX64\n", emGetMnemonic(pDis), pvFault, GCPtrPar1, ValPar2));
3041 NOREF(pvFault);
3042
3043 /* Try emulate it with a one-shot #PF handler in place. (RC) */
3044 RTGCUINTREG32 eflags = 0;
3045 rc = pfnEmulate(pvParam1, ValPar2, &eflags);
3046 PGMPhysReleasePageMappingLock(pVM, &Lock);
3047 if (RT_FAILURE(rc))
3048 {
3049 Log(("emInterpretLockBitTest %s: %RGv imm%d=%RX64 -> emulation failed due to page fault!\n",
3050 emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
3051 return VERR_EM_INTERPRETER;
3052 }
3053
3054 Log2(("emInterpretLockBitTest %s: GCPtrPar1=%RGv imm=%RX64 CF=%d\n", emGetMnemonic(pDis), GCPtrPar1, ValPar2, !!(eflags & X86_EFL_CF)));
3055
3056 /* Update guest's eflags and finish. */
3057 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3058 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3059
3060 *pcbSize = 1;
3061 return VINF_SUCCESS;
3062}
3063#endif /* !VBOX_COMPARE_IEM_AND_EM */
3064
3065
3066/**
3067 * MOV emulation.
3068 */
3069static int emInterpretMov(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3070{
3071 NOREF(pvFault);
3072 DISQPVPARAMVAL param1, param2;
3073 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
3074 if(RT_FAILURE(rc))
3075 return VERR_EM_INTERPRETER;
3076
3077 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
3078 if(RT_FAILURE(rc))
3079 return VERR_EM_INTERPRETER;
3080
3081 /* If destination is a segment register, punt. We can't handle it here.
3082 * NB: Source can be a register and still trigger a #PF!
3083 */
3084 if (RT_UNLIKELY(pDis->Param1.fUse == DISUSE_REG_SEG))
3085 return VERR_EM_INTERPRETER;
3086
3087 if (param1.type == DISQPV_TYPE_ADDRESS)
3088 {
3089 RTGCPTR pDest;
3090 uint64_t val64;
3091
3092 switch(param1.type)
3093 {
3094 case DISQPV_TYPE_IMMEDIATE:
3095 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
3096 return VERR_EM_INTERPRETER;
3097 RT_FALL_THRU();
3098
3099 case DISQPV_TYPE_ADDRESS:
3100 pDest = (RTGCPTR)param1.val.val64;
3101 pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
3102 break;
3103
3104 default:
3105 AssertFailed();
3106 return VERR_EM_INTERPRETER;
3107 }
3108
3109 switch(param2.type)
3110 {
3111 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
3112 val64 = param2.val.val64;
3113 break;
3114
3115 default:
3116 Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
3117 return VERR_EM_INTERPRETER;
3118 }
3119#ifdef LOG_ENABLED
3120 if (pDis->uCpuMode == DISCPUMODE_64BIT)
3121 LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
3122 else
3123 LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
3124#endif
3125
3126 Assert(param2.size <= 8 && param2.size > 0);
3127 EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
3128 rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
3129 if (RT_FAILURE(rc))
3130 return VERR_EM_INTERPRETER;
3131
3132 *pcbSize = param2.size;
3133 }
3134#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1)
3135 /* mov xx, cs instruction is dangerous in raw mode and replaced by an 'int3' by csam/patm. */
3136 else if ( param1.type == DISQPV_TYPE_REGISTER
3137 && param2.type == DISQPV_TYPE_REGISTER)
3138 {
3139 AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER);
3140 AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER);
3141 AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER);
3142
3143 uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
3144 uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl;
3145
3146 Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS));
3147 switch (param1.size)
3148 {
3149 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) uValCS); break;
3150 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break;
3151 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break;
3152 default:
3153 AssertFailed();
3154 return VERR_EM_INTERPRETER;
3155 }
3156 AssertRCReturn(rc, rc);
3157 }
3158#endif
3159 else
3160 { /* read fault */
3161 RTGCPTR pSrc;
3162 uint64_t val64;
3163
3164 /* Source */
3165 switch(param2.type)
3166 {
3167 case DISQPV_TYPE_IMMEDIATE:
3168 if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
3169 return VERR_EM_INTERPRETER;
3170 RT_FALL_THRU();
3171
3172 case DISQPV_TYPE_ADDRESS:
3173 pSrc = (RTGCPTR)param2.val.val64;
3174 pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
3175 break;
3176
3177 default:
3178 return VERR_EM_INTERPRETER;
3179 }
3180
3181 Assert(param1.size <= 8 && param1.size > 0);
3182 EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
3183 rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
3184 if (RT_FAILURE(rc))
3185 return VERR_EM_INTERPRETER;
3186
3187 /* Destination */
3188 switch(param1.type)
3189 {
3190 case DISQPV_TYPE_REGISTER:
3191 switch(param1.size)
3192 {
3193 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break;
3194 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
3195 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
3196 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
3197 default:
3198 return VERR_EM_INTERPRETER;
3199 }
3200 if (RT_FAILURE(rc))
3201 return rc;
3202 break;
3203
3204 default:
3205 return VERR_EM_INTERPRETER;
3206 }
3207#ifdef LOG_ENABLED
3208 if (pDis->uCpuMode == DISCPUMODE_64BIT)
3209 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
3210 else
3211 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
3212#endif
3213 }
3214 return VINF_SUCCESS;
3215}
3216
3217
3218#ifndef IN_RC
3219/**
3220 * [REP] STOSWD emulation
3221 */
3222static int emInterpretStosWD(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3223{
3224 int rc;
3225 RTGCPTR GCDest, GCOffset;
3226 uint32_t cbSize;
3227 uint64_t cTransfers;
3228 int offIncrement;
3229 NOREF(pvFault);
3230
3231 /* Don't support any but these three prefix bytes. */
3232 if ((pDis->fPrefix & ~(DISPREFIX_ADDRSIZE|DISPREFIX_OPSIZE|DISPREFIX_REP|DISPREFIX_REX)))
3233 return VERR_EM_INTERPRETER;
3234
3235 switch (pDis->uAddrMode)
3236 {
3237 case DISCPUMODE_16BIT:
3238 GCOffset = pRegFrame->di;
3239 cTransfers = pRegFrame->cx;
3240 break;
3241 case DISCPUMODE_32BIT:
3242 GCOffset = pRegFrame->edi;
3243 cTransfers = pRegFrame->ecx;
3244 break;
3245 case DISCPUMODE_64BIT:
3246 GCOffset = pRegFrame->rdi;
3247 cTransfers = pRegFrame->rcx;
3248 break;
3249 default:
3250 AssertFailed();
3251 return VERR_EM_INTERPRETER;
3252 }
3253
3254 GCDest = SELMToFlat(pVM, DISSELREG_ES, pRegFrame, GCOffset);
3255 switch (pDis->uOpMode)
3256 {
3257 case DISCPUMODE_16BIT:
3258 cbSize = 2;
3259 break;
3260 case DISCPUMODE_32BIT:
3261 cbSize = 4;
3262 break;
3263 case DISCPUMODE_64BIT:
3264 cbSize = 8;
3265 break;
3266 default:
3267 AssertFailed();
3268 return VERR_EM_INTERPRETER;
3269 }
3270
3271 offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cbSize : (signed)cbSize;
3272
3273 if (!(pDis->fPrefix & DISPREFIX_REP))
3274 {
3275 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize));
3276
3277 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
3278 if (RT_FAILURE(rc))
3279 return VERR_EM_INTERPRETER;
3280 Assert(rc == VINF_SUCCESS);
3281
3282 /* Update (e/r)di. */
3283 switch (pDis->uAddrMode)
3284 {
3285 case DISCPUMODE_16BIT:
3286 pRegFrame->di += offIncrement;
3287 break;
3288 case DISCPUMODE_32BIT:
3289 pRegFrame->edi += offIncrement;
3290 break;
3291 case DISCPUMODE_64BIT:
3292 pRegFrame->rdi += offIncrement;
3293 break;
3294 default:
3295 AssertFailed();
3296 return VERR_EM_INTERPRETER;
3297 }
3298
3299 }
3300 else
3301 {
3302 if (!cTransfers)
3303 return VINF_SUCCESS;
3304
3305 /*
3306 * Do *not* try emulate cross page stuff here because we don't know what might
3307 * be waiting for us on the subsequent pages. The caller has only asked us to
3308 * ignore access handlers fro the current page.
3309 * This also fends off big stores which would quickly kill PGMR0DynMap.
3310 */
3311 if ( cbSize > PAGE_SIZE
3312 || cTransfers > PAGE_SIZE
3313 || (GCDest >> PAGE_SHIFT) != ((GCDest + offIncrement * cTransfers) >> PAGE_SHIFT))
3314 {
3315 Log(("STOSWD is crosses pages, chicken out to the recompiler; GCDest=%RGv cbSize=%#x offIncrement=%d cTransfers=%#x\n",
3316 GCDest, cbSize, offIncrement, cTransfers));
3317 return VERR_EM_INTERPRETER;
3318 }
3319
3320 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d cTransfers=%x DF=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize, cTransfers, pRegFrame->eflags.Bits.u1DF));
3321 /* Access verification first; we currently can't recover properly from traps inside this instruction */
3322 rc = PGMVerifyAccess(pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
3323 cTransfers * cbSize,
3324 X86_PTE_RW | (CPUMGetGuestCPL(pVCpu) == 3 ? X86_PTE_US : 0));
3325 if (rc != VINF_SUCCESS)
3326 {
3327 Log(("STOSWD will generate a trap -> recompiler, rc=%d\n", rc));
3328 return VERR_EM_INTERPRETER;
3329 }
3330
3331 /* REP case */
3332 while (cTransfers)
3333 {
3334 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
3335 if (RT_FAILURE(rc))
3336 {
3337 rc = VERR_EM_INTERPRETER;
3338 break;
3339 }
3340
3341 Assert(rc == VINF_SUCCESS);
3342 GCOffset += offIncrement;
3343 GCDest += offIncrement;
3344 cTransfers--;
3345 }
3346
3347 /* Update the registers. */
3348 switch (pDis->uAddrMode)
3349 {
3350 case DISCPUMODE_16BIT:
3351 pRegFrame->di = GCOffset;
3352 pRegFrame->cx = cTransfers;
3353 break;
3354 case DISCPUMODE_32BIT:
3355 pRegFrame->edi = GCOffset;
3356 pRegFrame->ecx = cTransfers;
3357 break;
3358 case DISCPUMODE_64BIT:
3359 pRegFrame->rdi = GCOffset;
3360 pRegFrame->rcx = cTransfers;
3361 break;
3362 default:
3363 AssertFailed();
3364 return VERR_EM_INTERPRETER;
3365 }
3366 }
3367
3368 *pcbSize = cbSize;
3369 return rc;
3370}
3371#endif /* !IN_RC */
3372
3373
3374/**
3375 * [LOCK] CMPXCHG emulation.
3376 */
3377static int emInterpretCmpXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3378{
3379 DISQPVPARAMVAL param1, param2;
3380 NOREF(pvFault);
3381
3382#if HC_ARCH_BITS == 32
3383 Assert(pDis->Param1.cb <= 4);
3384#endif
3385
3386 /* Source to make DISQueryParamVal read the register value - ugly hack */
3387 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3388 if(RT_FAILURE(rc))
3389 return VERR_EM_INTERPRETER;
3390
3391 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
3392 if(RT_FAILURE(rc))
3393 return VERR_EM_INTERPRETER;
3394
3395 uint64_t valpar;
3396 switch(param2.type)
3397 {
3398 case DISQPV_TYPE_IMMEDIATE: /* register actually */
3399 valpar = param2.val.val64;
3400 break;
3401
3402 default:
3403 return VERR_EM_INTERPRETER;
3404 }
3405
3406 PGMPAGEMAPLOCK Lock;
3407 RTGCPTR GCPtrPar1;
3408 void *pvParam1;
3409 uint64_t eflags;
3410
3411 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
3412 switch(param1.type)
3413 {
3414 case DISQPV_TYPE_ADDRESS:
3415 GCPtrPar1 = param1.val.val64;
3416 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
3417
3418 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3419 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3420 break;
3421
3422 default:
3423 return VERR_EM_INTERPRETER;
3424 }
3425
3426 LogFlow(("%s %RGv rax=%RX64 %RX64\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar));
3427
3428#ifndef VBOX_COMPARE_IEM_AND_EM
3429 if (pDis->fPrefix & DISPREFIX_LOCK)
3430 eflags = EMEmulateLockCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
3431 else
3432 eflags = EMEmulateCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
3433#else /* VBOX_COMPARE_IEM_AND_EM */
3434 uint64_t u64;
3435 switch (pDis->Param2.cb)
3436 {
3437 case 1: u64 = *(uint8_t *)pvParam1; break;
3438 case 2: u64 = *(uint16_t *)pvParam1; break;
3439 case 4: u64 = *(uint32_t *)pvParam1; break;
3440 default:
3441 case 8: u64 = *(uint64_t *)pvParam1; break;
3442 }
3443 eflags = EMEmulateCmpXchg(&u64, &pRegFrame->rax, valpar, pDis->Param2.cb);
3444 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3445#endif /* VBOX_COMPARE_IEM_AND_EM */
3446
3447 LogFlow(("%s %RGv rax=%RX64 %RX64 ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar, !!(eflags & X86_EFL_ZF)));
3448
3449 /* Update guest's eflags and finish. */
3450 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3451 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3452
3453 *pcbSize = param2.size;
3454 PGMPhysReleasePageMappingLock(pVM, &Lock);
3455 return VINF_SUCCESS;
3456}
3457
3458
3459/**
3460 * [LOCK] CMPXCHG8B emulation.
3461 */
3462static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3463{
3464 DISQPVPARAMVAL param1;
3465 NOREF(pvFault);
3466
3467 /* Source to make DISQueryParamVal read the register value - ugly hack */
3468 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3469 if(RT_FAILURE(rc))
3470 return VERR_EM_INTERPRETER;
3471
3472 RTGCPTR GCPtrPar1;
3473 void *pvParam1;
3474 uint64_t eflags;
3475 PGMPAGEMAPLOCK Lock;
3476
3477 AssertReturn(pDis->Param1.cb == 8, VERR_EM_INTERPRETER);
3478 switch(param1.type)
3479 {
3480 case DISQPV_TYPE_ADDRESS:
3481 GCPtrPar1 = param1.val.val64;
3482 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
3483
3484 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3485 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3486 break;
3487
3488 default:
3489 return VERR_EM_INTERPRETER;
3490 }
3491
3492 LogFlow(("%s %RGv=%p eax=%08x\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax));
3493
3494#ifndef VBOX_COMPARE_IEM_AND_EM
3495 if (pDis->fPrefix & DISPREFIX_LOCK)
3496 eflags = EMEmulateLockCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3497 else
3498 eflags = EMEmulateCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3499#else /* VBOX_COMPARE_IEM_AND_EM */
3500 uint64_t u64 = *(uint64_t *)pvParam1;
3501 eflags = EMEmulateCmpXchg8b(&u64, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
3502 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, sizeof(u64)); AssertRCSuccess(rc2);
3503#endif /* VBOX_COMPARE_IEM_AND_EM */
3504
3505 LogFlow(("%s %RGv=%p eax=%08x ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax, !!(eflags & X86_EFL_ZF)));
3506
3507 /* Update guest's eflags and finish; note that *only* ZF is affected. */
3508 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_ZF))
3509 | (eflags & (X86_EFL_ZF));
3510
3511 *pcbSize = 8;
3512 PGMPhysReleasePageMappingLock(pVM, &Lock);
3513 return VINF_SUCCESS;
3514}
3515
3516
3517#ifdef IN_RC /** @todo test+enable for HM as well. */
3518/**
3519 * [LOCK] XADD emulation.
3520 */
3521static int emInterpretXAdd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3522{
3523 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
3524 DISQPVPARAMVAL param1;
3525 void *pvParamReg2;
3526 size_t cbParamReg2;
3527 NOREF(pvFault);
3528
3529 /* Source to make DISQueryParamVal read the register value - ugly hack */
3530 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3531 if(RT_FAILURE(rc))
3532 return VERR_EM_INTERPRETER;
3533
3534 rc = DISQueryParamRegPtr(pRegFrame, pDis, &pDis->Param2, &pvParamReg2, &cbParamReg2);
3535 Assert(cbParamReg2 <= 4);
3536 if(RT_FAILURE(rc))
3537 return VERR_EM_INTERPRETER;
3538
3539#ifdef IN_RC
3540 if (TRPMHasTrap(pVCpu))
3541 {
3542 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
3543 {
3544#endif
3545 RTGCPTR GCPtrPar1;
3546 void *pvParam1;
3547 uint32_t eflags;
3548 PGMPAGEMAPLOCK Lock;
3549
3550 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
3551 switch(param1.type)
3552 {
3553 case DISQPV_TYPE_ADDRESS:
3554 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, (RTRCUINTPTR)param1.val.val64);
3555#ifdef IN_RC
3556 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
3557#endif
3558
3559 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
3560 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3561 break;
3562
3563 default:
3564 return VERR_EM_INTERPRETER;
3565 }
3566
3567 LogFlow(("XAdd %RGv=%p reg=%08llx\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2));
3568
3569#ifndef VBOX_COMPARE_IEM_AND_EM
3570 if (pDis->fPrefix & DISPREFIX_LOCK)
3571 eflags = EMEmulateLockXAdd(pvParam1, pvParamReg2, cbParamReg2);
3572 else
3573 eflags = EMEmulateXAdd(pvParam1, pvParamReg2, cbParamReg2);
3574#else /* VBOX_COMPARE_IEM_AND_EM */
3575 uint64_t u64;
3576 switch (cbParamReg2)
3577 {
3578 case 1: u64 = *(uint8_t *)pvParam1; break;
3579 case 2: u64 = *(uint16_t *)pvParam1; break;
3580 case 4: u64 = *(uint32_t *)pvParam1; break;
3581 default:
3582 case 8: u64 = *(uint64_t *)pvParam1; break;
3583 }
3584 eflags = EMEmulateXAdd(&u64, pvParamReg2, cbParamReg2);
3585 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
3586#endif /* VBOX_COMPARE_IEM_AND_EM */
3587
3588 LogFlow(("XAdd %RGv=%p reg=%08llx ZF=%d\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2, !!(eflags & X86_EFL_ZF) ));
3589
3590 /* Update guest's eflags and finish. */
3591 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
3592 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
3593
3594 *pcbSize = cbParamReg2;
3595 PGMPhysReleasePageMappingLock(pVM, &Lock);
3596 return VINF_SUCCESS;
3597#ifdef IN_RC
3598 }
3599 }
3600
3601 return VERR_EM_INTERPRETER;
3602#endif
3603}
3604#endif /* IN_RC */
3605
3606
3607/**
3608 * WBINVD Emulation.
3609 */
3610static int emInterpretWbInvd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3611{
3612 /* Nothing to do. */
3613 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3614 return VINF_SUCCESS;
3615}
3616
3617
3618/**
3619 * INVLPG Emulation.
3620 */
3621static VBOXSTRICTRC emInterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3622{
3623 DISQPVPARAMVAL param1;
3624 RTGCPTR addr;
3625 NOREF(pvFault); NOREF(pVM); NOREF(pcbSize);
3626
3627 VBOXSTRICTRC rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3628 if(RT_FAILURE(rc))
3629 return VERR_EM_INTERPRETER;
3630
3631 switch(param1.type)
3632 {
3633 case DISQPV_TYPE_IMMEDIATE:
3634 case DISQPV_TYPE_ADDRESS:
3635 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
3636 return VERR_EM_INTERPRETER;
3637 addr = (RTGCPTR)param1.val.val64;
3638 break;
3639
3640 default:
3641 return VERR_EM_INTERPRETER;
3642 }
3643
3644 /** @todo is addr always a flat linear address or ds based
3645 * (in absence of segment override prefixes)????
3646 */
3647#ifdef IN_RC
3648 LogFlow(("RC: EMULATE: invlpg %RGv\n", addr));
3649#endif
3650 rc = PGMInvalidatePage(pVCpu, addr);
3651 if ( rc == VINF_SUCCESS
3652 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
3653 return VINF_SUCCESS;
3654 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
3655 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), addr),
3656 VERR_EM_INTERPRETER);
3657 return rc;
3658}
3659
3660/** @todo change all these EMInterpretXXX methods to VBOXSTRICTRC. */
3661
3662/**
3663 * CPUID Emulation.
3664 */
3665static int emInterpretCpuId(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3666{
3667 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3668 int rc = EMInterpretCpuId(pVM, pVCpu, pRegFrame);
3669 return rc;
3670}
3671
3672
3673/**
3674 * CLTS Emulation.
3675 */
3676static int emInterpretClts(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3677{
3678 NOREF(pVM); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3679
3680 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3681 if (!(cr0 & X86_CR0_TS))
3682 return VINF_SUCCESS;
3683 return CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_TS);
3684}
3685
3686
3687/**
3688 * Update CRx.
3689 *
3690 * @returns VBox status code.
3691 * @param pVM The cross context VM structure.
3692 * @param pVCpu The cross context virtual CPU structure.
3693 * @param pRegFrame The register frame.
3694 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3695 * @param val New CRx value
3696 *
3697 */
3698static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
3699{
3700 uint64_t oldval;
3701 uint64_t msrEFER;
3702 uint32_t fValid;
3703 int rc, rc2;
3704 NOREF(pVM);
3705
3706 /** @todo Clean up this mess. */
3707 LogFlow(("emInterpretCRxWrite at %RGv CR%d <- %RX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
3708 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3709 switch (DestRegCrx)
3710 {
3711 case DISCREG_CR0:
3712 oldval = CPUMGetGuestCR0(pVCpu);
3713#ifdef IN_RC
3714 /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
3715 if ( (val & (X86_CR0_WP | X86_CR0_AM))
3716 != (oldval & (X86_CR0_WP | X86_CR0_AM)))
3717 return VERR_EM_INTERPRETER;
3718#endif
3719 rc = VINF_SUCCESS;
3720#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
3721 CPUMSetGuestCR0(pVCpu, val);
3722#else
3723 CPUMQueryGuestCtxPtr(pVCpu)->cr0 = val | X86_CR0_ET;
3724#endif
3725 val = CPUMGetGuestCR0(pVCpu);
3726 if ( (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3727 != (val & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
3728 {
3729 /* global flush */
3730 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3731 AssertRCReturn(rc, rc);
3732 }
3733
3734 /* Deal with long mode enabling/disabling. */
3735 msrEFER = CPUMGetGuestEFER(pVCpu);
3736 if (msrEFER & MSR_K6_EFER_LME)
3737 {
3738 if ( !(oldval & X86_CR0_PG)
3739 && (val & X86_CR0_PG))
3740 {
3741 /* Illegal to have an active 64 bits CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3742 if (pRegFrame->cs.Attr.n.u1Long)
3743 {
3744 AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
3745 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3746 }
3747
3748 /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3749 if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE))
3750 {
3751 AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
3752 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3753 }
3754 msrEFER |= MSR_K6_EFER_LMA;
3755 }
3756 else
3757 if ( (oldval & X86_CR0_PG)
3758 && !(val & X86_CR0_PG))
3759 {
3760 msrEFER &= ~MSR_K6_EFER_LMA;
3761 /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
3762 }
3763 CPUMSetGuestEFER(pVCpu, msrEFER);
3764 }
3765 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3766 return rc2 == VINF_SUCCESS ? rc : rc2;
3767
3768 case DISCREG_CR2:
3769 rc = CPUMSetGuestCR2(pVCpu, val); AssertRC(rc);
3770 return VINF_SUCCESS;
3771
3772 case DISCREG_CR3:
3773 /* Reloading the current CR3 means the guest just wants to flush the TLBs */
3774 rc = CPUMSetGuestCR3(pVCpu, val); AssertRC(rc);
3775 if (CPUMGetGuestCR0(pVCpu) & X86_CR0_PG)
3776 {
3777 /* flush */
3778 rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
3779 AssertRC(rc);
3780 }
3781 return rc;
3782
3783 case DISCREG_CR4:
3784 oldval = CPUMGetGuestCR4(pVCpu);
3785 rc = CPUMSetGuestCR4(pVCpu, val); AssertRC(rc);
3786 val = CPUMGetGuestCR4(pVCpu);
3787
3788 /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3789 msrEFER = CPUMGetGuestEFER(pVCpu);
3790 if ( (msrEFER & MSR_K6_EFER_LMA)
3791 && (oldval & X86_CR4_PAE)
3792 && !(val & X86_CR4_PAE))
3793 {
3794 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3795 }
3796
3797 /* From IEM iemCImpl_load_CrX. */
3798 /** @todo Check guest CPUID bits for determining corresponding valid bits. */
3799 fValid = X86_CR4_VME | X86_CR4_PVI
3800 | X86_CR4_TSD | X86_CR4_DE
3801 | X86_CR4_PSE | X86_CR4_PAE
3802 | X86_CR4_MCE | X86_CR4_PGE
3803 | X86_CR4_PCE | X86_CR4_OSFXSR
3804 | X86_CR4_OSXMMEEXCPT;
3805 //if (xxx)
3806 // fValid |= X86_CR4_VMXE;
3807 //if (xxx)
3808 // fValid |= X86_CR4_OSXSAVE;
3809 if (val & ~(uint64_t)fValid)
3810 {
3811 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", val, val & ~(uint64_t)fValid));
3812 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3813 }
3814
3815 rc = VINF_SUCCESS;
3816 if ( (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
3817 != (val & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
3818 {
3819 /* global flush */
3820 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3821 AssertRCReturn(rc, rc);
3822 }
3823
3824 /* Feeling extremely lazy. */
3825# ifdef IN_RC
3826 if ( (oldval & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
3827 != (val & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
3828 {
3829 Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
3830 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
3831 }
3832# endif
3833# ifdef VBOX_WITH_RAW_MODE
3834 if (((val ^ oldval) & X86_CR4_VME) && VM_IS_RAW_MODE_ENABLED(pVM))
3835 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3836# endif
3837
3838 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3839 return rc2 == VINF_SUCCESS ? rc : rc2;
3840
3841 case DISCREG_CR8:
3842 return APICSetTpr(pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
3843
3844 default:
3845 AssertFailed();
3846 case DISCREG_CR1: /* illegal op */
3847 break;
3848 }
3849 return VERR_EM_INTERPRETER;
3850}
3851
3852
3853/**
3854 * LMSW Emulation.
3855 */
3856static int emInterpretLmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3857{
3858 DISQPVPARAMVAL param1;
3859 uint32_t val;
3860 NOREF(pvFault); NOREF(pcbSize);
3861 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3862
3863 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3864 if(RT_FAILURE(rc))
3865 return VERR_EM_INTERPRETER;
3866
3867 switch(param1.type)
3868 {
3869 case DISQPV_TYPE_IMMEDIATE:
3870 case DISQPV_TYPE_ADDRESS:
3871 if(!(param1.flags & DISQPV_FLAG_16))
3872 return VERR_EM_INTERPRETER;
3873 val = param1.val.val32;
3874 break;
3875
3876 default:
3877 return VERR_EM_INTERPRETER;
3878 }
3879
3880 LogFlow(("emInterpretLmsw %x\n", val));
3881 uint64_t OldCr0 = CPUMGetGuestCR0(pVCpu);
3882
3883 /* Only PE, MP, EM and TS can be changed; note that PE can't be cleared by this instruction. */
3884 uint64_t NewCr0 = ( OldCr0 & ~( X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
3885 | (val & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
3886
3887 return emUpdateCRx(pVM, pVCpu, pRegFrame, DISCREG_CR0, NewCr0);
3888
3889}
3890
3891#ifdef EM_EMULATE_SMSW
3892/**
3893 * SMSW Emulation.
3894 */
3895static int emInterpretSmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3896{
3897 NOREF(pvFault); NOREF(pcbSize);
3898 DISQPVPARAMVAL param1;
3899 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3900
3901 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3902 if(RT_FAILURE(rc))
3903 return VERR_EM_INTERPRETER;
3904
3905 switch(param1.type)
3906 {
3907 case DISQPV_TYPE_IMMEDIATE:
3908 if(param1.size != sizeof(uint16_t))
3909 return VERR_EM_INTERPRETER;
3910 LogFlow(("emInterpretSmsw %d <- cr0 (%x)\n", pDis->Param1.Base.idxGenReg, cr0));
3911 rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, cr0);
3912 break;
3913
3914 case DISQPV_TYPE_ADDRESS:
3915 {
3916 RTGCPTR pParam1;
3917
3918 /* Actually forced to 16 bits regardless of the operand size. */
3919 if(param1.size != sizeof(uint16_t))
3920 return VERR_EM_INTERPRETER;
3921
3922 pParam1 = (RTGCPTR)param1.val.val64;
3923 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
3924 LogFlow(("emInterpretSmsw %RGv <- cr0 (%x)\n", pParam1, cr0));
3925
3926 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &cr0, sizeof(uint16_t));
3927 if (RT_FAILURE(rc))
3928 {
3929 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
3930 return VERR_EM_INTERPRETER;
3931 }
3932 break;
3933 }
3934
3935 default:
3936 return VERR_EM_INTERPRETER;
3937 }
3938
3939 LogFlow(("emInterpretSmsw %x\n", cr0));
3940 return rc;
3941}
3942#endif
3943
3944
3945/**
3946 * Interpret CRx read.
3947 *
3948 * @returns VBox status code.
3949 * @param pVM The cross context VM structure.
3950 * @param pVCpu The cross context virtual CPU structure.
3951 * @param pRegFrame The register frame.
3952 * @param DestRegGen General purpose register index (USE_REG_E**))
3953 * @param SrcRegCrx CRx register index (DISUSE_REG_CR*)
3954 *
3955 */
3956static int emInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx)
3957{
3958 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3959 uint64_t val64;
3960 int rc = CPUMGetGuestCRx(pVCpu, SrcRegCrx, &val64);
3961 AssertMsgRCReturn(rc, ("CPUMGetGuestCRx %d failed\n", SrcRegCrx), VERR_EM_INTERPRETER);
3962 NOREF(pVM);
3963
3964 if (CPUMIsGuestIn64BitCode(pVCpu))
3965 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
3966 else
3967 rc = DISWriteReg32(pRegFrame, DestRegGen, val64);
3968
3969 if (RT_SUCCESS(rc))
3970 {
3971 LogFlow(("MOV_CR: gen32=%d CR=%d val=%RX64\n", DestRegGen, SrcRegCrx, val64));
3972 return VINF_SUCCESS;
3973 }
3974 return VERR_EM_INTERPRETER;
3975}
3976
3977
3978/**
3979 * Interpret CRx write.
3980 *
3981 * @returns VBox status code.
3982 * @param pVM The cross context VM structure.
3983 * @param pVCpu The cross context virtual CPU structure.
3984 * @param pRegFrame The register frame.
3985 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3986 * @param SrcRegGen General purpose register index (USE_REG_E**))
3987 *
3988 */
3989static int emInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
3990{
3991 uint64_t val;
3992 int rc;
3993 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3994
3995 if (CPUMIsGuestIn64BitCode(pVCpu))
3996 rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
3997 else
3998 {
3999 uint32_t val32;
4000 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
4001 val = val32;
4002 }
4003
4004 if (RT_SUCCESS(rc))
4005 return emUpdateCRx(pVM, pVCpu, pRegFrame, DestRegCrx, val);
4006
4007 return VERR_EM_INTERPRETER;
4008}
4009
4010
4011/**
4012 * MOV CRx
4013 */
4014static int emInterpretMovCRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4015{
4016 NOREF(pvFault); NOREF(pcbSize);
4017 if ((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_CR)
4018 return emInterpretCRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxCtrlReg);
4019
4020 if (pDis->Param1.fUse == DISUSE_REG_CR && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
4021 return emInterpretCRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxCtrlReg, pDis->Param2.Base.idxGenReg);
4022
4023 AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
4024}
4025
4026
4027/**
4028 * MOV DRx
4029 */
4030static int emInterpretMovDRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4031{
4032 int rc = VERR_EM_INTERPRETER;
4033 NOREF(pvFault); NOREF(pcbSize);
4034
4035 if((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_DBG)
4036 {
4037 rc = EMInterpretDRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxDbgReg);
4038 }
4039 else
4040 if(pDis->Param1.fUse == DISUSE_REG_DBG && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
4041 {
4042 rc = EMInterpretDRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxDbgReg, pDis->Param2.Base.idxGenReg);
4043 }
4044 else
4045 AssertMsgFailed(("Unexpected debug register move\n"));
4046
4047 return rc;
4048}
4049
4050
4051/**
4052 * LLDT Emulation.
4053 */
4054static int emInterpretLLdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4055{
4056 DISQPVPARAMVAL param1;
4057 RTSEL sel;
4058 NOREF(pVM); NOREF(pvFault); NOREF(pcbSize);
4059
4060 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
4061 if(RT_FAILURE(rc))
4062 return VERR_EM_INTERPRETER;
4063
4064 switch(param1.type)
4065 {
4066 case DISQPV_TYPE_ADDRESS:
4067 return VERR_EM_INTERPRETER; //feeling lazy right now
4068
4069 case DISQPV_TYPE_IMMEDIATE:
4070 if(!(param1.flags & DISQPV_FLAG_16))
4071 return VERR_EM_INTERPRETER;
4072 sel = (RTSEL)param1.val.val16;
4073 break;
4074
4075 default:
4076 return VERR_EM_INTERPRETER;
4077 }
4078
4079#ifdef IN_RING0
4080 /* Only for the VT-x real-mode emulation case. */
4081 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
4082 CPUMSetGuestLDTR(pVCpu, sel);
4083 return VINF_SUCCESS;
4084#else
4085 if (sel == 0)
4086 {
4087 if (CPUMGetHyperLDTR(pVCpu) == 0)
4088 {
4089 // this simple case is most frequent in Windows 2000 (31k - boot & shutdown)
4090 return VINF_SUCCESS;
4091 }
4092 }
4093 //still feeling lazy
4094 return VERR_EM_INTERPRETER;
4095#endif
4096}
4097
4098#ifdef IN_RING0
4099/**
4100 * LIDT/LGDT Emulation.
4101 */
4102static int emInterpretLIGdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4103{
4104 DISQPVPARAMVAL param1;
4105 RTGCPTR pParam1;
4106 X86XDTR32 dtr32;
4107 NOREF(pvFault); NOREF(pcbSize);
4108
4109 Log(("Emulate %s at %RGv\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip));
4110
4111 /* Only for the VT-x real-mode emulation case. */
4112 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
4113
4114 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
4115 if(RT_FAILURE(rc))
4116 return VERR_EM_INTERPRETER;
4117
4118 switch(param1.type)
4119 {
4120 case DISQPV_TYPE_ADDRESS:
4121 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, param1.val.val16);
4122 break;
4123
4124 default:
4125 return VERR_EM_INTERPRETER;
4126 }
4127
4128 rc = emRamRead(pVM, pVCpu, pRegFrame, &dtr32, pParam1, sizeof(dtr32));
4129 AssertRCReturn(rc, VERR_EM_INTERPRETER);
4130
4131 if (!(pDis->fPrefix & DISPREFIX_OPSIZE))
4132 dtr32.uAddr &= 0xffffff; /* 16 bits operand size */
4133
4134 if (pDis->pCurInstr->uOpcode == OP_LIDT)
4135 CPUMSetGuestIDTR(pVCpu, dtr32.uAddr, dtr32.cb);
4136 else
4137 CPUMSetGuestGDTR(pVCpu, dtr32.uAddr, dtr32.cb);
4138
4139 return VINF_SUCCESS;
4140}
4141#endif
4142
4143
4144#ifdef IN_RC
4145/**
4146 * STI Emulation.
4147 *
4148 * @remark the instruction following sti is guaranteed to be executed before any interrupts are dispatched
4149 */
4150static int emInterpretSti(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4151{
4152 NOREF(pcbSize);
4153 PPATMGCSTATE pGCState = PATMGetGCState(pVM);
4154
4155 if(!pGCState)
4156 {
4157 Assert(pGCState);
4158 return VERR_EM_INTERPRETER;
4159 }
4160 pGCState->uVMFlags |= X86_EFL_IF;
4161
4162 Assert(pRegFrame->eflags.u32 & X86_EFL_IF);
4163 Assert(pvFault == SELMToFlat(pVM, DISSELREG_CS, pRegFrame, (RTGCPTR)pRegFrame->rip));
4164
4165 pVCpu->em.s.GCPtrInhibitInterrupts = pRegFrame->eip + pDis->cbInstr;
4166 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
4167
4168 return VINF_SUCCESS;
4169}
4170#endif /* IN_RC */
4171
4172
4173/**
4174 * HLT Emulation.
4175 */
4176static VBOXSTRICTRC
4177emInterpretHlt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4178{
4179 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
4180 return VINF_EM_HALT;
4181}
4182
4183
4184/**
4185 * RDTSC Emulation.
4186 */
4187static int emInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4188{
4189 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4190 return EMInterpretRdtsc(pVM, pVCpu, pRegFrame);
4191}
4192
4193/**
4194 * RDPMC Emulation
4195 */
4196static int emInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4197{
4198 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4199 return EMInterpretRdpmc(pVM, pVCpu, pRegFrame);
4200}
4201
4202
4203static int emInterpretMonitor(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4204{
4205 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4206 return EMInterpretMonitor(pVM, pVCpu, pRegFrame);
4207}
4208
4209
4210static VBOXSTRICTRC emInterpretMWait(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4211{
4212 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4213 return EMInterpretMWait(pVM, pVCpu, pRegFrame);
4214}
4215
4216
4217/**
4218 * RDMSR Emulation.
4219 */
4220static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4221{
4222 /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
4223 different, so we play safe by completely disassembling the instruction. */
4224 Assert(!(pDis->fPrefix & DISPREFIX_REX));
4225 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4226 return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
4227}
4228
4229
4230/**
4231 * WRMSR Emulation.
4232 */
4233static int emInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
4234{
4235 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
4236 return EMInterpretWrmsr(pVM, pVCpu, pRegFrame);
4237}
4238
4239
4240/**
4241 * Internal worker.
4242 * @copydoc emInterpretInstructionCPUOuter
4243 * @param pVM The cross context VM structure.
4244 */
4245DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
4246 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
4247{
4248 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
4249 Assert(enmCodeType == EMCODETYPE_SUPERVISOR || enmCodeType == EMCODETYPE_ALL);
4250 Assert(pcbSize);
4251 *pcbSize = 0;
4252
4253 if (enmCodeType == EMCODETYPE_SUPERVISOR)
4254 {
4255 /*
4256 * Only supervisor guest code!!
4257 * And no complicated prefixes.
4258 */
4259 /* Get the current privilege level. */
4260 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4261#ifdef VBOX_WITH_RAW_RING1
4262 if ( !EMIsRawRing1Enabled(pVM)
4263 || cpl > 1
4264 || pRegFrame->eflags.Bits.u2IOPL > cpl
4265 )
4266#endif
4267 {
4268 if ( cpl != 0
4269 && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */
4270 {
4271 Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
4272 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
4273 return VERR_EM_INTERPRETER;
4274 }
4275 }
4276 }
4277 else
4278 Log2(("emInterpretInstructionCPU allowed to interpret user-level code!!\n"));
4279
4280#ifdef IN_RC
4281 if ( (pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP))
4282 || ( (pDis->fPrefix & DISPREFIX_LOCK)
4283 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
4284 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
4285 && pDis->pCurInstr->uOpcode != OP_XADD
4286 && pDis->pCurInstr->uOpcode != OP_OR
4287 && pDis->pCurInstr->uOpcode != OP_AND
4288 && pDis->pCurInstr->uOpcode != OP_XOR
4289 && pDis->pCurInstr->uOpcode != OP_BTR
4290 )
4291 )
4292#else
4293 if ( (pDis->fPrefix & DISPREFIX_REPNE)
4294 || ( (pDis->fPrefix & DISPREFIX_REP)
4295 && pDis->pCurInstr->uOpcode != OP_STOSWD
4296 )
4297 || ( (pDis->fPrefix & DISPREFIX_LOCK)
4298 && pDis->pCurInstr->uOpcode != OP_OR
4299 && pDis->pCurInstr->uOpcode != OP_AND
4300 && pDis->pCurInstr->uOpcode != OP_XOR
4301 && pDis->pCurInstr->uOpcode != OP_BTR
4302 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
4303 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
4304 )
4305 )
4306#endif
4307 {
4308 //Log(("EMInterpretInstruction: wrong prefix!!\n"));
4309 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedPrefix));
4310 Log4(("EM: Refuse %u on REP/REPNE/LOCK prefix grounds\n", pDis->pCurInstr->uOpcode));
4311 return VERR_EM_INTERPRETER;
4312 }
4313
4314#if HC_ARCH_BITS == 32
4315 /*
4316 * Unable to emulate most >4 bytes accesses in 32 bits mode.
4317 * Whitelisted instructions are safe.
4318 */
4319 if ( pDis->Param1.cb > 4
4320 && CPUMIsGuestIn64BitCode(pVCpu))
4321 {
4322 uint32_t uOpCode = pDis->pCurInstr->uOpcode;
4323 if ( uOpCode != OP_STOSWD
4324 && uOpCode != OP_MOV
4325 && uOpCode != OP_CMPXCHG8B
4326 && uOpCode != OP_XCHG
4327 && uOpCode != OP_BTS
4328 && uOpCode != OP_BTR
4329 && uOpCode != OP_BTC
4330 )
4331 {
4332# ifdef VBOX_WITH_STATISTICS
4333 switch (pDis->pCurInstr->uOpcode)
4334 {
4335# define INTERPRET_FAILED_CASE(opcode, Instr) \
4336 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); break;
4337 INTERPRET_FAILED_CASE(OP_XCHG,Xchg);
4338 INTERPRET_FAILED_CASE(OP_DEC,Dec);
4339 INTERPRET_FAILED_CASE(OP_INC,Inc);
4340 INTERPRET_FAILED_CASE(OP_POP,Pop);
4341 INTERPRET_FAILED_CASE(OP_OR, Or);
4342 INTERPRET_FAILED_CASE(OP_XOR,Xor);
4343 INTERPRET_FAILED_CASE(OP_AND,And);
4344 INTERPRET_FAILED_CASE(OP_MOV,Mov);
4345 INTERPRET_FAILED_CASE(OP_STOSWD,StosWD);
4346 INTERPRET_FAILED_CASE(OP_INVLPG,InvlPg);
4347 INTERPRET_FAILED_CASE(OP_CPUID,CpuId);
4348 INTERPRET_FAILED_CASE(OP_MOV_CR,MovCRx);
4349 INTERPRET_FAILED_CASE(OP_MOV_DR,MovDRx);
4350 INTERPRET_FAILED_CASE(OP_LLDT,LLdt);
4351 INTERPRET_FAILED_CASE(OP_LIDT,LIdt);
4352 INTERPRET_FAILED_CASE(OP_LGDT,LGdt);
4353 INTERPRET_FAILED_CASE(OP_LMSW,Lmsw);
4354 INTERPRET_FAILED_CASE(OP_CLTS,Clts);
4355 INTERPRET_FAILED_CASE(OP_MONITOR,Monitor);
4356 INTERPRET_FAILED_CASE(OP_MWAIT,MWait);
4357 INTERPRET_FAILED_CASE(OP_RDMSR,Rdmsr);
4358 INTERPRET_FAILED_CASE(OP_WRMSR,Wrmsr);
4359 INTERPRET_FAILED_CASE(OP_ADD,Add);
4360 INTERPRET_FAILED_CASE(OP_SUB,Sub);
4361 INTERPRET_FAILED_CASE(OP_ADC,Adc);
4362 INTERPRET_FAILED_CASE(OP_BTR,Btr);
4363 INTERPRET_FAILED_CASE(OP_BTS,Bts);
4364 INTERPRET_FAILED_CASE(OP_BTC,Btc);
4365 INTERPRET_FAILED_CASE(OP_RDTSC,Rdtsc);
4366 INTERPRET_FAILED_CASE(OP_CMPXCHG, CmpXchg);
4367 INTERPRET_FAILED_CASE(OP_STI, Sti);
4368 INTERPRET_FAILED_CASE(OP_XADD,XAdd);
4369 INTERPRET_FAILED_CASE(OP_CMPXCHG8B,CmpXchg8b);
4370 INTERPRET_FAILED_CASE(OP_HLT, Hlt);
4371 INTERPRET_FAILED_CASE(OP_IRET,Iret);
4372 INTERPRET_FAILED_CASE(OP_WBINVD,WbInvd);
4373 INTERPRET_FAILED_CASE(OP_MOVNTPS,MovNTPS);
4374# undef INTERPRET_FAILED_CASE
4375 default:
4376 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
4377 break;
4378 }
4379# endif /* VBOX_WITH_STATISTICS */
4380 Log4(("EM: Refuse %u on grounds of accessing %u bytes\n", pDis->pCurInstr->uOpcode, pDis->Param1.cb));
4381 return VERR_EM_INTERPRETER;
4382 }
4383 }
4384#endif
4385
4386 VBOXSTRICTRC rc;
4387#if (defined(VBOX_STRICT) || defined(LOG_ENABLED))
4388 LogFlow(("emInterpretInstructionCPU %s\n", emGetMnemonic(pDis)));
4389#endif
4390 switch (pDis->pCurInstr->uOpcode)
4391 {
4392 /*
4393 * Macros for generating the right case statements.
4394 */
4395# ifndef VBOX_COMPARE_IEM_AND_EM
4396# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
4397 case opcode:\
4398 if (pDis->fPrefix & DISPREFIX_LOCK) \
4399 rc = emInterpretLock##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulateLock); \
4400 else \
4401 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
4402 if (RT_SUCCESS(rc)) \
4403 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4404 else \
4405 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4406 return rc
4407# else /* VBOX_COMPARE_IEM_AND_EM */
4408# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
4409 case opcode:\
4410 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
4411 if (RT_SUCCESS(rc)) \
4412 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4413 else \
4414 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4415 return rc
4416# endif /* VBOX_COMPARE_IEM_AND_EM */
4417
4418#define INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate) \
4419 case opcode:\
4420 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
4421 if (RT_SUCCESS(rc)) \
4422 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4423 else \
4424 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4425 return rc
4426
4427#define INTERPRET_CASE_EX_PARAM2(opcode, Instr, InstrFn, pfnEmulate) \
4428 INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate)
4429#define INTERPRET_CASE_EX_LOCK_PARAM2(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
4430 INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock)
4431
4432#define INTERPRET_CASE(opcode, Instr) \
4433 case opcode:\
4434 rc = emInterpret##Instr(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
4435 if (RT_SUCCESS(rc)) \
4436 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4437 else \
4438 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4439 return rc
4440
4441#define INTERPRET_CASE_EX_DUAL_PARAM2(opcode, Instr, InstrFn) \
4442 case opcode:\
4443 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
4444 if (RT_SUCCESS(rc)) \
4445 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
4446 else \
4447 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
4448 return rc
4449
4450#define INTERPRET_STAT_CASE(opcode, Instr) \
4451 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); return VERR_EM_INTERPRETER;
4452
4453 /*
4454 * The actual case statements.
4455 */
4456 INTERPRET_CASE(OP_XCHG,Xchg);
4457 INTERPRET_CASE_EX_PARAM2(OP_DEC,Dec, IncDec, EMEmulateDec);
4458 INTERPRET_CASE_EX_PARAM2(OP_INC,Inc, IncDec, EMEmulateInc);
4459 INTERPRET_CASE(OP_POP,Pop);
4460 INTERPRET_CASE_EX_LOCK_PARAM3(OP_OR, Or, OrXorAnd, EMEmulateOr, EMEmulateLockOr);
4461 INTERPRET_CASE_EX_LOCK_PARAM3(OP_XOR,Xor, OrXorAnd, EMEmulateXor, EMEmulateLockXor);
4462 INTERPRET_CASE_EX_LOCK_PARAM3(OP_AND,And, OrXorAnd, EMEmulateAnd, EMEmulateLockAnd);
4463 INTERPRET_CASE(OP_MOV,Mov);
4464#ifndef IN_RC
4465 INTERPRET_CASE(OP_STOSWD,StosWD);
4466#endif
4467 INTERPRET_CASE(OP_INVLPG,InvlPg);
4468 INTERPRET_CASE(OP_CPUID,CpuId);
4469 INTERPRET_CASE(OP_MOV_CR,MovCRx);
4470 INTERPRET_CASE(OP_MOV_DR,MovDRx);
4471#ifdef IN_RING0
4472 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LIDT, LIdt, LIGdt);
4473 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LGDT, LGdt, LIGdt);
4474#endif
4475 INTERPRET_CASE(OP_LLDT,LLdt);
4476 INTERPRET_CASE(OP_LMSW,Lmsw);
4477#ifdef EM_EMULATE_SMSW
4478 INTERPRET_CASE(OP_SMSW,Smsw);
4479#endif
4480 INTERPRET_CASE(OP_CLTS,Clts);
4481 INTERPRET_CASE(OP_MONITOR, Monitor);
4482 INTERPRET_CASE(OP_MWAIT, MWait);
4483 INTERPRET_CASE(OP_RDMSR, Rdmsr);
4484 INTERPRET_CASE(OP_WRMSR, Wrmsr);
4485 INTERPRET_CASE_EX_PARAM3(OP_ADD,Add, AddSub, EMEmulateAdd);
4486 INTERPRET_CASE_EX_PARAM3(OP_SUB,Sub, AddSub, EMEmulateSub);
4487 INTERPRET_CASE(OP_ADC,Adc);
4488 INTERPRET_CASE_EX_LOCK_PARAM2(OP_BTR,Btr, BitTest, EMEmulateBtr, EMEmulateLockBtr);
4489 INTERPRET_CASE_EX_PARAM2(OP_BTS,Bts, BitTest, EMEmulateBts);
4490 INTERPRET_CASE_EX_PARAM2(OP_BTC,Btc, BitTest, EMEmulateBtc);
4491 INTERPRET_CASE(OP_RDPMC,Rdpmc);
4492 INTERPRET_CASE(OP_RDTSC,Rdtsc);
4493 INTERPRET_CASE(OP_CMPXCHG, CmpXchg);
4494#ifdef IN_RC
4495 INTERPRET_CASE(OP_STI,Sti);
4496 INTERPRET_CASE(OP_XADD, XAdd);
4497 INTERPRET_CASE(OP_IRET,Iret);
4498#endif
4499 INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b);
4500 INTERPRET_CASE(OP_HLT,Hlt);
4501 INTERPRET_CASE(OP_WBINVD,WbInvd);
4502#ifdef VBOX_WITH_STATISTICS
4503# ifndef IN_RC
4504 INTERPRET_STAT_CASE(OP_XADD, XAdd);
4505# endif
4506 INTERPRET_STAT_CASE(OP_MOVNTPS,MovNTPS);
4507#endif
4508
4509 default:
4510 Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->uOpcode));
4511 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
4512 return VERR_EM_INTERPRETER;
4513
4514#undef INTERPRET_CASE_EX_PARAM2
4515#undef INTERPRET_STAT_CASE
4516#undef INTERPRET_CASE_EX
4517#undef INTERPRET_CASE
4518 } /* switch (opcode) */
4519 /* not reached */
4520}
4521
4522/**
4523 * Interprets the current instruction using the supplied DISCPUSTATE structure.
4524 *
4525 * EIP is *NOT* updated!
4526 *
4527 * @returns VBox strict status code.
4528 * @retval VINF_* Scheduling instructions. When these are returned, it
4529 * starts to get a bit tricky to know whether code was
4530 * executed or not... We'll address this when it becomes a problem.
4531 * @retval VERR_EM_INTERPRETER Something we can't cope with.
4532 * @retval VERR_* Fatal errors.
4533 *
4534 * @param pVCpu The cross context virtual CPU structure.
4535 * @param pDis The disassembler cpu state for the instruction to be
4536 * interpreted.
4537 * @param pRegFrame The register frame. EIP is *NOT* changed!
4538 * @param pvFault The fault address (CR2).
4539 * @param pcbSize Size of the write (if applicable).
4540 * @param enmCodeType Code type (user/supervisor)
4541 *
4542 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
4543 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
4544 * to worry about e.g. invalid modrm combinations (!)
4545 *
4546 * @todo At this time we do NOT check if the instruction overwrites vital information.
4547 * Make sure this can't happen!! (will add some assertions/checks later)
4548 */
4549DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
4550 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
4551{
4552 STAM_PROFILE_START(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4553 VBOXSTRICTRC rc = emInterpretInstructionCPU(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, pRegFrame, pvFault, enmCodeType, pcbSize);
4554 STAM_PROFILE_STOP(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
4555 if (RT_SUCCESS(rc))
4556 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretSucceeded));
4557 else
4558 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretFailed));
4559 return rc;
4560}
4561
4562
4563#endif /* !VBOX_WITH_IEM */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette