VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@73951

Last change on this file since 73951 was 72895, checked in by vboxsync, 7 years ago

EM,HM: Replaced EMInterpretInvlpg with IEMExecDecodedInvlpg.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 53.4 KB
1/* $Id: EMAll.cpp 72895 2018-07-04 17:03:57Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_EM
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/patm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/iem.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/hm.h>
31#include <VBox/vmm/pdmapi.h>
32#include <VBox/vmm/vmm.h>
33#include <VBox/vmm/stam.h>
34#include "EMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/param.h>
37#include <VBox/err.h>
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/log.h>
41#include <iprt/assert.h>
42#include <iprt/string.h>
43
44
45
46
47/**
48 * Get the current execution manager status.
49 *
50 * @returns Current status.
51 * @param pVCpu The cross context virtual CPU structure.
52 */
53VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
54{
55 return pVCpu->em.s.enmState;
56}
57
58
59/**
60 * Sets the current execution manager status. (use only when you know what you're doing!)
61 *
62 * @param pVCpu The cross context virtual CPU structure.
63 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
64 */
65VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
66{
67 /* Only allowed combination: */
68 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
69 pVCpu->em.s.enmState = enmNewState;
70}
71
72
73/**
74 * Sets the PC for which interrupts should be inhibited.
75 *
76 * @param pVCpu The cross context virtual CPU structure.
77 * @param PC The PC.
78 */
79VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
80{
81 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
82 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
83}
84
85
86/**
87 * Gets the PC for which interrupts should be inhibited.
88 *
89 * There are a few instructions which inhibit or delay interrupts
90 * for the instruction following them. These instructions are:
91 * - STI
92 * - MOV SS, r/m16
93 * - POP SS
94 *
95 * @returns The PC for which interrupts should be inhibited.
96 * @param pVCpu The cross context virtual CPU structure.
97 *
98 */
99VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
100{
101 return pVCpu->em.s.GCPtrInhibitInterrupts;
102}
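/*
 * Illustrative usage sketch: how an interrupt-injection path might honour
 * the inhibition window recorded above. The helper name and surrounding
 * logic are assumptions, not VMM code.
 *
 * @code
 *  static bool emExampleCanInjectInterrupt(PVMCPU pVCpu, uint64_t uRip)
 *  {
 *      if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
 *          && EMGetInhibitInterruptsPC(pVCpu) == uRip)
 *          return false;   // still shadowed by STI / MOV SS / POP SS
 *      return true;
 *  }
 * @endcode
 */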
103
104
105/**
106 * Enables / disables hypercall instructions.
107 *
108 * This interface is used by GIM to tell the execution monitors whether the
109 * hypercall instructions (VMMCALL & VMCALL) are allowed or should \#UD.
110 *
111 * @param pVCpu The cross context virtual CPU structure this applies to.
112 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
113 */
114VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
115{
116 pVCpu->em.s.fHypercallEnabled = fEnabled;
117}
118
119
120/**
121 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
122 *
123 * @returns true if enabled, false if not.
124 * @param pVCpu The cross context virtual CPU structure.
125 *
126 * @note If this call becomes a performance factor, we can make the data
127 * field available thru a read-only view in VMCPU. See VM::cpum.ro.
128 */
129VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
130{
131 return pVCpu->em.s.fHypercallEnabled;
132}
133
134
135/**
136 * Prepare an MWAIT - essentials of the MONITOR instruction.
137 *
138 * @returns VINF_SUCCESS
139 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
140 * @param rax The content of RAX.
141 * @param rcx The content of RCX.
142 * @param rdx The content of RDX.
143 * @param GCPhys The physical address corresponding to rax.
144 */
145VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
146{
147 pVCpu->em.s.MWait.uMonitorRAX = rax;
148 pVCpu->em.s.MWait.uMonitorRCX = rcx;
149 pVCpu->em.s.MWait.uMonitorRDX = rdx;
150 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
151 /** @todo Make use of GCPhys. */
152 NOREF(GCPhys);
153 /** @todo Complete MONITOR implementation. */
154 return VINF_SUCCESS;
155}
156
157
158/**
159 * Checks if the monitor hardware is armed / active.
160 *
161 * @returns true if armed, false otherwise.
162 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
163 */
164VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
165{
166 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
167}
168
169
170/**
171 * Performs an MWAIT.
172 *
173 * @returns VINF_SUCCESS
174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
175 * @param rax The content of RAX.
176 * @param rcx The content of RCX.
177 */
178VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
179{
180 pVCpu->em.s.MWait.uMWaitRAX = rax;
181 pVCpu->em.s.MWait.uMWaitRCX = rcx;
182 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
183 if (rcx)
184 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
185 else
186 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
187 /** @todo not completely correct?? */
188 return VINF_EM_HALT;
189}
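/*
 * Illustrative flow sketch for the two halves above: MONITOR arms the
 * monitor, MWAIT halts until an interrupt force-flag breaks the wait.
 * pCtx stands in for the guest context of the exiting instruction.
 *
 * @code
 *  // On a MONITOR exit:
 *  EMMonitorWaitPrepare(pVCpu, pCtx->rax, pCtx->rcx, pCtx->rdx, NIL_RTGCPHYS);
 *
 *  // On a subsequent MWAIT exit (ECX bit 0 requests IRQ breaks even if IF=0):
 *  int rc = EMMonitorWaitPerform(pVCpu, pCtx->rax, pCtx->rcx);
 *  // rc == VINF_EM_HALT; stay halted until EMMonitorWaitShouldContinue()
 *  // (below) reports that execution may resume.
 * @endcode
 */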
190
191
192
193/**
194 * Determine if we should continue execution in HM after encountering an mwait
195 * instruction.
196 *
197 * Clears MWAIT flags if returning @c true.
198 *
199 * @returns true if we should continue, false if we should halt.
200 * @param pVCpu The cross context virtual CPU structure.
201 * @param pCtx Current CPU context.
202 */
203VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
204{
205 if ( pCtx->eflags.Bits.u1IF
206 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
207 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
208 {
209 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
210 {
211 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
212 return true;
213 }
214 }
215
216 return false;
217}
218
219
220/**
221 * Determine if we should continue execution in HM after encountering a hlt
222 * instruction.
223 *
224 * @returns true if we should continue, false if we should halt.
225 * @param pVCpu The cross context virtual CPU structure.
226 * @param pCtx Current CPU context.
227 */
228VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
229{
230 /** @todo Shouldn't we be checking GIF here? */
231 if (pCtx->eflags.Bits.u1IF)
232 return VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
233 return false;
234}
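/*
 * Illustrative usage sketch: a hypothetical HLT exit handler deciding
 * whether to keep executing in HM or really halt the vCPU.
 *
 * @code
 *  if (EMShouldContinueAfterHalt(pVCpu, pCtx))
 *      return VINF_SUCCESS;    // interrupt pending and IF=1: resume guest
 *  return VINF_EM_HALT;        // halt until something wakes us up
 * @endcode
 */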
235
236
237/**
238 * Unhalts and wakes up the given CPU.
239 *
240 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
241 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up. If
242 * the CPU isn't currently in a halt, the next HLT instruction it executes will
243 * be affected.
244 *
245 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
246 * @param pVM The cross context VM structure.
247 * @param pVCpuDst The cross context virtual CPU structure of the
248 * CPU to unhalt and wake up. This is usually not the
249 * same as the caller.
250 * @thread EMT
251 */
252VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst)
253{
254 /*
255 * Flag the current(/next) HLT to unhalt immediately.
256 */
257 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);
258
259 /*
260 * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
261 * just do it here for now).
262 */
263#ifdef IN_RING0
264 /* We might be here with preemption disabled or enabled (i.e. depending on
265 thread-context hooks being used), so don't try obtaining the GVMMR0 used
266 lock here. See @bugref{7270#c148}. */
267 int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
268 AssertRC(rc);
269
270#elif defined(IN_RING3)
271 int rc = SUPR3CallVMMR0(pVM->pVMR0, pVCpuDst->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
272 AssertRC(rc);
273
274#else
275 /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
276 Assert(pVM->cCpus == 1); NOREF(pVM);
277 int rc = VINF_SUCCESS;
278#endif
279 return rc;
280}
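/*
 * Illustrative usage sketch: how a GIM provider might implement the KVM
 * KICK_CPU hypercall mentioned above. The helper name and bounds check
 * are assumptions for illustration.
 *
 * @code
 *  static int gimExampleKickCpu(PVM pVM, VMCPUID idCpu)
 *  {
 *      if (idCpu >= pVM->cCpus)
 *          return VERR_INVALID_CPU_ID;
 *      return EMUnhaltAndWakeUp(pVM, &pVM->aCpus[idCpu]);
 *  }
 * @endcode
 */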
281
282#ifndef IN_RING3
283
284/**
285 * Makes an I/O port write pending for ring-3 processing.
286 *
287 * @returns VINF_EM_PENDING_R3_IOPORT_WRITE
288 * @param pVCpu The cross context virtual CPU structure.
289 * @param uPort The I/O port.
290 * @param cbInstr The instruction length (for RIP updating).
291 * @param cbValue The write size.
292 * @param uValue The value being written.
293 * @sa emR3ExecutePendingIoPortWrite
294 *
295 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
296 */
297VMMRZ_INT_DECL(VBOXSTRICTRC)
298EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
299{
300 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
301 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
302 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
303 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
304 pVCpu->em.s.PendingIoPortAccess.uValue = uValue;
305 return VINF_EM_PENDING_R3_IOPORT_WRITE;
306}
307
308
309/**
310 * Makes an I/O port read pending for ring-3 processing.
311 *
312 * @returns VINF_EM_PENDING_R3_IOPORT_READ
313 * @param pVCpu The cross context virtual CPU structure.
314 * @param uPort The I/O port.
315 * @param cbInstr The instruction length (for RIP updating).
316 * @param cbValue The read size.
317 * @sa emR3ExecutePendingIoPortRead
318 *
319 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
320 */
321VMMRZ_INT_DECL(VBOXSTRICTRC)
322EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
323{
324 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
325 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
326 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
327 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
328 pVCpu->em.s.PendingIoPortAccess.uValue = UINT32_C(0x52454144); /* 'READ' */
329 return VINF_EM_PENDING_R3_IOPORT_READ;
330}
331
332#endif /* !IN_RING3 */
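/*
 * Illustrative usage sketch: a hypothetical ring-0 OUT handler that cannot
 * complete the access and defers it to ring-3. uPort, cbInstr and bValue
 * stand in for values taken from the exit information.
 *
 * @code
 *  // IOM wants the port handled in ring-3: record the byte write and let
 *  // emR3ExecutePendingIoPortWrite replay it there after the world switch.
 *  return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, 1, bValue);
 * @endcode
 */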
333
334
335/**
336 * Worker for EMHistoryExec that checks for ring-3 returns and flags
337 * continuation of the EMHistoryExec run there.
338 */
339DECL_FORCE_INLINE(void) emHistoryExecSetContinueExitRecIdx(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, PCEMEXITREC pExitRec)
340{
341 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
342#ifdef IN_RING3
343 RT_NOREF_PV(rcStrict); RT_NOREF_PV(pExitRec);
344#else
345 switch (VBOXSTRICTRC_VAL(rcStrict))
346 {
347 case VINF_SUCCESS:
348 default:
349 break;
350
351 /*
352 * Only status codes that EMHandleRCTmpl.h will resume EMHistoryExec with.
353 */
354 case VINF_IOM_R3_IOPORT_READ: /* -> emR3ExecuteIOInstruction */
355 case VINF_IOM_R3_IOPORT_WRITE: /* -> emR3ExecuteIOInstruction */
356 case VINF_IOM_R3_IOPORT_COMMIT_WRITE: /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
357 case VINF_IOM_R3_MMIO_READ: /* -> emR3ExecuteInstruction */
358 case VINF_IOM_R3_MMIO_WRITE: /* -> emR3ExecuteInstruction */
359 case VINF_IOM_R3_MMIO_READ_WRITE: /* -> emR3ExecuteInstruction */
360 case VINF_IOM_R3_MMIO_COMMIT_WRITE: /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
361 case VINF_CPUM_R3_MSR_READ: /* -> emR3ExecuteInstruction */
362 case VINF_CPUM_R3_MSR_WRITE: /* -> emR3ExecuteInstruction */
363 case VINF_GIM_R3_HYPERCALL: /* -> emR3ExecuteInstruction */
364 pVCpu->em.s.idxContinueExitRec = (uint16_t)(pExitRec - &pVCpu->em.s.aExitRecords[0]);
365 break;
366 }
367#endif /* !IN_RING3 */
368}
369
370#ifndef IN_RC
371
372/**
373 * Execute using history.
374 *
375 * This function will be called when EMHistoryAddExit() and friends returns a
376 * non-NULL result. This happens in response to probing or when probing has
377 * uncovered adjacent exits which can more effectively be reached by using IEM
379 * than restarting execution using the main execution engine and fielding a
379 * regular exit.
380 *
381 * @returns VBox strict status code, see IEMExecForExits.
382 * @param pVCpu The cross context virtual CPU structure.
383 * @param pExitRec The exit record returned by a previous history add
384 * or update call.
385 * @param fWillExit Flags indicating to IEM what will cause exits, TBD.
386 */
387VMM_INT_DECL(VBOXSTRICTRC) EMHistoryExec(PVMCPU pVCpu, PCEMEXITREC pExitRec, uint32_t fWillExit)
388{
389 Assert(pExitRec);
390 VMCPU_ASSERT_EMT(pVCpu);
391 IEMEXECFOREXITSTATS ExecStats;
392 switch (pExitRec->enmAction)
393 {
394 /*
395 * Executes multiple instructions, stopping only when we've gone a given
396 * number without perceived exits.
397 */
398 case EMEXITACTION_EXEC_WITH_MAX:
399 {
400 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryExec, a);
401 LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %RX64, max %u\n", pExitRec->uFlatPC, pExitRec->cMaxInstructionsWithoutExit));
402 VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
403 pExitRec->cMaxInstructionsWithoutExit /* cMinInstructions*/,
404 pVCpu->em.s.cHistoryExecMaxInstructions,
405 pExitRec->cMaxInstructionsWithoutExit,
406 &ExecStats);
407 LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
408 VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
409 emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRec);
410
411 /* Ignore instructions IEM doesn't know about. */
412 if ( ( rcStrict != VERR_IEM_INSTR_NOT_IMPLEMENTED
413 && rcStrict != VERR_IEM_ASPECT_NOT_IMPLEMENTED)
414 || ExecStats.cInstructions == 0)
415 { /* likely */ }
416 else
417 rcStrict = VINF_SUCCESS;
418
419 if (ExecStats.cExits > 1)
420 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecSavedExits, ExecStats.cExits - 1);
421 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecInstructions, ExecStats.cInstructions);
422 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryExec, a);
423 return rcStrict;
424 }
425
426 /*
427 * Probe an exit for nearby exits.
428 */
429 case EMEXITACTION_EXEC_PROBE:
430 {
431 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryProbe, b);
432 LogFlow(("EMHistoryExec/EXEC_PROBE: %RX64\n", pExitRec->uFlatPC));
433 PEMEXITREC pExitRecUnconst = (PEMEXITREC)pExitRec;
434 VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
435 pVCpu->em.s.cHistoryProbeMinInstructions,
436 pVCpu->em.s.cHistoryExecMaxInstructions,
437 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit,
438 &ExecStats);
439 LogFlow(("EMHistoryExec/EXEC_PROBE: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
440 VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
441 emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRecUnconst);
442 if ( ExecStats.cExits >= 2
443 && RT_SUCCESS(rcStrict))
444 {
445 Assert(ExecStats.cMaxExitDistance > 0 && ExecStats.cMaxExitDistance <= 32);
446 pExitRecUnconst->cMaxInstructionsWithoutExit = ExecStats.cMaxExitDistance;
447 pExitRecUnconst->enmAction = EMEXITACTION_EXEC_WITH_MAX;
448 LogFlow(("EMHistoryExec/EXEC_PROBE: -> EXEC_WITH_MAX %u\n", ExecStats.cMaxExitDistance));
449 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedExecWithMax);
450 }
451#ifndef IN_RING3
452 else if ( pVCpu->em.s.idxContinueExitRec != UINT16_MAX
453 && RT_SUCCESS(rcStrict))
454 {
455 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedToRing3);
456 LogFlow(("EMHistoryExec/EXEC_PROBE: -> ring-3\n"));
457 }
458#endif
459 else
460 {
461 pExitRecUnconst->enmAction = EMEXITACTION_NORMAL_PROBED;
462 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
463 LogFlow(("EMHistoryExec/EXEC_PROBE: -> PROBED\n"));
464 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedNormal);
465 if ( rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
466 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
467 rcStrict = VINF_SUCCESS;
468 }
469 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryProbeInstructions, ExecStats.cInstructions);
470 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryProbe, b);
471 return rcStrict;
472 }
473
474 /* We shouldn't ever see these here! */
475 case EMEXITACTION_FREE_RECORD:
476 case EMEXITACTION_NORMAL:
477 case EMEXITACTION_NORMAL_PROBED:
478 break;
479
480 /* No default case, want compiler warnings. */
481 }
482 AssertLogRelFailedReturn(VERR_EM_INTERNAL_ERROR);
483}
484
485
486/**
487 * Worker for emHistoryAddOrUpdateRecord.
488 */
489DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInit(PEMEXITREC pExitRec, uint64_t uFlatPC, uint32_t uFlagsAndType, uint64_t uExitNo)
490{
491 pExitRec->uFlatPC = uFlatPC;
492 pExitRec->uFlagsAndType = uFlagsAndType;
493 pExitRec->enmAction = EMEXITACTION_NORMAL;
494 pExitRec->bUnused = 0;
495 pExitRec->cMaxInstructionsWithoutExit = 64;
496 pExitRec->uLastExitNo = uExitNo;
497 pExitRec->cHits = 1;
498 return NULL;
499}
500
501
502/**
503 * Worker for emHistoryAddOrUpdateRecord.
504 */
505DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitNew(PVMCPU pVCpu, PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
506 PEMEXITREC pExitRec, uint64_t uFlatPC,
507 uint32_t uFlagsAndType, uint64_t uExitNo)
508{
509 pHistEntry->idxSlot = (uint32_t)idxSlot;
510 pVCpu->em.s.cExitRecordUsed++;
511 LogFlow(("emHistoryRecordInitNew: [%#x] = %#07x %016RX64; (%u of %u used)\n", idxSlot, uFlagsAndType, uFlatPC,
512 pVCpu->em.s.cExitRecordUsed, RT_ELEMENTS(pVCpu->em.s.aExitRecords) ));
513 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
514}
515
516
517/**
518 * Worker for emHistoryAddOrUpdateRecord.
519 */
520DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitReplacement(PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
521 PEMEXITREC pExitRec, uint64_t uFlatPC,
522 uint32_t uFlagsAndType, uint64_t uExitNo)
523{
524 pHistEntry->idxSlot = (uint32_t)idxSlot;
525 LogFlow(("emHistoryRecordInitReplacement: [%#x] = %#07x %016RX64 replacing %#07x %016RX64 with %u hits, %u exits old\n",
526 idxSlot, uFlagsAndType, uFlatPC, pExitRec->uFlagsAndType, pExitRec->uFlatPC, pExitRec->cHits,
527 uExitNo - pExitRec->uLastExitNo));
528 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
529}
530
531
532/**
533 * Adds or updates the EMEXITREC for this PC/type and decides on an action.
534 *
535 * @returns Pointer to an exit record if special action should be taken using
536 * EMHistoryExec(). Take normal exit action when NULL.
537 *
538 * @param pVCpu The cross context virtual CPU structure.
539 * @param uFlagsAndType Combined flags and type, with EMEXIT_F_KIND_EM set and
540 * both EMEXIT_F_CS_EIP and EMEXIT_F_UNFLATTENED_PC clear.
541 * @param uFlatPC The flattened program counter.
542 * @param pHistEntry The exit history entry.
543 * @param uExitNo The current exit number.
544 */
545static PCEMEXITREC emHistoryAddOrUpdateRecord(PVMCPU pVCpu, uint64_t uFlagsAndType, uint64_t uFlatPC,
546 PEMEXITENTRY pHistEntry, uint64_t uExitNo)
547{
548# ifdef IN_RING0
549 /* Disregard the hm flag. */
550 uFlagsAndType &= ~EMEXIT_F_HM;
551# endif
552
553 /*
554 * Work the hash table.
555 */
556 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
557# define EM_EXIT_RECORDS_IDX_MASK 0x3ff
558 uintptr_t idxSlot = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
559 PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
560 if (pExitRec->uFlatPC == uFlatPC)
561 {
562 Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
563 pHistEntry->idxSlot = (uint32_t)idxSlot;
564 if (pExitRec->uFlagsAndType == uFlagsAndType)
565 {
566 pExitRec->uLastExitNo = uExitNo;
567 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[0]);
568 }
569 else
570 {
571 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[0]);
572 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
573 }
574 }
575 else if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
576 {
577 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[0]);
578 return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
579 }
580 else
581 {
582 /*
583 * Collision. We derive a secondary hash for stepping away from the first
584 * slot, taking up to 8 steps before replacing the least recently used record.
585 */
586 uintptr_t idxOldest = idxSlot;
587 uint64_t uOldestExitNo = pExitRec->uLastExitNo;
588 unsigned iOldestStep = 0;
589 unsigned iStep = 1;
590 uintptr_t const idxAdd = (uintptr_t)(uFlatPC >> 11) & (EM_EXIT_RECORDS_IDX_MASK / 4);
591 for (;;)
592 {
593 Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
594 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecNew) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
595 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
596 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecTypeChanged) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
597
598 /* Step to the next slot. */
599 idxSlot += idxAdd;
600 idxSlot &= EM_EXIT_RECORDS_IDX_MASK;
601 pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
602
603 /* Does it match? */
604 if (pExitRec->uFlatPC == uFlatPC)
605 {
606 Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
607 pHistEntry->idxSlot = (uint32_t)idxSlot;
608 if (pExitRec->uFlagsAndType == uFlagsAndType)
609 {
610 pExitRec->uLastExitNo = uExitNo;
611 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[iStep]);
612 break;
613 }
614 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[iStep]);
615 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
616 }
617
618 /* Is it free? */
619 if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
620 {
621 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[iStep]);
622 return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
623 }
624
625 /* Is it the least recently used one? */
626 if (pExitRec->uLastExitNo < uOldestExitNo)
627 {
628 uOldestExitNo = pExitRec->uLastExitNo;
629 idxOldest = idxSlot;
630 iOldestStep = iStep;
631 }
632
633 /* Next iteration? */
634 iStep++;
635 Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced));
636 if (RT_LIKELY(iStep < 8 + 1))
637 { /* likely */ }
638 else
639 {
640 /* Replace the least recently used slot. */
641 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecReplaced[iOldestStep]);
642 pExitRec = &pVCpu->em.s.aExitRecords[idxOldest];
643 return emHistoryRecordInitReplacement(pHistEntry, idxOldest, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
644 }
645 }
646 }
647
648 /*
649 * Found an existing record.
650 */
651 switch (pExitRec->enmAction)
652 {
653 case EMEXITACTION_NORMAL:
654 {
655 uint64_t const cHits = ++pExitRec->cHits;
656 if (cHits < 256)
657 return NULL;
658 LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> EXEC_PROBE\n", idxSlot, uFlagsAndType, uFlatPC));
659 pExitRec->enmAction = EMEXITACTION_EXEC_PROBE;
660 return pExitRec;
661 }
662
663 case EMEXITACTION_NORMAL_PROBED:
664 pExitRec->cHits += 1;
665 return NULL;
666
667 default:
668 pExitRec->cHits += 1;
669 return pExitRec;
670
671 /* This will happen if the caller ignores or cannot serve the probe
672 request (forced to ring-3, whatever). We retry this 256 times. */
673 case EMEXITACTION_EXEC_PROBE:
674 {
675 uint64_t const cHits = ++pExitRec->cHits;
676 if (cHits < 512)
677 return pExitRec;
678 pExitRec->enmAction = EMEXITACTION_NORMAL_PROBED;
679 LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> PROBED\n", idxSlot, uFlagsAndType, uFlatPC));
680 return NULL;
681 }
682 }
683}
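/*
 * Illustrative model of the hashing scheme above: the primary slot comes
 * from bits [10:1] of the flat PC, and collisions step by a secondary hash
 * taken from bits [18:11], wrapping within the 1024-entry table.
 *
 * @code
 *  uintptr_t idxSlot = ((uintptr_t)uFlatPC >> 1) & 0x3ff;          // primary hash
 *  uintptr_t idxAdd  = (uintptr_t)(uFlatPC >> 11) & (0x3ff / 4);   // step size
 *  for (unsigned iStep = 1; iStep <= 8; iStep++)
 *  {
 *      idxSlot = (idxSlot + idxAdd) & 0x3ff;   // probe the next slot
 *      // ... match / free-slot / LRU bookkeeping as in the loop above ...
 *  }
 * @endcode
 */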
684
685#endif /* !IN_RC */
686
687/**
688 * Adds an exit to the history for this CPU.
689 *
690 * @returns Pointer to an exit record if special action should be taken using
691 * EMHistoryExec(). Take normal exit action when NULL.
692 *
693 * @param pVCpu The cross context virtual CPU structure.
694 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
695 * @param uFlatPC The flattened program counter (RIP). UINT64_MAX if not available.
696 * @param uTimestamp The TSC value for the exit, 0 if not available.
697 * @thread EMT(pVCpu)
698 */
699VMM_INT_DECL(PCEMEXITREC) EMHistoryAddExit(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
700{
701 VMCPU_ASSERT_EMT(pVCpu);
702
703 /*
704 * Add the exit history entry.
705 */
706 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
707 uint64_t uExitNo = pVCpu->em.s.iNextExit++;
708 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
709 pHistEntry->uFlatPC = uFlatPC;
710 pHistEntry->uTimestamp = uTimestamp;
711 pHistEntry->uFlagsAndType = uFlagsAndType;
712 pHistEntry->idxSlot = UINT32_MAX;
713
714#ifndef IN_RC
715 /*
716 * If common exit type, we will insert/update the exit into the exit record hash table.
717 */
718 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
719# ifdef IN_RING0
720 && pVCpu->em.s.fExitOptimizationEnabledR0
721 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
722# else
723 && pVCpu->em.s.fExitOptimizationEnabled
724# endif
725 && uFlatPC != UINT64_MAX
726 )
727 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
728#endif
729 return NULL;
730}
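/*
 * Illustrative usage sketch: how an execution engine's exit handler might
 * feed the history and act on the guidance. uExitType and the surrounding
 * handler are assumptions; ASMReadTSC() requires iprt/asm-amd64-x86.h.
 *
 * @code
 *  PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
 *                                          EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, uExitType),
 *                                          uFlatPC, ASMReadTSC());
 *  if (pExitRec)
 *      return EMHistoryExec(pVCpu, pExitRec, 0); // fWillExit: TBD per the docs
 *  // NULL: take the normal action for this exit.
 * @endcode
 */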
731
732
733#ifdef IN_RC
734/**
735 * Special raw-mode interface for adding an exit to the history.
736 *
737 * Currently this is only for recording, not optimizing, so no return value. If
738 * we start seriously caring about raw-mode again, we may extend it.
739 *
740 * @param pVCpu The cross context virtual CPU structure.
741 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
742 * @param uCs The CS.
743 * @param uEip The EIP.
744 * @param uTimestamp The TSC value for the exit, 0 if not available.
745 * @thread EMT(0)
746 */
747VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
748{
749 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
750 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
751 pHistEntry->uFlatPC = ((uint64_t)uCs << 32) | uEip;
752 pHistEntry->uTimestamp = uTimestamp;
753 pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
754 pHistEntry->idxSlot = UINT32_MAX;
755}
756#endif
757
758
759#ifdef IN_RING0
760/**
761 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
762 *
763 * @param pVCpu The cross context virtual CPU structure.
764 * @param uFlatPC The flattened program counter (RIP).
765 * @param fFlattened Set if RIP was subjected to CS.BASE, clear if not.
766 */
767VMMR0_INT_DECL(void) EMR0HistoryUpdatePC(PVMCPU pVCpu, uint64_t uFlatPC, bool fFlattened)
768{
769 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
770 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
771 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
772 pHistEntry->uFlatPC = uFlatPC;
773 if (fFlattened)
774 pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
775 else
776 pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
777}
778#endif
779
780
781/**
782 * Interface for converting an engine-specific exit to a generic one and getting guidance.
783 *
784 * @returns Pointer to an exit record if special action should be taken using
785 * EMHistoryExec(). Take normal exit action when NULL.
786 *
787 * @param pVCpu The cross context virtual CPU structure.
788 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
789 * @thread EMT(pVCpu)
790 */
791VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndType(PVMCPU pVCpu, uint32_t uFlagsAndType)
792{
793 VMCPU_ASSERT_EMT(pVCpu);
794
795 /*
796 * Do the updating.
797 */
798 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
799 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
800 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
801 pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));
802
803#ifndef IN_RC
804 /*
805 * If common exit type, we will insert/update the exit into the exit record hash table.
806 */
807 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
808# ifdef IN_RING0
809 && pVCpu->em.s.fExitOptimizationEnabledR0
810 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
811# else
812 && pVCpu->em.s.fExitOptimizationEnabled
813# endif
814 && pHistEntry->uFlatPC != UINT64_MAX
815 )
816 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
817#endif
818 return NULL;
819}
820
821
822/**
823 * Interface for converting an engine-specific exit to a generic one and getting
824 * guidance, supplying the flattened PC too.
825 *
826 * @returns Pointer to an exit record if special action should be taken using
827 * EMHistoryExec(). Take normal exit action when NULL.
828 *
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
831 * @param uFlatPC The flattened program counter (RIP).
832 * @thread EMT(pVCpu)
833 */
834VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
835{
836 VMCPU_ASSERT_EMT(pVCpu);
837 Assert(uFlatPC != UINT64_MAX);
838
839 /*
840 * Do the updating.
841 */
842 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
843 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
844 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
845 pHistEntry->uFlagsAndType = uFlagsAndType;
846 pHistEntry->uFlatPC = uFlatPC;
847
848#ifndef IN_RC
849 /*
850 * If common exit type, we will insert/update the exit into the exit record hash table.
851 */
852 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
853# ifdef IN_RING0
854 && pVCpu->em.s.fExitOptimizationEnabledR0
855 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
856# else
857 && pVCpu->em.s.fExitOptimizationEnabled
858# endif
859 )
860 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
861#endif
862 return NULL;
863}
864
865
866/**
867 * Locks REM execution to a single VCPU.
868 *
869 * @param pVM The cross context VM structure.
870 */
871VMMDECL(void) EMRemLock(PVM pVM)
872{
873#ifdef VBOX_WITH_REM
874 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
875 return; /* early init */
876
877 Assert(!PGMIsLockOwner(pVM));
878 Assert(!IOMIsLockWriteOwner(pVM));
879 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
880 AssertRCSuccess(rc);
881#else
882 RT_NOREF(pVM);
883#endif
884}
885
886
887/**
888 * Unlocks REM execution.
889 *
890 * @param pVM The cross context VM structure.
891 */
892VMMDECL(void) EMRemUnlock(PVM pVM)
893{
894#ifdef VBOX_WITH_REM
895 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
896 return; /* early init */
897
898 PDMCritSectLeave(&pVM->em.s.CritSectREM);
899#else
900 RT_NOREF(pVM);
901#endif
902}
903
904
905/**
906 * Check if this VCPU currently owns the REM lock.
907 *
908 * @returns bool owner/not owner
909 * @param pVM The cross context VM structure.
910 */
911VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
912{
913#ifdef VBOX_WITH_REM
914 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
915 return true; /* early init */
916
917 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
918#else
919 RT_NOREF(pVM);
920 return true;
921#endif
922}
923
924
925/**
926 * Try to acquire the REM lock.
927 *
928 * @returns VBox status code
929 * @param pVM The cross context VM structure.
930 */
931VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
932{
933#ifdef VBOX_WITH_REM
934 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
935 return VINF_SUCCESS; /* early init */
936
937 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
938#else
939 RT_NOREF(pVM);
940 return VINF_SUCCESS;
941#endif
942}
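/*
 * Illustrative usage sketch: bracketing a recompiler operation with the
 * lock. The call in the middle is a placeholder.
 *
 * @code
 *  EMRemLock(pVM);
 *  // ... call into the recompiler (REMR3*) here ...
 *  EMRemUnlock(pVM);
 * @endcode
 */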
943
944
945/**
946 * @callback_method_impl{FNDISREADBYTES}
947 */
948static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
949{
950 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
951#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
952 PVM pVM = pVCpu->CTX_SUFF(pVM);
953#endif
954 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
955 int rc;
956
957 /*
958 * Figure how much we can or must read.
959 */
960 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
961 if (cbToRead > cbMaxRead)
962 cbToRead = cbMaxRead;
963 else if (cbToRead < cbMinRead)
964 cbToRead = cbMinRead;
965
966#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
967 /*
968 * We might be called upon to interpret an instruction in a patch.
969 */
970 if (PATMIsPatchGCAddr(pVM, uSrcAddr))
971 {
972# ifdef IN_RC
973 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
974# else
975 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVM, uSrcAddr), cbToRead);
976# endif
977 rc = VINF_SUCCESS;
978 }
979 else
980#endif
981 {
982# ifdef IN_RC
983 /*
984 * Try access it thru the shadow page tables first. Fall back on the
985 * slower PGM method if it fails because the TLB or page table was
986 * modified recently.
987 */
988 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
989 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
990 {
991 cbToRead = cbMinRead;
992 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
993 }
994 if (rc == VERR_ACCESS_DENIED)
995#endif
996 {
997 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
998 if (RT_FAILURE(rc))
999 {
1000 if (cbToRead > cbMinRead)
1001 {
1002 cbToRead = cbMinRead;
1003 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
1004 }
1005 if (RT_FAILURE(rc))
1006 {
1007#ifndef IN_RC
1008 /*
1009 * If we fail to find the page via the guest's page tables
1010 * we invalidate the page in the host TLB (pertaining to
1011 * the guest in the NestedPaging case). See @bugref{6043}.
1012 */
1013 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
1014 {
1015 HMInvalidatePage(pVCpu, uSrcAddr);
1016 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
1017 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
1018 }
1019#endif
1020 }
1021 }
1022 }
1023 }
1024
1025 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
1026 return rc;
1027}
1028
1029
1030
1031/**
1032 * Disassembles the current instruction.
1033 *
1034 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
1035 * details.
1036 *
1037 * @param pVM The cross context VM structure.
1038 * @param pVCpu The cross context virtual CPU structure.
1039 * @param pDis Where to return the parsed instruction info.
1040 * @param pcbInstr Where to return the instruction size. (optional)
1041 */
1042VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
1043{
1044 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
1045 RTGCPTR GCPtrInstr;
1046#if 0
1047 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
1048#else
1049/** @todo Get the CPU mode as well while we're at it! */
1050 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
1051 pCtxCore->rip, &GCPtrInstr);
1052#endif
1053 if (RT_FAILURE(rc))
1054 {
1055 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
1056 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
1057 return rc;
1058 }
1059 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
1060}
1061
1062
1063/**
1064 * Disassembles one instruction.
1065 *
1066 * This is used internally by the interpreter and by trap/access handlers.
1067 *
1068 * @returns VBox status code.
1069 *
1070 * @param pVM The cross context VM structure.
1071 * @param pVCpu The cross context virtual CPU structure.
1072 * @param GCPtrInstr The flat address of the instruction.
1073 * @param pCtxCore The context core (used to determine the cpu mode).
1074 * @param pDis Where to return the parsed instruction info.
1075 * @param pcbInstr Where to return the instruction size. (optional)
1076 */
1077VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
1078 PDISCPUSTATE pDis, unsigned *pcbInstr)
1079{
1080 NOREF(pVM);
1081 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
1082 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
1083 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
1084 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
1085 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
1086 if (RT_SUCCESS(rc))
1087 return VINF_SUCCESS;
1088 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISInstrWithReader failed on GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
1089 return rc;
1090}
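/*
 * Illustrative usage sketch: a hypothetical trap handler disassembling the
 * current instruction to inspect its opcode and length.
 *
 * @code
 *  DISCPUSTATE Dis;
 *  unsigned    cbInstr;
 *  int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
 *  if (RT_SUCCESS(rc))
 *      LogFlow(("Opcode %u, %u bytes\n", Dis.pCurInstr->uOpcode, cbInstr));
 * @endcode
 */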
1091
1092
1093/**
1094 * Interprets the current instruction.
1095 *
1096 * @returns VBox status code.
1097 * @retval VINF_* Scheduling instructions.
1098 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1099 * @retval VERR_* Fatal errors.
1100 *
1101 * @param pVCpu The cross context virtual CPU structure.
1102 * @param pRegFrame The register frame.
1103 * Updates the EIP if an instruction was executed successfully.
1104 * @param pvFault The fault address (CR2).
1105 *
1106 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1107 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1108 * to worry about e.g. invalid modrm combinations (!)
1109 */
1110VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1111{
1112 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1113 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1114 NOREF(pvFault);
1115
1116 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1117 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1118 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1119 rc = VERR_EM_INTERPRETER;
1120 if (rc != VINF_SUCCESS)
1121 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1122
1123 return rc;
1124}
1125
1126
1127/**
1128 * Interprets the current instruction.
1129 *
1130 * @returns VBox status code.
1131 * @retval VINF_* Scheduling instructions.
1132 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1133 * @retval VERR_* Fatal errors.
1134 *
1135 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1136 * @param pRegFrame The register frame.
1137 * Updates the EIP if an instruction was executed successfully.
1138 * @param pvFault The fault address (CR2).
1139 * @param pcbWritten Size of the write (if applicable).
1140 *
1141 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1142 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1143 * to worry about e.g. invalid modrm combinations (!)
1144 */
1145VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
1146{
1147 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1148 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1149 NOREF(pvFault);
1150
1151 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
1152 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1153 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1154 rc = VERR_EM_INTERPRETER;
1155 if (rc != VINF_SUCCESS)
1156 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1157
1158 return rc;
1159}
1160
1161
1162/**
1163 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1164 *
1165 * IP/EIP/RIP *IS* updated!
1166 *
1167 * @returns VBox strict status code.
1168 * @retval VINF_* Scheduling instructions. When these are returned, it
1169 * starts to get a bit tricky to know whether code was
1170 * executed or not... We'll address this when it becomes a problem.
1171 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1172 * @retval VERR_* Fatal errors.
1173 *
1174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1175 * @param pDis The disassembler cpu state for the instruction to be
1176 * interpreted.
1177 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1178 * @param pvFault The fault address (CR2).
1179 * @param enmCodeType Code type (user/supervisor)
1180 *
1181 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1182 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1183 * to worry about e.g. invalid modrm combinations (!)
1184 *
1185 * @todo At this time we do NOT check if the instruction overwrites vital information.
1186 * Make sure this can't happen!! (will add some assertions/checks later)
1187 */
1188VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
1189 RTGCPTR pvFault, EMCODETYPE enmCodeType)
1190{
1191 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1192 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1193 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
1194
1195 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1196 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1197 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1198 rc = VERR_EM_INTERPRETER;
1199
1200 if (rc != VINF_SUCCESS)
1201 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1202
1203 return rc;
1204}
1205
1206#ifdef IN_RC
1207
1208DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1209{
1210 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1211 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1212 return rc;
1213 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1214}
1215
1216
1217/**
1218 * Interpret IRET (currently only to V86 code) - PATM only.
1219 *
1220 * @returns VBox status code.
1221 * @param pVM The cross context VM structure.
1222 * @param pVCpu The cross context virtual CPU structure.
1223 * @param pRegFrame The register frame.
1224 *
1225 */
1226VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1227{
1228 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1229 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1230 int rc;
1231
1232 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1233 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1234 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1235 * this function. Fear that it may guru on us, thus not converted to
1236 * IEM. */
1237
1238 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1239 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1240 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1241 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1242 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1243
1244 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1245 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1246 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1247 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1248 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1249 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1250 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1251
1252 pRegFrame->eip = eip & 0xffff;
1253 pRegFrame->cs.Sel = cs;
1254
1255 /* Mask away all reserved bits */
1256 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1257 eflags &= uMask;
1258
1259 CPUMRawSetEFlags(pVCpu, eflags);
1260 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1261
1262 pRegFrame->esp = esp;
1263 pRegFrame->ss.Sel = ss;
1264 pRegFrame->ds.Sel = ds;
1265 pRegFrame->es.Sel = es;
1266 pRegFrame->fs.Sel = fs;
1267 pRegFrame->gs.Sel = gs;
1268
1269 return VINF_SUCCESS;
1270}
1271
1272
1273#endif /* IN_RC */
1274
1275
1276
1277/*
1278 *
1279 * Old interpreter primitives used by HM, move/eliminate later.
1280 * Old interpreter primitives used by HM, move/eliminate later.
1281 * Old interpreter primitives used by HM, move/eliminate later.
1282 * Old interpreter primitives used by HM, move/eliminate later.
1283 * Old interpreter primitives used by HM, move/eliminate later.
1284 *
1285 */
1286
1287
1288/**
1289 * Interpret RDPMC.
1290 *
1291 * @returns VBox status code.
1292 * @param pVM The cross context VM structure.
1293 * @param pVCpu The cross context virtual CPU structure.
1294 * @param pRegFrame The register frame.
1295 *
1296 */
1297VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1298{
1299 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1300 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1301
1302 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1303 if ( !(uCR4 & X86_CR4_PCE)
1304 && CPUMGetGuestCPL(pVCpu) != 0)
1305 {
1306 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1307 return VERR_EM_INTERPRETER; /* genuine #GP */
1308 }
1309
1310 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1311 pRegFrame->rax = 0;
1312 pRegFrame->rdx = 0;
1313 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1314 * ecx but see @bugref{3472}! */
1315
1316 NOREF(pVM);
1317 return VINF_SUCCESS;
1318}
1319
1320
1321/**
1322 * MWAIT Emulation.
1323 */
1324VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1325{
1326 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1327 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1328 NOREF(pVM);
1329
1330 /* Get the current privilege level. */
1331 cpl = CPUMGetGuestCPL(pVCpu);
1332 if (cpl != 0)
1333 return VERR_EM_INTERPRETER; /* supervisor only */
1334
1335 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1336 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1337 return VERR_EM_INTERPRETER; /* not supported */
1338
1339 /*
1340 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1341 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1342 */
1343 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1344 if (pRegFrame->ecx > 1)
1345 {
1346 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1347 return VERR_EM_INTERPRETER; /* illegal value. */
1348 }
1349
1350 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1351 {
1352 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1353 return VERR_EM_INTERPRETER; /* illegal value. */
1354 }
1355
1356 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1357}
1358
1359
1360/**
1361 * MONITOR Emulation.
1362 */
1363VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1364{
1365 uint32_t u32Dummy, u32ExtFeatures, cpl;
1366 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1367 NOREF(pVM);
1368
1369 if (pRegFrame->ecx != 0)
1370 {
1371 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1372 return VERR_EM_INTERPRETER; /* illegal value. */
1373 }
1374
1375 /* Get the current privilege level. */
1376 cpl = CPUMGetGuestCPL(pVCpu);
1377 if (cpl != 0)
1378 return VERR_EM_INTERPRETER; /* supervisor only */
1379
1380 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1381 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1382 return VERR_EM_INTERPRETER; /* not supported */
1383
1384 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1385 return VINF_SUCCESS;
1386}
1387
1388
1389/* VT-x only: */
1390
1391/**
1392 * Interpret DRx write.
1393 *
1394 * @returns VBox status code.
1395 * @param pVM The cross context VM structure.
1396 * @param pVCpu The cross context virtual CPU structure.
1397 * @param pRegFrame The register frame.
1398 * @param DestRegDrx DRx register index (USE_REG_DR*)
1400 * @param SrcRegGen General purpose register index (USE_REG_E**)
1400 *
1401 */
1402VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1403{
1404 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1405 uint64_t uNewDrX;
1406 int rc;
1407 NOREF(pVM);
1408
1409 if (CPUMIsGuestIn64BitCode(pVCpu))
1410 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1411 else
1412 {
1413 uint32_t val32;
1414 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1415 uNewDrX = val32;
1416 }
1417
1418 if (RT_SUCCESS(rc))
1419 {
1420 if (DestRegDrx == 6)
1421 {
1422 uNewDrX |= X86_DR6_RA1_MASK;
1423 uNewDrX &= ~X86_DR6_RAZ_MASK;
1424 }
1425 else if (DestRegDrx == 7)
1426 {
1427 uNewDrX |= X86_DR7_RA1_MASK;
1428 uNewDrX &= ~X86_DR7_RAZ_MASK;
1429 }
1430
1431 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1432 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1433 if (RT_SUCCESS(rc))
1434 return rc;
1435 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1436 }
1437 return VERR_EM_INTERPRETER;
1438}
1439
1440
1441/**
1442 * Interpret DRx read.
1443 *
1444 * @returns VBox status code.
1445 * @param pVM The cross context VM structure.
1446 * @param pVCpu The cross context virtual CPU structure.
1447 * @param pRegFrame The register frame.
1448 * @param DestRegGen General purpose register index (USE_REG_E**)
1449 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1450 */
1451VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1452{
1453 uint64_t val64;
1454 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1455 NOREF(pVM);
1456
1457 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1458 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1459 if (CPUMIsGuestIn64BitCode(pVCpu))
1460 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1461 else
1462 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1463
1464 if (RT_SUCCESS(rc))
1465 return VINF_SUCCESS;
1466
1467 return VERR_EM_INTERPRETER;
1468}
1469