VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@93582

Last change on this file since 93582 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.1 KB
/* $Id: EMAll.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
/** @file
 * EM - Execution Monitor(/Manager) - All contexts
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/stam.h>
#include "EMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/string.h>




/**
 * Gets the current execution manager status.
 *
 * @returns Current status.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
{
    return pVCpu->em.s.enmState;
}


/**
 * Sets the current execution manager status. (use only when you know what you're doing!)
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
 */
VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
{
    /* Only allowed combination: */
    Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
    pVCpu->em.s.enmState = enmNewState;
}


/**
 * Sets the PC for which interrupts should be inhibited.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   PC      The PC.
 */
VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
{
    pVCpu->em.s.GCPtrInhibitInterrupts = PC;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
}


/**
 * Gets the PC for which interrupts should be inhibited.
 *
 * There are a few instructions which inhibit or delay interrupts
 * for the instruction following them. These instructions are:
 *   - STI
 *   - MOV SS, r/m16
 *   - POP SS
 *
 * @returns The PC for which interrupts should be inhibited.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 */
VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
{
    return pVCpu->em.s.GCPtrInhibitInterrupts;
}


/**
 * Checks if interrupt inhibiting is enabled for the current instruction.
 *
 * @returns true if interrupts are inhibited, false if not.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(bool) EMIsInhibitInterruptsActive(PVMCPU pVCpu)
{
    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        return false;
    if (pVCpu->em.s.GCPtrInhibitInterrupts == CPUMGetGuestRIP(pVCpu))
        return true;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    return false;
}

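/*
 * Illustrative usage sketch (hypothetical, not part of the VirtualBox API
 * surface): an interrupt-injection path consulting the inhibit state before
 * delivering an external interrupt.  The emExampleCanInjectIrq name is made up.
 */
#if 0 /* illustrative only */
static bool emExampleCanInjectIrq(PVMCPU pVCpu)
{
    /* STI, MOV SS and POP SS inhibit interrupts for one instruction; the
       call below also clears the force flag once RIP has moved past it. */
    return !EMIsInhibitInterruptsActive(pVCpu);
}
#endif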

/**
 * Enables / disables hypercall instructions.
 *
 * This interface is used by GIM to tell the execution monitors whether the
 * hypercall instructions (VMMCALL & VMCALL) are allowed or should \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure this applies to.
 * @param   fEnabled    Whether hypercall instructions are enabled (true) or not.
 */
VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
{
    pVCpu->em.s.fHypercallEnabled = fEnabled;
}


/**
 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
 *
 * @returns true if enabled, false if not.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @note    If this call becomes a performance factor, we can make the data
 *          field available thru a read-only view in VMCPU. See VM::cpum.ro.
 */
VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
{
    return pVCpu->em.s.fHypercallEnabled;
}


/**
 * Prepare an MWAIT - essentials of the MONITOR instruction.
 *
 * @returns VINF_SUCCESS
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   rax     The content of RAX.
 * @param   rcx     The content of RCX.
 * @param   rdx     The content of RDX.
 * @param   GCPhys  The physical address corresponding to rax.
 */
VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
{
    pVCpu->em.s.MWait.uMonitorRAX = rax;
    pVCpu->em.s.MWait.uMonitorRCX = rcx;
    pVCpu->em.s.MWait.uMonitorRDX = rdx;
    pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
    /** @todo Make use of GCPhys. */
    NOREF(GCPhys);
    /** @todo Complete MONITOR implementation. */
    return VINF_SUCCESS;
}


/**
 * Checks if the monitor hardware is armed / active.
 *
 * @returns true if armed, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
}


/**
 * Checks if we're in a MWAIT.
 *
 * @retval  1 if regular,
 * @retval  > 1 if MWAIT with EMMWAIT_FLAG_BREAKIRQIF0
 * @retval  0 if not armed
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(unsigned) EMMonitorWaitIsActive(PVMCPU pVCpu)
{
    uint32_t fWait = pVCpu->em.s.MWait.fWait;
    AssertCompile(EMMWAIT_FLAG_ACTIVE == 1);
    AssertCompile(EMMWAIT_FLAG_BREAKIRQIF0 == 2);
    AssertCompile((EMMWAIT_FLAG_ACTIVE << 1) == EMMWAIT_FLAG_BREAKIRQIF0);
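    /* The expression below reports EMMWAIT_FLAG_BREAKIRQIF0 only when
       EMMWAIT_FLAG_ACTIVE is also set: shifting the masked ACTIVE bit left by
       one yields the BREAKIRQIF0 mask, so the result is 0 (not armed),
       1 (plain MWAIT) or 3 (MWAIT breaking on IRQ even when IF=0), matching
       the @retval docs above. */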
    return fWait & (EMMWAIT_FLAG_ACTIVE | ((fWait & EMMWAIT_FLAG_ACTIVE) << 1));
}


/**
 * Performs an MWAIT.
 *
 * @returns VINF_SUCCESS
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   rax     The content of RAX.
 * @param   rcx     The content of RCX.
 */
VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
{
    pVCpu->em.s.MWait.uMWaitRAX = rax;
    pVCpu->em.s.MWait.uMWaitRCX = rcx;
    pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
    if (rcx)
        pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
    else
        pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
    /** @todo not completely correct?? */
    return VINF_EM_HALT;
}

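/*
 * Illustrative sketch (hypothetical): MONITOR/MWAIT exit handlers driving the
 * helpers above.  The emExample* names and parameters are made up; real
 * callers also validate operands and resolve the monitored GC physical address.
 */
#if 0 /* illustrative only */
static int emExampleOnMonitorExit(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhysMonitored)
{
    /* Arm the monitor with the operand values. */
    return EMMonitorWaitPrepare(pVCpu, rax, rcx, rdx, GCPhysMonitored);
}

static int emExampleOnMWaitExit(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
{
    /* Record the wait; the VINF_EM_HALT return halts the EMT. */
    return EMMonitorWaitPerform(pVCpu, rax, rcx);
}
#endif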

/**
 * Clears any address-range monitoring that is active.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(void) EMMonitorWaitClear(PVMCPU pVCpu)
{
    LogFlowFunc(("Clearing MWAIT\n"));
    pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
}


/**
 * Determine if we should continue execution in HM after encountering an mwait
 * instruction.
 *
 * Clears MWAIT flags if returning @c true.
 *
 * @returns true if we should continue, false if we should halt.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Current CPU context.
 */
VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (CPUMGetGuestGif(pCtx))
    {
        if (   CPUMIsGuestPhysIntrEnabled(pVCpu)
            || (   CPUMIsGuestInNestedHwvirtMode(pCtx)
                && CPUMIsGuestVirtIntrEnabled(pVCpu))
            || (   (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                ==                            (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
        {
            if (VMCPU_FF_IS_ANY_SET(pVCpu, (  VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                            | VMCPU_FF_INTERRUPT_NESTED_GUEST)))
            {
                pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
                return true;
            }
        }
    }

    return false;
}


/**
 * Determine if we should continue execution in HM after encountering a hlt
 * instruction.
 *
 * @returns true if we should continue, false if we should halt.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Current CPU context.
 */
VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (CPUMGetGuestGif(pCtx))
    {
        if (CPUMIsGuestPhysIntrEnabled(pVCpu))
            return VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));

        if (   CPUMIsGuestInNestedHwvirtMode(pCtx)
            && CPUMIsGuestVirtIntrEnabled(pVCpu))
            return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
    }
    return false;
}

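/*
 * Illustrative sketch (hypothetical): a HLT exit handler deciding between
 * resuming the guest and really halting.  emExampleOnHltExit is made up.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC emExampleOnHltExit(PVMCPUCC pVCpu)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    if (EMShouldContinueAfterHalt(pVCpu, pCtx))
        return VINF_SUCCESS;    /* a pending interrupt will be delivered on resume */
    return VINF_EM_HALT;        /* let EM block this EMT */
}
#endif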

/**
 * Unhalts and wakes up the given CPU.
 *
 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up.  If
 * the CPU isn't currently in a halt, the next HLT instruction it executes will
 * be affected.
 *
 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpuDst    The cross context virtual CPU structure of the
 *                      CPU to unhalt and wake up.  This is usually not the
 *                      same as the caller.
 * @thread  EMT
 */
VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVMCC pVM, PVMCPUCC pVCpuDst)
{
    /*
     * Flag the current(/next) HLT to unhalt immediately.
     */
    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);

    /*
     * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
     * just do it here for now).
     */
#ifdef IN_RING0
    /* We might be here with preemption disabled or enabled (i.e. depending on
       thread-context hooks being used), so don't try obtaining the GVMMR0 used
       lock here. See @bugref{7270#c148}. */
    int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
    AssertRC(rc);

#elif defined(IN_RING3)
    VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, 0 /*fFlags*/);
    int rc = VINF_SUCCESS;
    RT_NOREF(pVM);

#else
    /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
    Assert(pVM->cCpus == 1); NOREF(pVM);
    int rc = VINF_SUCCESS;
#endif
    return rc;
}

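/*
 * Illustrative sketch (hypothetical): a KVM KICK_CPU style hypercall handler
 * waking up a target VCPU.  emExampleKickCpu and the bounds check are made
 * up; VMCC_GET_CPU comes from vmcc.h.
 */
#if 0 /* illustrative only */
static int emExampleKickCpu(PVMCC pVM, VMCPUID idCpuDst)
{
    if (idCpuDst >= pVM->cCpus)
        return VERR_INVALID_CPU_ID;
    return EMUnhaltAndWakeUp(pVM, VMCC_GET_CPU(pVM, idCpuDst));
}
#endif
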
#ifndef IN_RING3

/**
 * Makes an I/O port write pending for ring-3 processing.
 *
 * @returns VINF_EM_PENDING_R3_IOPORT_WRITE
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uPort   The I/O port.
 * @param   cbInstr The instruction length (for RIP updating).
 * @param   cbValue The write size.
 * @param   uValue  The value being written.
 * @sa      emR3ExecutePendingIoPortWrite
 *
 * @note    Must not be used when I/O port breakpoints are pending or when single stepping.
 */
VMMRZ_INT_DECL(VBOXSTRICTRC)
EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
{
    Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
    pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
    pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
    pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
    pVCpu->em.s.PendingIoPortAccess.uValue  = uValue;
    return VINF_EM_PENDING_R3_IOPORT_WRITE;
}


/**
 * Makes an I/O port read pending for ring-3 processing.
 *
 * @returns VINF_EM_PENDING_R3_IOPORT_READ
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uPort   The I/O port.
 * @param   cbInstr The instruction length (for RIP updating).
 * @param   cbValue The read size.
 * @sa      emR3ExecutePendingIoPortRead
 *
 * @note    Must not be used when I/O port breakpoints are pending or when single stepping.
 */
VMMRZ_INT_DECL(VBOXSTRICTRC)
EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
{
    Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
    pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
    pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
    pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
    pVCpu->em.s.PendingIoPortAccess.uValue  = UINT32_C(0x52454144); /* 'READ' */
    return VINF_EM_PENDING_R3_IOPORT_READ;
}

#endif /* !IN_RING3 */

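/*
 * Illustrative sketch (hypothetical): a ring-0 OUT handler deferring a write
 * it cannot complete to ring-3, where emR3ExecutePendingIoPortWrite replays
 * it.  emExampleOutHandler is made up.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC emExampleOutHandler(PVMCPU pVCpu, RTIOPORT uPort, uint32_t uValue, uint8_t cbValue, uint8_t cbInstr)
{
    /* Suppose this port can only be handled in ring-3: queue the access. */
    return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, cbValue, uValue);
}
#endif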

/**
 * Worker for EMHistoryExec that checks for ring-3 returns and flags
 * continuation of the EMHistoryExec run there.
 */
DECL_FORCE_INLINE(void) emHistoryExecSetContinueExitRecIdx(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, PCEMEXITREC pExitRec)
{
    pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
#ifdef IN_RING3
    RT_NOREF_PV(rcStrict); RT_NOREF_PV(pExitRec);
#else
    switch (VBOXSTRICTRC_VAL(rcStrict))
    {
        case VINF_SUCCESS:
        default:
            break;

        /*
         * Only status codes that EMHandleRCTmpl.h will resume EMHistoryExec with.
         */
        case VINF_IOM_R3_IOPORT_READ:           /* -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_IOPORT_WRITE:          /* -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_IOPORT_COMMIT_WRITE:   /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_MMIO_READ:             /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_WRITE:            /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_READ_WRITE:       /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_COMMIT_WRITE:     /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
        case VINF_CPUM_R3_MSR_READ:             /* -> emR3ExecuteInstruction */
        case VINF_CPUM_R3_MSR_WRITE:            /* -> emR3ExecuteInstruction */
        case VINF_GIM_R3_HYPERCALL:             /* -> emR3ExecuteInstruction */
            pVCpu->em.s.idxContinueExitRec = (uint16_t)(pExitRec - &pVCpu->em.s.aExitRecords[0]);
            break;
    }
#endif /* !IN_RING3 */
}


/**
 * Execute using history.
 *
 * This function will be called when EMHistoryAddExit() and friends return a
 * non-NULL result.  This happens in response to probing or when probing has
 * uncovered adjacent exits which can more effectively be reached by using IEM
 * than restarting execution using the main execution engine and fielding a
 * regular exit.
 *
 * @returns VBox strict status code, see IEMExecForExits.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pExitRec    The exit record returned by a previous history add
 *                      or update call.
 * @param   fWillExit   Flags indicating to IEM what will cause exits, TBD.
 */
VMM_INT_DECL(VBOXSTRICTRC) EMHistoryExec(PVMCPUCC pVCpu, PCEMEXITREC pExitRec, uint32_t fWillExit)
{
    Assert(pExitRec);
    VMCPU_ASSERT_EMT(pVCpu);
    IEMEXECFOREXITSTATS ExecStats;
    switch (pExitRec->enmAction)
    {
        /*
         * Executes multiple instructions, stopping only when we've gone a given
         * number without perceived exits.
         */
        case EMEXITACTION_EXEC_WITH_MAX:
        {
            STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryExec, a);
            LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %RX64, max %u\n", pExitRec->uFlatPC, pExitRec->cMaxInstructionsWithoutExit));
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
                                                    pExitRec->cMaxInstructionsWithoutExit /* cMinInstructions*/,
                                                    pVCpu->em.s.cHistoryExecMaxInstructions,
                                                    pExitRec->cMaxInstructionsWithoutExit,
                                                    &ExecStats);
            LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
                     VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
            emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRec);

            /* Ignore instructions IEM doesn't know about. */
            if (   (   rcStrict != VERR_IEM_INSTR_NOT_IMPLEMENTED
                    && rcStrict != VERR_IEM_ASPECT_NOT_IMPLEMENTED)
                || ExecStats.cInstructions == 0)
            { /* likely */ }
            else
                rcStrict = VINF_SUCCESS;

            if (ExecStats.cExits > 1)
                STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecSavedExits, ExecStats.cExits - 1);
            STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecInstructions, ExecStats.cInstructions);
            STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryExec, a);
            return rcStrict;
        }

        /*
         * Probe an exit for close-by exits.
         */
        case EMEXITACTION_EXEC_PROBE:
        {
            STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryProbe, b);
            LogFlow(("EMHistoryExec/EXEC_PROBE: %RX64\n", pExitRec->uFlatPC));
            PEMEXITREC   pExitRecUnconst = (PEMEXITREC)pExitRec;
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
                                                    pVCpu->em.s.cHistoryProbeMinInstructions,
                                                    pVCpu->em.s.cHistoryExecMaxInstructions,
                                                    pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit,
                                                    &ExecStats);
            LogFlow(("EMHistoryExec/EXEC_PROBE: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
                     VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
            emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRecUnconst);
            if (   ExecStats.cExits >= 2
                && RT_SUCCESS(rcStrict))
            {
                Assert(ExecStats.cMaxExitDistance > 0 && ExecStats.cMaxExitDistance <= 32);
                pExitRecUnconst->cMaxInstructionsWithoutExit = ExecStats.cMaxExitDistance;
                pExitRecUnconst->enmAction = EMEXITACTION_EXEC_WITH_MAX;
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> EXEC_WITH_MAX %u\n", ExecStats.cMaxExitDistance));
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedExecWithMax);
            }
#ifndef IN_RING3
            else if (   pVCpu->em.s.idxContinueExitRec != UINT16_MAX
                     && RT_SUCCESS(rcStrict))
            {
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedToRing3);
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> ring-3\n"));
            }
#endif
            else
            {
                pExitRecUnconst->enmAction = EMEXITACTION_NORMAL_PROBED;
                pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> PROBED\n"));
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedNormal);
                if (   rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
                    || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
                    rcStrict = VINF_SUCCESS;
            }
            STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryProbeInstructions, ExecStats.cInstructions);
            STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryProbe, b);
            return rcStrict;
        }

        /* We shouldn't ever see these here! */
        case EMEXITACTION_FREE_RECORD:
        case EMEXITACTION_NORMAL:
        case EMEXITACTION_NORMAL_PROBED:
            break;

        /* No default case, want compiler warnings. */
    }
    AssertLogRelFailedReturn(VERR_EM_INTERNAL_ERROR);
}


/**
 * Worker for emHistoryAddOrUpdateRecord.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInit(PEMEXITREC pExitRec, uint64_t uFlatPC, uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pExitRec->uFlatPC                     = uFlatPC;
    pExitRec->uFlagsAndType               = uFlagsAndType;
    pExitRec->enmAction                   = EMEXITACTION_NORMAL;
    pExitRec->bUnused                     = 0;
    pExitRec->cMaxInstructionsWithoutExit = 64;
    pExitRec->uLastExitNo                 = uExitNo;
    pExitRec->cHits                       = 1;
    return NULL;
}


/**
 * Worker for emHistoryAddOrUpdateRecord.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitNew(PVMCPU pVCpu, PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
                                                      PEMEXITREC pExitRec, uint64_t uFlatPC,
                                                      uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pHistEntry->idxSlot = (uint32_t)idxSlot;
    pVCpu->em.s.cExitRecordUsed++;
    LogFlow(("emHistoryRecordInitNew: [%#x] = %#07x %016RX64; (%u of %u used)\n", idxSlot, uFlagsAndType, uFlatPC,
             pVCpu->em.s.cExitRecordUsed, RT_ELEMENTS(pVCpu->em.s.aExitRecords) ));
    return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
}


/**
 * Worker for emHistoryAddOrUpdateRecord.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitReplacement(PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
                                                              PEMEXITREC pExitRec, uint64_t uFlatPC,
                                                              uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pHistEntry->idxSlot = (uint32_t)idxSlot;
    LogFlow(("emHistoryRecordInitReplacement: [%#x] = %#07x %016RX64 replacing %#07x %016RX64 with %u hits, %u exits old\n",
             idxSlot, uFlagsAndType, uFlatPC, pExitRec->uFlagsAndType, pExitRec->uFlatPC, pExitRec->cHits,
             uExitNo - pExitRec->uLastExitNo));
    return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
}


/**
 * Adds or updates the EMEXITREC for this PC/type and decides on an action.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type, EMEXIT_F_KIND_EM set and
 *                          both EMEXIT_F_CS_EIP and EMEXIT_F_UNFLATTENED_PC are clear.
 * @param   uFlatPC         The flattened program counter.
 * @param   pHistEntry      The exit history entry.
 * @param   uExitNo         The current exit number.
 */
static PCEMEXITREC emHistoryAddOrUpdateRecord(PVMCPU pVCpu, uint64_t uFlagsAndType, uint64_t uFlatPC,
                                              PEMEXITENTRY pHistEntry, uint64_t uExitNo)
{
# ifdef IN_RING0
    /* Disregard the hm flag. */
    uFlagsAndType &= ~EMEXIT_F_HM;
# endif

    /*
     * Work the hash table.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
# define EM_EXIT_RECORDS_IDX_MASK 0x3ff
    uintptr_t idxSlot = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
    PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
    if (pExitRec->uFlatPC == uFlatPC)
    {
        Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
        pHistEntry->idxSlot = (uint32_t)idxSlot;
        if (pExitRec->uFlagsAndType == uFlagsAndType)
        {
            pExitRec->uLastExitNo = uExitNo;
            STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[0]);
        }
        else
        {
            STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[0]);
            return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
        }
    }
    else if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
    {
        STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[0]);
        return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
    }
    else
    {
        /*
         * Collision.  We calculate a step value for moving away from the first
         * slot, taking up to 8 steps before replacing the least recently used
         * record.
         */
        uintptr_t idxOldest     = idxSlot;
        uint64_t  uOldestExitNo = pExitRec->uLastExitNo;
        unsigned  iOldestStep   = 0;
        unsigned  iStep         = 1;
        uintptr_t const idxAdd  = (uintptr_t)(uFlatPC >> 11) & (EM_EXIT_RECORDS_IDX_MASK / 4);
        for (;;)
        {
            Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecNew) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecTypeChanged) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));

            /* Step to the next slot. */
            idxSlot += idxAdd;
            idxSlot &= EM_EXIT_RECORDS_IDX_MASK;
            pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];

            /* Does it match? */
            if (pExitRec->uFlatPC == uFlatPC)
            {
                Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
                pHistEntry->idxSlot = (uint32_t)idxSlot;
                if (pExitRec->uFlagsAndType == uFlagsAndType)
                {
                    pExitRec->uLastExitNo = uExitNo;
                    STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[iStep]);
                    break;
                }
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[iStep]);
                return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }

            /* Is it free? */
            if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
            {
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[iStep]);
                return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }

            /* Is it the least recently used one? */
            if (pExitRec->uLastExitNo < uOldestExitNo)
            {
                uOldestExitNo = pExitRec->uLastExitNo;
                idxOldest     = idxSlot;
                iOldestStep   = iStep;
            }

            /* Next iteration? */
            iStep++;
            Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced));
            if (RT_LIKELY(iStep < 8 + 1))
            { /* likely */ }
            else
            {
                /* Replace the least recently used slot. */
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecReplaced[iOldestStep]);
                pExitRec = &pVCpu->em.s.aExitRecords[idxOldest];
                return emHistoryRecordInitReplacement(pHistEntry, idxOldest, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }
        }
    }

    /*
     * Found an existing record.
     */
    switch (pExitRec->enmAction)
    {
        case EMEXITACTION_NORMAL:
        {
            uint64_t const cHits = ++pExitRec->cHits;
            if (cHits < 256)
                return NULL;
            LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> EXEC_PROBE\n", idxSlot, uFlagsAndType, uFlatPC));
            pExitRec->enmAction = EMEXITACTION_EXEC_PROBE;
            return pExitRec;
        }

        case EMEXITACTION_NORMAL_PROBED:
            pExitRec->cHits += 1;
            return NULL;

        default:
            pExitRec->cHits += 1;
            return pExitRec;

        /* This will happen if the caller ignores or cannot serve the probe
           request (forced to ring-3, whatever).  We retry this 256 times. */
        case EMEXITACTION_EXEC_PROBE:
        {
            uint64_t const cHits = ++pExitRec->cHits;
            if (cHits < 512)
                return pExitRec;
            pExitRec->enmAction = EMEXITACTION_NORMAL_PROBED;
            LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> PROBED\n", idxSlot, uFlagsAndType, uFlatPC));
            return NULL;
        }
    }
}

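/*
 * Worked example for the hashing above (illustrative): with the 1024-entry
 * table, a flat PC of 0xfedcba98 maps to idxSlot = (0xfedcba98 >> 1) & 0x3ff
 * = 0x14c; on a collision the probe steps by idxAdd = (0xfedcba98 >> 11)
 * & 0xff = 0x97 slots (mod 1024), up to 8 times, before the least recently
 * used slot is replaced.
 */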

/**
 * Adds an exit to the history for this CPU.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @param   uFlatPC         The flattened program counter (RIP).  UINT64_MAX if not available.
 * @param   uTimestamp      The TSC value for the exit, 0 if not available.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryAddExit(PVMCPUCC pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Add the exit history entry.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t uExitNo = pVCpu->em.s.iNextExit++;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlatPC       = uFlatPC;
    pHistEntry->uTimestamp    = uTimestamp;
    pHistEntry->uFlagsAndType = uFlagsAndType;
    pHistEntry->idxSlot       = UINT32_MAX;

    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
#else
        && pVCpu->em.s.fExitOptimizationEnabled
#endif
        && uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
    return NULL;
}

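/*
 * Illustrative sketch (hypothetical): the typical pairing of EMHistoryAddExit
 * with EMHistoryExec in an exit handler.  emExampleOnPortIoExit and the exact
 * flags/type values are made up.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC emExampleOnPortIoExit(PVMCPUCC pVCpu, uint64_t uFlatPC, uint64_t uTsc)
{
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ),
                                            uFlatPC, uTsc);
    if (pExitRec)
        return EMHistoryExec(pVCpu, pExitRec, 0 /*fWillExit*/);  /* probe / burst via IEM */
    return VINF_SUCCESS;                                         /* take the normal exit path */
}
#endif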

/**
 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uFlatPC     The flattened program counter (RIP).
 * @param   fFlattened  Set if RIP was subjected to CS.BASE, clear if not.
 */
VMM_INT_DECL(void) EMHistoryUpdatePC(PVMCPUCC pVCpu, uint64_t uFlatPC, bool fFlattened)
{
    VMCPU_ASSERT_EMT(pVCpu);

    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlatPC = uFlatPC;
    if (fFlattened)
        pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
    else
        pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
}


/**
 * Interface for converting an engine-specific exit to a generic one and getting guidance.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndType(PVMCPUCC pVCpu, uint32_t uFlagsAndType)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Do the updating.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));

    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
#else
        && pVCpu->em.s.fExitOptimizationEnabled
#endif
        && pHistEntry->uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
    return NULL;
}


/**
 * Interface for converting an engine-specific exit to a generic one and getting
 * guidance, supplying the flattened PC too.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @param   uFlatPC         The flattened program counter (RIP).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPUCC pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(uFlatPC != UINT64_MAX);

    /*
     * Do the updating.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlagsAndType = uFlagsAndType;
    pHistEntry->uFlatPC       = uFlatPC;

    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
#else
        && pVCpu->em.s.fExitOptimizationEnabled
#endif
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
    return NULL;
}


/**
 * @callback_method_impl{FNDISREADBYTES}
 */
static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
{
    PVMCPUCC  pVCpu    = (PVMCPUCC)pDis->pvUser;
    RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;

    /*
     * Figure how much we can or must read.
     */
    size_t cbToRead = GUEST_PAGE_SIZE - (uSrcAddr & (GUEST_PAGE_SIZE - 1));
    if (cbToRead > cbMaxRead)
        cbToRead = cbMaxRead;
    else if (cbToRead < cbMinRead)
        cbToRead = cbMinRead;

    int rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
    if (RT_FAILURE(rc))
    {
        if (cbToRead > cbMinRead)
        {
            cbToRead = cbMinRead;
            rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
        }
        if (RT_FAILURE(rc))
        {
            /*
             * If we fail to find the page via the guest's page tables
             * we invalidate the page in the host TLB (pertaining to
             * the guest in the NestedPaging case).  See @bugref{6043}.
             */
            if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
            {
                HMInvalidatePage(pVCpu, uSrcAddr);
                if (((uSrcAddr + cbToRead - 1) >> GUEST_PAGE_SHIFT) != (uSrcAddr >> GUEST_PAGE_SHIFT))
                    HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
            }
        }
    }

    pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
    return rc;
}

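/*
 * Worked example for the clamping above (illustrative): with 4 KiB guest
 * pages, a read at uSrcAddr = 0x1000ffe gives cbToRead = 0x1000 - 0xffe = 2
 * bytes up to the page boundary; if cbMinRead is larger (say 4), cbToRead is
 * raised to 4 and the read crosses into the next page, which is why both the
 * first and last byte's pages may need invalidating on failure.
 */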

/**
 * Disassembles the current instruction.
 *
 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
 *          details.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDis        Where to return the parsed instruction info.
 * @param   pcbInstr    Where to return the instruction size. (optional)
 */
VMM_INT_DECL(int) EMInterpretDisasCurrent(PVMCC pVM, PVMCPUCC pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
{
    PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
    RTGCPTR GCPtrInstr;
#if 0
    int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
#else
/** @todo Get the CPU mode as well while we're at it! */
    int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
                                          pCtxCore->rip, &GCPtrInstr);
#endif
    if (RT_FAILURE(rc))
    {
        Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
             pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
        return rc;
    }
    return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
}

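/*
 * Illustrative sketch (hypothetical): a trap handler disassembling the
 * current guest instruction.  emExampleInspectInstruction is made up.
 */
#if 0 /* illustrative only */
static int emExampleInspectInstruction(PVMCC pVM, PVMCPUCC pVCpu)
{
    DISCPUSTATE Dis;
    unsigned    cbInstr = 0;
    int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
    if (RT_SUCCESS(rc))
        Log(("Instruction: opcode %#x, %u bytes\n", Dis.pCurInstr->uOpcode, cbInstr));
    return rc;
}
#endif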

/**
 * Disassembles one instruction.
 *
 * This is used internally by the interpreter and by trap/access handlers.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtrInstr  The flat address of the instruction.
 * @param   pCtxCore    The context core (used to determine the cpu mode).
 * @param   pDis        Where to return the parsed instruction info.
 * @param   pcbInstr    Where to return the instruction size. (optional)
 */
VMM_INT_DECL(int) EMInterpretDisasOneEx(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
                                        PDISCPUSTATE pDis, unsigned *pcbInstr)
{
    NOREF(pVM);
    Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
    DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
    /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
     *        \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
    int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;
    AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
    return rc;
}


/**
 * Interprets the current instruction.
 *
 * @returns VBox status code.
 * @retval  VINF_*                  Scheduling instructions.
 * @retval  VERR_EM_INTERPRETER     Something we can't cope with.
 * @retval  VERR_*                  Fatal errors.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pRegFrame   The register frame.
 *                      Updates the EIP if an instruction was executed successfully.
 * @param   pvFault     The fault address (CR2).
 *
 * @remark  Invalid opcode exceptions have a higher priority than GP (see Intel
 *          Architecture System Developers Manual, Vol 3, 5.5) so we don't need
 *          to worry about e.g. invalid modrm combinations (!)
 */
VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
    NOREF(pvFault);

    VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
    if (RT_UNLIKELY(   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rc = VERR_EM_INTERPRETER;
    if (rc != VINF_SUCCESS)
        Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));

    return rc;
}


/**
 * Interprets the current instruction.
 *
 * @returns VBox status code.
 * @retval  VINF_*                  Scheduling instructions.
 * @retval  VERR_EM_INTERPRETER     Something we can't cope with.
 * @retval  VERR_*                  Fatal errors.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   The register frame.
 *                      Updates the EIP if an instruction was executed successfully.
 * @param   pvFault     The fault address (CR2).
 * @param   pcbWritten  Size of the write (if applicable).
 *
 * @remark  Invalid opcode exceptions have a higher priority than GP (see Intel
 *          Architecture System Developers Manual, Vol 3, 5.5) so we don't need
 *          to worry about e.g. invalid modrm combinations (!)
 */
VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
{
    LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    NOREF(pvFault);

    VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
    if (RT_UNLIKELY(   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rc = VERR_EM_INTERPRETER;
    if (rc != VINF_SUCCESS)
        Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));

    return rc;
}


/**
 * Interprets the current instruction using the supplied DISCPUSTATE structure.
 *
 * IP/EIP/RIP *IS* updated!
 *
 * @returns VBox strict status code.
 * @retval  VINF_*                  Scheduling instructions.  When these are returned, it
 *                                  starts to get a bit tricky to know whether code was
 *                                  executed or not...  We'll address this when it becomes a problem.
 * @retval  VERR_EM_INTERPRETER     Something we can't cope with.
 * @retval  VERR_*                  Fatal errors.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pDis        The disassembler cpu state for the instruction to be
 *                      interpreted.
 * @param   pRegFrame   The register frame.  IP/EIP/RIP *IS* changed!
 * @param   pvFault     The fault address (CR2).
 * @param   enmCodeType Code type (user/supervisor)
 *
 * @remark  Invalid opcode exceptions have a higher priority than GP (see Intel
 *          Architecture System Developers Manual, Vol 3, 5.5) so we don't need
 *          to worry about e.g. invalid modrm combinations (!)
 *
 * @todo    At this time we do NOT check if the instruction overwrites vital information.
 *          Make sure this can't happen!! (will add some assertions/checks later)
 */
VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPUCC pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
                                                            RTGCPTR pvFault, EMCODETYPE enmCodeType)
{
    LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);

    VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
    if (RT_UNLIKELY(   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
        rc = VERR_EM_INTERPRETER;

    if (rc != VINF_SUCCESS)
        Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));

    return rc;
}




/*
 *
 * Old interpreter primitives used by HM, move/eliminate later.
 * Old interpreter primitives used by HM, move/eliminate later.
 * Old interpreter primitives used by HM, move/eliminate later.
 * Old interpreter primitives used by HM, move/eliminate later.
 * Old interpreter primitives used by HM, move/eliminate later.
 *
 */


/**
 * Interpret RDPMC.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pRegFrame   The register frame.
 *
 */
VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
{
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);

    /* If X86_CR4_PCE is not set, then CPL must be zero. */
    if (   !(uCR4 & X86_CR4_PCE)
        && CPUMGetGuestCPL(pVCpu) != 0)
    {
        Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
        return VERR_EM_INTERPRETER; /* genuine #GP */
    }

    /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
    pRegFrame->rax = 0;
    pRegFrame->rdx = 0;
    /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
     *        ecx but see @bugref{3472}! */

    NOREF(pVM);
    return VINF_SUCCESS;
}


/* VT-x only: */

/**
 * Interpret DRx write.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pRegFrame   The register frame.
 * @param   DestRegDrx  DRx register index (USE_REG_DR*)
 * @param   SrcRegGen   General purpose register index (USE_REG_E**)
 *
 */
VMM_INT_DECL(int) EMInterpretDRxWrite(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
{
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    uint64_t uNewDrX;
    int      rc;
    NOREF(pVM);

    if (CPUMIsGuestIn64BitCode(pVCpu))
        rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
    else
    {
        uint32_t val32;
        rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
        uNewDrX = val32;
    }

    if (RT_SUCCESS(rc))
    {
        if (DestRegDrx == 6)
        {
            uNewDrX |= X86_DR6_RA1_MASK;
            uNewDrX &= ~X86_DR6_RAZ_MASK;
        }
        else if (DestRegDrx == 7)
        {
            uNewDrX |= X86_DR7_RA1_MASK;
            uNewDrX &= ~X86_DR7_RAZ_MASK;
        }

        /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
        rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
        if (RT_SUCCESS(rc))
            return rc;
        AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
    }
    return VERR_EM_INTERPRETER;
}


/**
 * Interpret DRx read.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pRegFrame   The register frame.
 * @param   DestRegGen  General purpose register index (USE_REG_E**)
 * @param   SrcRegDrx   DRx register index (USE_REG_DR*)
 */
VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
{
    uint64_t val64;
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    NOREF(pVM);

    int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
    AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
    if (CPUMIsGuestIn64BitCode(pVCpu))
        rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
    else
        rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);

    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    return VERR_EM_INTERPRETER;
}
