VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@ 78425

Last change on this file since 78425 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 55.3 KB
Line 
1/* $Id: EMAll.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_EM
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/patm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/iem.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/hm.h>
31#include <VBox/vmm/pdmapi.h>
32#include <VBox/vmm/vmm.h>
33#include <VBox/vmm/stam.h>
34#include "EMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/param.h>
37#include <VBox/err.h>
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/log.h>
41#include <iprt/assert.h>
42#include <iprt/string.h>
43
44
45
46
47/**
48 * Get the current execution manager status.
49 *
50 * @returns Current status.
51 * @param pVCpu The cross context virtual CPU structure.
52 */
53VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
54{
55 return pVCpu->em.s.enmState;
56}
57
58
59/**
60 * Sets the current execution manager status. (use only when you know what you're doing!)
61 *
62 * @param pVCpu The cross context virtual CPU structure.
63 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
64 */
65VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
66{
67 /* Only allowed combination: */
68 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
69 pVCpu->em.s.enmState = enmNewState;
70}
71
72
73/**
74 * Sets the PC for which interrupts should be inhibited.
75 *
76 * @param pVCpu The cross context virtual CPU structure.
77 * @param PC The PC.
78 */
79VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
80{
81 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
82 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
83}
84
85
86/**
87 * Gets the PC for which interrupts should be inhibited.
88 *
89 * There are a few instructions which inhibits or delays interrupts
90 * for the instruction following them. These instructions are:
91 * - STI
92 * - MOV SS, r/m16
93 * - POP SS
94 *
95 * @returns The PC for which interrupts should be inhibited.
96 * @param pVCpu The cross context virtual CPU structure.
97 *
98 */
99VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
100{
101 return pVCpu->em.s.GCPtrInhibitInterrupts;
102}
103
104
105/**
106 * Checks if interrupt inhibiting is enabled for the current instruction.
107 *
108 * @returns true if interrupts are inhibited, false if not.
109 * @param pVCpu The cross context virtual CPU structure.
110 */
111VMMDECL(bool) EMIsInhibitInterruptsActive(PVMCPU pVCpu)
112{
113 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
114 return false;
115 if (pVCpu->em.s.GCPtrInhibitInterrupts == CPUMGetGuestRIP(pVCpu))
116 return true;
117 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
118 return false;
119}
120
121
122/**
123 * Enables / disable hypercall instructions.
124 *
125 * This interface is used by GIM to tell the execution monitors whether the
126 * hypercall instruction (VMMCALL & VMCALL) are allowed or should \#UD.
127 *
128 * @param pVCpu The cross context virtual CPU structure this applies to.
129 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
130 */
131VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
132{
133 pVCpu->em.s.fHypercallEnabled = fEnabled;
134}
135
136
137/**
138 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
139 *
140 * @returns true if enabled, false if not.
141 * @param pVCpu The cross context virtual CPU structure.
142 *
143 * @note If this call becomes a performance factor, we can make the data
144 * field available thru a read-only view in VMCPU. See VM::cpum.ro.
145 */
146VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
147{
148 return pVCpu->em.s.fHypercallEnabled;
149}
150
151
152/**
153 * Prepare an MWAIT - essentials of the MONITOR instruction.
154 *
155 * @returns VINF_SUCCESS
156 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
157 * @param rax The content of RAX.
158 * @param rcx The content of RCX.
159 * @param rdx The content of RDX.
160 * @param GCPhys The physical address corresponding to rax.
161 */
162VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
163{
164 pVCpu->em.s.MWait.uMonitorRAX = rax;
165 pVCpu->em.s.MWait.uMonitorRCX = rcx;
166 pVCpu->em.s.MWait.uMonitorRDX = rdx;
167 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
168 /** @todo Make use of GCPhys. */
169 NOREF(GCPhys);
170 /** @todo Complete MONITOR implementation. */
171 return VINF_SUCCESS;
172}
173
174
175/**
176 * Checks if the monitor hardware is armed / active.
177 *
178 * @returns true if armed, false otherwise.
179 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
180 */
181VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
182{
183 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
184}
185
186
/**
 * Checks if we're in a MWAIT.
 *
 * @retval  1 if regular,
 * @retval  > 1 if MWAIT with EMMWAIT_FLAG_BREAKIRQIF0
 * @retval  0 if not armed
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(unsigned) EMMonitorWaitIsActive(PVMCPU pVCpu)
{
    uint32_t fWait = pVCpu->em.s.MWait.fWait;
    /* The branch-free return below relies on these exact bit values/layout. */
    AssertCompile(EMMWAIT_FLAG_ACTIVE == 1);
    AssertCompile(EMMWAIT_FLAG_BREAKIRQIF0 == 2);
    AssertCompile((EMMWAIT_FLAG_ACTIVE << 1) == EMMWAIT_FLAG_BREAKIRQIF0);
    /* Only report BREAKIRQIF0 when ACTIVE is also set: the shifted ACTIVE bit
       masks it out otherwise. */
    return fWait & (EMMWAIT_FLAG_ACTIVE | ((fWait & EMMWAIT_FLAG_ACTIVE) << 1));
}
203
204
205/**
206 * Performs an MWAIT.
207 *
208 * @returns VINF_SUCCESS
209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
210 * @param rax The content of RAX.
211 * @param rcx The content of RCX.
212 */
213VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
214{
215 pVCpu->em.s.MWait.uMWaitRAX = rax;
216 pVCpu->em.s.MWait.uMWaitRCX = rcx;
217 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
218 if (rcx)
219 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
220 else
221 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
222 /** @todo not completely correct?? */
223 return VINF_EM_HALT;
224}
225
226
227/**
228 * Clears any address-range monitoring that is active.
229 *
230 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
231 */
232VMM_INT_DECL(void) EMMonitorWaitClear(PVMCPU pVCpu)
233{
234 LogFlowFunc(("Clearing MWAIT\n"));
235 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
236}
237
238
/**
 * Determine if we should continue execution in HM after encountering an mwait
 * instruction.
 *
 * Clears MWAIT flags if returning @c true.
 *
 * @returns true if we should continue, false if we should halt.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Current CPU context.
 */
VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /* Nothing can be delivered while the global interrupt flag is clear. */
    if (CPUMGetGuestGif(pCtx))
    {
        /* Interrupts must be deliverable - either physically, virtually (nested
           hw-virt), or by virtue of MWAIT's break-on-IRQ-even-if-IF=0 flag. */
        if (   CPUMIsGuestPhysIntrEnabled(pVCpu)
            || (   CPUMIsGuestInNestedHwvirtMode(pCtx)
                && CPUMIsGuestVirtIntrEnabled(pVCpu))
            || (   (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                ==                            (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
        {
            /* ... and there must actually be an interrupt pending. */
            if (VMCPU_FF_IS_ANY_SET(pVCpu, (  VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                            | VMCPU_FF_INTERRUPT_NESTED_GUEST)))
            {
                /* Disarm the MWAIT before resuming execution. */
                pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
                return true;
            }
        }
    }

    return false;
}
270
271
272/**
273 * Determine if we should continue execution in HM after encountering a hlt
274 * instruction.
275 *
276 * @returns true if we should continue, false if we should halt.
277 * @param pVCpu The cross context virtual CPU structure.
278 * @param pCtx Current CPU context.
279 */
280VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
281{
282 if (CPUMGetGuestGif(pCtx))
283 {
284 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
285 return VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
286
287 if ( CPUMIsGuestInNestedHwvirtMode(pCtx)
288 && CPUMIsGuestVirtIntrEnabled(pVCpu))
289 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
290 }
291 return false;
292}
293
294
/**
 * Unhalts and wakes up the given CPU.
 *
 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up.   If
 * the CPU isn't currently in a halt, the next HLT instruction it executes will
 * be affected.
 *
 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpuDst        The cross context virtual CPU structure of the
 *                          CPU to unhalt and wake up.  This is usually not the
 *                          same as the caller.
 * @thread  EMT
 */
VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst)
{
    /*
     * Flag the current(/next) HLT to unhalt immediately.
     */
    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);

    /*
     * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
     * just do it here for now).
     */
#ifdef IN_RING0
    /* We might be here with preemption disabled or enabled (i.e. depending on
       thread-context hooks being used), so don't try obtaining the GVMMR0 used
       lock here. See @bugref{7270#c148}. */
    int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
    AssertRC(rc);

#elif defined(IN_RING3)
    /* Ring-3 must go through the support driver to reach the ring-0 scheduler. */
    int rc = SUPR3CallVMMR0(pVM->pVMR0, pVCpuDst->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
    AssertRC(rc);

#else
    /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
    Assert(pVM->cCpus == 1); NOREF(pVM);
    int rc = VINF_SUCCESS;
#endif
    return rc;
}
339
#ifndef IN_RING3

/**
 * Makes an I/O port write pending for ring-3 processing.
 *
 * @returns VINF_EM_PENDING_R3_IOPORT_WRITE
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uPort   The I/O port.
 * @param   cbInstr The instruction length (for RIP updating).
 * @param   cbValue The write size.
 * @param   uValue  The value being written.
 * @sa      emR3ExecutePendingIoPortWrite
 *
 * @note    Must not be used when I/O port breakpoints are pending or when single stepping.
 */
VMMRZ_INT_DECL(VBOXSTRICTRC)
EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
{
    /* There may only be one pending access at a time. */
    Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
    pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
    pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
    pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
    pVCpu->em.s.PendingIoPortAccess.uValue  = uValue;
    return VINF_EM_PENDING_R3_IOPORT_WRITE;
}


/**
 * Makes an I/O port read pending for ring-3 processing.
 *
 * @returns VINF_EM_PENDING_R3_IOPORT_READ
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uPort   The I/O port.
 * @param   cbInstr The instruction length (for RIP updating).
 * @param   cbValue The read size.
 * @sa      emR3ExecutePendingIoPortRead
 *
 * @note    Must not be used when I/O port breakpoints are pending or when single stepping.
 */
VMMRZ_INT_DECL(VBOXSTRICTRC)
EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
{
    /* There may only be one pending access at a time. */
    Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
    pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
    pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
    pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
    /* Marker value for easy identification in dumps ('READ' in ASCII). */
    pVCpu->em.s.PendingIoPortAccess.uValue  = UINT32_C(0x52454144); /* 'READ' */
    return VINF_EM_PENDING_R3_IOPORT_READ;
}

#endif /* !IN_RING3 */
391
392
/**
 * Worker for EMHistoryExec that checks for ring-3 returns and flags
 * continuation of the EMHistoryExec run there.
 *
 * Sets pVCpu->em.s.idxContinueExitRec to the index of @a pExitRec when
 * @a rcStrict is one of the ring-3 deferral statuses, UINT16_MAX otherwise.
 */
DECL_FORCE_INLINE(void) emHistoryExecSetContinueExitRecIdx(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, PCEMEXITREC pExitRec)
{
    /* Default: no continuation. */
    pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
#ifdef IN_RING3
    RT_NOREF_PV(rcStrict); RT_NOREF_PV(pExitRec);
#else
    switch (VBOXSTRICTRC_VAL(rcStrict))
    {
        /* Success and anything unlisted: no ring-3 resume required. */
        case VINF_SUCCESS:
        default:
            break;

        /*
         * Only status codes that EMHandleRCTmpl.h will resume EMHistoryExec with.
         */
        case VINF_IOM_R3_IOPORT_READ:           /* -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_IOPORT_WRITE:          /* -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_IOPORT_COMMIT_WRITE:   /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
        case VINF_IOM_R3_MMIO_READ:             /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_WRITE:            /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_READ_WRITE:       /* -> emR3ExecuteInstruction */
        case VINF_IOM_R3_MMIO_COMMIT_WRITE:     /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
        case VINF_CPUM_R3_MSR_READ:             /* -> emR3ExecuteInstruction */
        case VINF_CPUM_R3_MSR_WRITE:            /* -> emR3ExecuteInstruction */
        case VINF_GIM_R3_HYPERCALL:             /* -> emR3ExecuteInstruction */
            /* Remember which record to resume with after the ring-3 round trip. */
            pVCpu->em.s.idxContinueExitRec = (uint16_t)(pExitRec - &pVCpu->em.s.aExitRecords[0]);
            break;
    }
#endif /* !IN_RING3 */
}
427
428#ifndef IN_RC
429
/**
 * Execute using history.
 *
 * This function will be called when EMHistoryAddExit() and friends returns a
 * non-NULL result.  This happens in response to probing or when probing has
 * uncovered adjacent exits which can more effectively be reached by using IEM
 * than restarting execution using the main execution engine and fielding a
 * regular exit.
 *
 * @returns VBox strict status code, see IEMExecForExits.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pExitRec    The exit record return by a previous history add
 *                      or update call.
 * @param   fWillExit   Flags indicating to IEM what will cause exits, TBD.
 */
VMM_INT_DECL(VBOXSTRICTRC) EMHistoryExec(PVMCPU pVCpu, PCEMEXITREC pExitRec, uint32_t fWillExit)
{
    Assert(pExitRec);
    VMCPU_ASSERT_EMT(pVCpu);
    IEMEXECFOREXITSTATS ExecStats;
    switch (pExitRec->enmAction)
    {
        /*
         * Executes multiple instruction stopping only when we've gone a given
         * number without perceived exits.
         */
        case EMEXITACTION_EXEC_WITH_MAX:
        {
            STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryExec, a);
            LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %RX64, max %u\n", pExitRec->uFlatPC, pExitRec->cMaxInstructionsWithoutExit));
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
                                                    pExitRec->cMaxInstructionsWithoutExit /* cMinInstructions*/,
                                                    pVCpu->em.s.cHistoryExecMaxInstructions,
                                                    pExitRec->cMaxInstructionsWithoutExit,
                                                    &ExecStats);
            LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
                     VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
            emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRec);

            /* Ignore instructions IEM doesn't know about (but only when at
               least one instruction was actually executed). */
            if (   (   rcStrict != VERR_IEM_INSTR_NOT_IMPLEMENTED
                    && rcStrict != VERR_IEM_ASPECT_NOT_IMPLEMENTED)
                || ExecStats.cInstructions == 0)
            { /* likely */ }
            else
                rcStrict = VINF_SUCCESS;

            if (ExecStats.cExits > 1)
                STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecSavedExits, ExecStats.cExits - 1);
            STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecInstructions, ExecStats.cInstructions);
            STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryExec, a);
            return rcStrict;
        }

        /*
         * Probe an exit for close by exits.
         */
        case EMEXITACTION_EXEC_PROBE:
        {
            STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryProbe, b);
            LogFlow(("EMHistoryExec/EXEC_PROBE: %RX64\n", pExitRec->uFlatPC));
            PEMEXITREC   pExitRecUnconst = (PEMEXITREC)pExitRec;
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
                                                    pVCpu->em.s.cHistoryProbeMinInstructions,
                                                    pVCpu->em.s.cHistoryExecMaxInstructions,
                                                    pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit,
                                                    &ExecStats);
            LogFlow(("EMHistoryExec/EXEC_PROBE: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
                     VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
            emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRecUnconst);
            /* Probe found adjacent exits: promote the record to EXEC_WITH_MAX. */
            if (   ExecStats.cExits >= 2
                && RT_SUCCESS(rcStrict))
            {
                Assert(ExecStats.cMaxExitDistance > 0 && ExecStats.cMaxExitDistance <= 32);
                pExitRecUnconst->cMaxInstructionsWithoutExit = ExecStats.cMaxExitDistance;
                pExitRecUnconst->enmAction = EMEXITACTION_EXEC_WITH_MAX;
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> EXEC_WITH_MAX %u\n", ExecStats.cMaxExitDistance));
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedExecWithMax);
            }
#ifndef IN_RING3
            /* Probe interrupted by a ring-3 deferral: resume it there. */
            else if (   pVCpu->em.s.idxContinueExitRec != UINT16_MAX
                     && RT_SUCCESS(rcStrict))
            {
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedToRing3);
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> ring-3\n"));
            }
#endif
            /* Nothing found (or failure): mark the record as probed-normal. */
            else
            {
                pExitRecUnconst->enmAction = EMEXITACTION_NORMAL_PROBED;
                pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
                LogFlow(("EMHistoryExec/EXEC_PROBE: -> PROBED\n"));
                STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedNormal);
                if (   rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
                    || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
                    rcStrict = VINF_SUCCESS;
            }
            STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryProbeInstructions, ExecStats.cInstructions);
            STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryProbe, b);
            return rcStrict;
        }

        /* We shouldn't ever see these here! */
        case EMEXITACTION_FREE_RECORD:
        case EMEXITACTION_NORMAL:
        case EMEXITACTION_NORMAL_PROBED:
            break;

        /* No default case, want compiler warnings. */
    }
    AssertLogRelFailedReturn(VERR_EM_INTERNAL_ERROR);
}
542
543
544/**
545 * Worker for emHistoryAddOrUpdateRecord.
546 */
547DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInit(PEMEXITREC pExitRec, uint64_t uFlatPC, uint32_t uFlagsAndType, uint64_t uExitNo)
548{
549 pExitRec->uFlatPC = uFlatPC;
550 pExitRec->uFlagsAndType = uFlagsAndType;
551 pExitRec->enmAction = EMEXITACTION_NORMAL;
552 pExitRec->bUnused = 0;
553 pExitRec->cMaxInstructionsWithoutExit = 64;
554 pExitRec->uLastExitNo = uExitNo;
555 pExitRec->cHits = 1;
556 return NULL;
557}
558
559
/**
 * Worker for emHistoryAddOrUpdateRecord.
 *
 * Claims a free record slot for the history entry, bumps the used-record
 * counter, and defers the field initialization to emHistoryRecordInit.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitNew(PVMCPU pVCpu, PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
                                                      PEMEXITREC pExitRec, uint64_t uFlatPC,
                                                      uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pHistEntry->idxSlot = (uint32_t)idxSlot;
    pVCpu->em.s.cExitRecordUsed++;
    LogFlow(("emHistoryRecordInitNew: [%#x] = %#07x %016RX64; (%u of %u used)\n", idxSlot, uFlagsAndType, uFlatPC,
             pVCpu->em.s.cExitRecordUsed, RT_ELEMENTS(pVCpu->em.s.aExitRecords) ));
    return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
}
573
574
/**
 * Worker for emHistoryAddOrUpdateRecord.
 *
 * Repurposes an in-use record slot (evicting its previous content, which is
 * logged) for the history entry, deferring the field initialization to
 * emHistoryRecordInit.
 */
DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitReplacement(PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
                                                              PEMEXITREC pExitRec, uint64_t uFlatPC,
                                                              uint32_t uFlagsAndType, uint64_t uExitNo)
{
    pHistEntry->idxSlot = (uint32_t)idxSlot;
    LogFlow(("emHistoryRecordInitReplacement: [%#x] = %#07x %016RX64 replacing %#07x %016RX64 with %u hits, %u exits old\n",
             idxSlot, uFlagsAndType, uFlatPC, pExitRec->uFlagsAndType, pExitRec->uFlatPC, pExitRec->cHits,
             uExitNo - pExitRec->uLastExitNo));
    return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
}
588
589
/**
 * Adds or updates the EMEXITREC for this PC/type and decide on an action.
 *
 * The exit records live in an open-addressing hash table keyed on the
 * flattened PC, with up to 8 probe steps before the least recently used slot
 * in the probed chain is replaced.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type, EMEXIT_F_KIND_EM set and
 *                          both EMEXIT_F_CS_EIP and EMEXIT_F_UNFLATTENED_PC are clear.
 * @param   uFlatPC         The flattened program counter.
 * @param   pHistEntry      The exit history entry.
 * @param   uExitNo         The current exit number.
 */
static PCEMEXITREC emHistoryAddOrUpdateRecord(PVMCPU pVCpu, uint64_t uFlagsAndType, uint64_t uFlatPC,
                                              PEMEXITENTRY pHistEntry, uint64_t uExitNo)
{
# ifdef IN_RING0
    /* Disregard the hm flag. */
    uFlagsAndType &= ~EMEXIT_F_HM;
# endif

    /*
     * Work the hash table.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
# define EM_EXIT_RECORDS_IDX_MASK 0x3ff
    uintptr_t  idxSlot  = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
    PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
    if (pExitRec->uFlatPC == uFlatPC)
    {
        /* Direct hit on the primary slot. */
        Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
        pHistEntry->idxSlot = (uint32_t)idxSlot;
        if (pExitRec->uFlagsAndType == uFlagsAndType)
        {
            pExitRec->uLastExitNo = uExitNo;
            STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[0]);
        }
        else
        {
            /* Same PC but different exit type: restart the record from scratch. */
            STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[0]);
            return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
        }
    }
    else if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
    {
        /* Primary slot is free: claim it. */
        STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[0]);
        return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
    }
    else
    {
        /*
         * Collision.  We calculate a new hash for stepping away from the first,
         * doing up to 8 steps away before replacing the least recently used record.
         */
        uintptr_t       idxOldest     = idxSlot;
        uint64_t        uOldestExitNo = pExitRec->uLastExitNo;
        unsigned        iOldestStep   = 0;
        unsigned        iStep         = 1;
        uintptr_t const idxAdd        = (uintptr_t)(uFlatPC >> 11) & (EM_EXIT_RECORDS_IDX_MASK / 4);
        for (;;)
        {
            Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecNew) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
            AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecTypeChanged) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));

            /* Step to the next slot. */
            idxSlot += idxAdd;
            idxSlot &= EM_EXIT_RECORDS_IDX_MASK;
            pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];

            /* Does it match? */
            if (pExitRec->uFlatPC == uFlatPC)
            {
                Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
                pHistEntry->idxSlot = (uint32_t)idxSlot;
                if (pExitRec->uFlagsAndType == uFlagsAndType)
                {
                    pExitRec->uLastExitNo = uExitNo;
                    STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[iStep]);
                    break;
                }
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[iStep]);
                return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }

            /* Is it free? */
            if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
            {
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[iStep]);
                return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }

            /* Is it the least recently used one? */
            if (pExitRec->uLastExitNo < uOldestExitNo)
            {
                uOldestExitNo = pExitRec->uLastExitNo;
                idxOldest     = idxSlot;
                iOldestStep   = iStep;
            }

            /* Next iteration? */
            iStep++;
            Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced));
            if (RT_LIKELY(iStep < 8 + 1))
            { /* likely */ }
            else
            {
                /* Replace the least recently used slot. */
                STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecReplaced[iOldestStep]);
                pExitRec = &pVCpu->em.s.aExitRecords[idxOldest];
                return emHistoryRecordInitReplacement(pHistEntry, idxOldest, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
            }
        }
    }

    /*
     * Found an existing record.
     */
    switch (pExitRec->enmAction)
    {
        /* After 256 hits a normal record graduates to probing. */
        case EMEXITACTION_NORMAL:
        {
            uint64_t const cHits = ++pExitRec->cHits;
            if (cHits < 256)
                return NULL;
            LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> EXEC_PROBE\n", idxSlot, uFlagsAndType, uFlatPC));
            pExitRec->enmAction = EMEXITACTION_EXEC_PROBE;
            return pExitRec;
        }

        case EMEXITACTION_NORMAL_PROBED:
            pExitRec->cHits += 1;
            return NULL;

        default:
            pExitRec->cHits += 1;
            return pExitRec;

        /* This will happen if the caller ignores or cannot serve the probe
           request (forced to ring-3, whatever).  We retry this 256 times. */
        case EMEXITACTION_EXEC_PROBE:
        {
            uint64_t const cHits = ++pExitRec->cHits;
            if (cHits < 512)
                return pExitRec;
            pExitRec->enmAction = EMEXITACTION_NORMAL_PROBED;
            LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> PROBED\n", idxSlot, uFlagsAndType, uFlatPC));
            return NULL;
        }
    }
}
742
743#endif /* !IN_RC */
744
/**
 * Adds an exit to the history for this CPU.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @param   uFlatPC         The flattened program counter (RIP).  UINT64_MAX if not available.
 * @param   uTimestamp      The TSC value for the exit, 0 if not available.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryAddExit(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Add the exit history entry (the history is a 256-entry ring buffer).
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t     uExitNo    = pVCpu->em.s.iNextExit++;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlatPC       = uFlatPC;
    pHistEntry->uTimestamp    = uTimestamp;
    pHistEntry->uFlagsAndType = uFlagsAndType;
    pHistEntry->idxSlot       = UINT32_MAX;

#ifndef IN_RC
    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     * (Requires a usable flattened PC and the exit optimization to be enabled
     * for the current context.)
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
# ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
# else
        && pVCpu->em.s.fExitOptimizationEnabled
# endif
        && uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
#endif
    return NULL;
}
789
790
#ifdef IN_RC
/**
 * Special raw-mode interface for adding an exit to the history.
 *
 * Currently this is only for recording, not optimizing, so no return value.  If
 * we start seriously caring about raw-mode again, we may extend it.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @param   uCs             The CS.
 * @param   uEip            The EIP.
 * @param   uTimestamp      The TSC value for the exit, 0 if not available.
 * @thread  EMT(0)
 */
VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
{
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
    /* Raw-mode only has CS:EIP here; pack it and flag the PC as unflattened CS:EIP. */
    pHistEntry->uFlatPC       = ((uint64_t)uCs << 32) |  uEip;
    pHistEntry->uTimestamp    = uTimestamp;
    pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
    pHistEntry->idxSlot       = UINT32_MAX;
}
#endif /* IN_RC */
815
816
#ifdef IN_RING0
/**
 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
 *
 * Patches up the most recent history entry (iNextExit - 1).
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uFlatPC     The flattened program counter (RIP).
 * @param   fFlattened  Set if RIP was subjected to CS.BASE, clear if not.
 */
VMMR0_INT_DECL(void) EMR0HistoryUpdatePC(PVMCPU pVCpu, uint64_t uFlatPC, bool fFlattened)
{
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t     uExitNo    = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlatPC = uFlatPC;
    if (fFlattened)
        pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
    else
        pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
}
#endif /* IN_RING0 */
837
838
/**
 * Interface for converting an engine-specific exit to a generic one and getting guidance.
 *
 * Updates the most recent history entry (iNextExit - 1), preserving its
 * CS:EIP / unflattened-PC flags.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndType(PVMCPU pVCpu, uint32_t uFlagsAndType)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Do the updating.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t     uExitNo    = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));

#ifndef IN_RC
    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
# ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
# else
        && pVCpu->em.s.fExitOptimizationEnabled
# endif
        && pHistEntry->uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
#endif
    return NULL;
}
878
879
/**
 * Interface for converting an engine-specific exit to a generic one and getting
 * guidance, supplying flattened PC too.
 *
 * Updates the most recent history entry (iNextExit - 1), overwriting both its
 * flags/type and its flattened PC.
 *
 * @returns Pointer to an exit record if special action should be taken using
 *          EMHistoryExec().  Take normal exit action when NULL.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
 * @param   uFlatPC         The flattened program counter (RIP).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(uFlatPC != UINT64_MAX);

    /*
     * Do the updating.
     */
    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    uint64_t     uExitNo    = pVCpu->em.s.iNextExit - 1;
    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
    pHistEntry->uFlagsAndType = uFlagsAndType;
    pHistEntry->uFlatPC       = uFlatPC;

#ifndef IN_RC
    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
# ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
# else
        && pVCpu->em.s.fExitOptimizationEnabled
# endif
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
#endif
    return NULL;
}
922
923
924/**
925 * Locks REM execution to a single VCPU.
926 *
927 * @param pVM The cross context VM structure.
928 */
929VMMDECL(void) EMRemLock(PVM pVM)
930{
931#ifdef VBOX_WITH_REM
932 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
933 return; /* early init */
934
935 Assert(!PGMIsLockOwner(pVM));
936 Assert(!IOMIsLockWriteOwner(pVM));
937 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
938 AssertRCSuccess(rc);
939#else
940 RT_NOREF(pVM);
941#endif
942}
943
944
945/**
946 * Unlocks REM execution
947 *
948 * @param pVM The cross context VM structure.
949 */
950VMMDECL(void) EMRemUnlock(PVM pVM)
951{
952#ifdef VBOX_WITH_REM
953 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
954 return; /* early init */
955
956 PDMCritSectLeave(&pVM->em.s.CritSectREM);
957#else
958 RT_NOREF(pVM);
959#endif
960}
961
962
963/**
964 * Check if this VCPU currently owns the REM lock.
965 *
966 * @returns bool owner/not owner
967 * @param pVM The cross context VM structure.
968 */
969VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
970{
971#ifdef VBOX_WITH_REM
972 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
973 return true; /* early init */
974
975 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
976#else
977 RT_NOREF(pVM);
978 return true;
979#endif
980}
981
982
983/**
984 * Try to acquire the REM lock.
985 *
986 * @returns VBox status code
987 * @param pVM The cross context VM structure.
988 */
989VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
990{
991#ifdef VBOX_WITH_REM
992 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
993 return VINF_SUCCESS; /* early init */
994
995 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
996#else
997 RT_NOREF(pVM);
998 return VINF_SUCCESS;
999#endif
1000}
1001
1002
1003/**
1004 * @callback_method_impl{FNDISREADBYTES}
1005 */
1006static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
1007{
1008 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
1009#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
1010 PVM pVM = pVCpu->CTX_SUFF(pVM);
1011#endif
1012 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
1013 int rc;
1014
1015 /*
1016 * Figure how much we can or must read.
1017 */
1018 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
1019 if (cbToRead > cbMaxRead)
1020 cbToRead = cbMaxRead;
1021 else if (cbToRead < cbMinRead)
1022 cbToRead = cbMinRead;
1023
1024#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
1025 /*
1026 * We might be called upon to interpret an instruction in a patch.
1027 */
1028 if (PATMIsPatchGCAddr(pVM, uSrcAddr))
1029 {
1030# ifdef IN_RC
1031 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
1032# else
1033 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVM, uSrcAddr), cbToRead);
1034# endif
1035 rc = VINF_SUCCESS;
1036 }
1037 else
1038#endif
1039 {
1040# ifdef IN_RC
1041 /*
1042 * Try access it thru the shadow page tables first. Fall back on the
1043 * slower PGM method if it fails because the TLB or page table was
1044 * modified recently.
1045 */
1046 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
1047 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
1048 {
1049 cbToRead = cbMinRead;
1050 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
1051 }
1052 if (rc == VERR_ACCESS_DENIED)
1053#endif
1054 {
1055 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
1056 if (RT_FAILURE(rc))
1057 {
1058 if (cbToRead > cbMinRead)
1059 {
1060 cbToRead = cbMinRead;
1061 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
1062 }
1063 if (RT_FAILURE(rc))
1064 {
1065#ifndef IN_RC
1066 /*
1067 * If we fail to find the page via the guest's page tables
1068 * we invalidate the page in the host TLB (pertaining to
1069 * the guest in the NestedPaging case). See @bugref{6043}.
1070 */
1071 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
1072 {
1073 HMInvalidatePage(pVCpu, uSrcAddr);
1074 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
1075 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
1076 }
1077#endif
1078 }
1079 }
1080 }
1081 }
1082
1083 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
1084 return rc;
1085}
1086
1087
1088
1089/**
1090 * Disassembles the current instruction.
1091 *
1092 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
1093 * details.
1094 *
1095 * @param pVM The cross context VM structure.
1096 * @param pVCpu The cross context virtual CPU structure.
1097 * @param pDis Where to return the parsed instruction info.
1098 * @param pcbInstr Where to return the instruction size. (optional)
1099 */
1100VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
1101{
1102 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
1103 RTGCPTR GCPtrInstr;
1104#if 0
1105 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
1106#else
1107/** @todo Get the CPU mode as well while we're at it! */
1108 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
1109 pCtxCore->rip, &GCPtrInstr);
1110#endif
1111 if (RT_FAILURE(rc))
1112 {
1113 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
1114 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
1115 return rc;
1116 }
1117 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
1118}
1119
1120
1121/**
1122 * Disassembles one instruction.
1123 *
1124 * This is used by internally by the interpreter and by trap/access handlers.
1125 *
1126 * @returns VBox status code.
1127 *
1128 * @param pVM The cross context VM structure.
1129 * @param pVCpu The cross context virtual CPU structure.
1130 * @param GCPtrInstr The flat address of the instruction.
1131 * @param pCtxCore The context core (used to determine the cpu mode).
1132 * @param pDis Where to return the parsed instruction info.
1133 * @param pcbInstr Where to return the instruction size. (optional)
1134 */
1135VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
1136 PDISCPUSTATE pDis, unsigned *pcbInstr)
1137{
1138 NOREF(pVM);
1139 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
1140 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
1141 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
1142 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
1143 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
1144 if (RT_SUCCESS(rc))
1145 return VINF_SUCCESS;
1146 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
1147 return rc;
1148}
1149
1150
1151/**
1152 * Interprets the current instruction.
1153 *
1154 * @returns VBox status code.
1155 * @retval VINF_* Scheduling instructions.
1156 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1157 * @retval VERR_* Fatal errors.
1158 *
1159 * @param pVCpu The cross context virtual CPU structure.
1160 * @param pRegFrame The register frame.
1161 * Updates the EIP if an instruction was executed successfully.
1162 * @param pvFault The fault address (CR2).
1163 *
1164 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1165 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1166 * to worry about e.g. invalid modrm combinations (!)
1167 */
1168VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1169{
1170 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1171 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1172 NOREF(pvFault);
1173
1174 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1175 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1176 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1177 rc = VERR_EM_INTERPRETER;
1178 if (rc != VINF_SUCCESS)
1179 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1180
1181 return rc;
1182}
1183
1184
1185/**
1186 * Interprets the current instruction.
1187 *
1188 * @returns VBox status code.
1189 * @retval VINF_* Scheduling instructions.
1190 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1191 * @retval VERR_* Fatal errors.
1192 *
1193 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1194 * @param pRegFrame The register frame.
1195 * Updates the EIP if an instruction was executed successfully.
1196 * @param pvFault The fault address (CR2).
1197 * @param pcbWritten Size of the write (if applicable).
1198 *
1199 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1200 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1201 * to worry about e.g. invalid modrm combinations (!)
1202 */
1203VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
1204{
1205 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1206 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1207 NOREF(pvFault);
1208
1209 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
1210 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1211 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1212 rc = VERR_EM_INTERPRETER;
1213 if (rc != VINF_SUCCESS)
1214 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1215
1216 return rc;
1217}
1218
1219
1220/**
1221 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1222 *
1223 * IP/EIP/RIP *IS* updated!
1224 *
1225 * @returns VBox strict status code.
1226 * @retval VINF_* Scheduling instructions. When these are returned, it
1227 * starts to get a bit tricky to know whether code was
1228 * executed or not... We'll address this when it becomes a problem.
1229 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1230 * @retval VERR_* Fatal errors.
1231 *
1232 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1233 * @param pDis The disassembler cpu state for the instruction to be
1234 * interpreted.
1235 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1236 * @param pvFault The fault address (CR2).
1237 * @param enmCodeType Code type (user/supervisor)
1238 *
1239 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1240 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1241 * to worry about e.g. invalid modrm combinations (!)
1242 *
1243 * @todo At this time we do NOT check if the instruction overwrites vital information.
1244 * Make sure this can't happen!! (will add some assertions/checks later)
1245 */
1246VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
1247 RTGCPTR pvFault, EMCODETYPE enmCodeType)
1248{
1249 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1250 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1251 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
1252
1253 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1254 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1255 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1256 rc = VERR_EM_INTERPRETER;
1257
1258 if (rc != VINF_SUCCESS)
1259 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1260
1261 return rc;
1262}
1263
1264#ifdef IN_RC
1265
1266DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1267{
1268 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1269 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1270 return rc;
1271 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1272}
1273
1274
1275/**
1276 * Interpret IRET (currently only to V86 code) - PATM only.
1277 *
1278 * @returns VBox status code.
1279 * @param pVM The cross context VM structure.
1280 * @param pVCpu The cross context virtual CPU structure.
1281 * @param pRegFrame The register frame.
1282 *
1283 */
1284VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1285{
1286 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1287 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1288 int rc;
1289
1290 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1291 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1292 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1293 * this function. Fear that it may guru on us, thus not converted to
1294 * IEM. */
1295
1296 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1297 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1298 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1299 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1300 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1301
1302 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1303 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1304 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1305 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1306 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1307 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1308 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1309
1310 pRegFrame->eip = eip & 0xffff;
1311 pRegFrame->cs.Sel = cs;
1312
1313 /* Mask away all reserved bits */
1314 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1315 eflags &= uMask;
1316
1317 CPUMRawSetEFlags(pVCpu, eflags);
1318 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1319
1320 pRegFrame->esp = esp;
1321 pRegFrame->ss.Sel = ss;
1322 pRegFrame->ds.Sel = ds;
1323 pRegFrame->es.Sel = es;
1324 pRegFrame->fs.Sel = fs;
1325 pRegFrame->gs.Sel = gs;
1326
1327 return VINF_SUCCESS;
1328}
1329
1330
1331#endif /* IN_RC */
1332
1333
1334
1335/*
1336 *
1337 * Old interpreter primitives used by HM, move/eliminate later.
1338 * Old interpreter primitives used by HM, move/eliminate later.
1339 * Old interpreter primitives used by HM, move/eliminate later.
1340 * Old interpreter primitives used by HM, move/eliminate later.
1341 * Old interpreter primitives used by HM, move/eliminate later.
1342 *
1343 */
1344
1345
1346/**
1347 * Interpret RDPMC.
1348 *
1349 * @returns VBox status code.
1350 * @param pVM The cross context VM structure.
1351 * @param pVCpu The cross context virtual CPU structure.
1352 * @param pRegFrame The register frame.
1353 *
1354 */
1355VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1356{
1357 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1358 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1359
1360 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1361 if ( !(uCR4 & X86_CR4_PCE)
1362 && CPUMGetGuestCPL(pVCpu) != 0)
1363 {
1364 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1365 return VERR_EM_INTERPRETER; /* genuine #GP */
1366 }
1367
1368 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1369 pRegFrame->rax = 0;
1370 pRegFrame->rdx = 0;
1371 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1372 * ecx but see @bugref{3472}! */
1373
1374 NOREF(pVM);
1375 return VINF_SUCCESS;
1376}
1377
1378
1379/**
1380 * MWAIT Emulation.
1381 */
1382VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1383{
1384 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1385 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1386 NOREF(pVM);
1387
1388 /* Get the current privilege level. */
1389 cpl = CPUMGetGuestCPL(pVCpu);
1390 if (cpl != 0)
1391 return VERR_EM_INTERPRETER; /* supervisor only */
1392
1393 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1394 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1395 return VERR_EM_INTERPRETER; /* not supported */
1396
1397 /*
1398 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1399 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1400 */
1401 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1402 if (pRegFrame->ecx > 1)
1403 {
1404 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1405 return VERR_EM_INTERPRETER; /* illegal value. */
1406 }
1407
1408 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1409 {
1410 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1411 return VERR_EM_INTERPRETER; /* illegal value. */
1412 }
1413
1414 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1415}
1416
1417
1418/**
1419 * MONITOR Emulation.
1420 */
1421VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1422{
1423 uint32_t u32Dummy, u32ExtFeatures, cpl;
1424 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1425 NOREF(pVM);
1426
1427 if (pRegFrame->ecx != 0)
1428 {
1429 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1430 return VERR_EM_INTERPRETER; /* illegal value. */
1431 }
1432
1433 /* Get the current privilege level. */
1434 cpl = CPUMGetGuestCPL(pVCpu);
1435 if (cpl != 0)
1436 return VERR_EM_INTERPRETER; /* supervisor only */
1437
1438 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1439 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1440 return VERR_EM_INTERPRETER; /* not supported */
1441
1442 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1443 return VINF_SUCCESS;
1444}
1445
1446
1447/* VT-x only: */
1448
1449/**
1450 * Interpret DRx write.
1451 *
1452 * @returns VBox status code.
1453 * @param pVM The cross context VM structure.
1454 * @param pVCpu The cross context virtual CPU structure.
1455 * @param pRegFrame The register frame.
1456 * @param DestRegDrx DRx register index (USE_REG_DR*)
1457 * @param SrcRegGen General purpose register index (USE_REG_E**))
1458 *
1459 */
1460VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1461{
1462 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1463 uint64_t uNewDrX;
1464 int rc;
1465 NOREF(pVM);
1466
1467 if (CPUMIsGuestIn64BitCode(pVCpu))
1468 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1469 else
1470 {
1471 uint32_t val32;
1472 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1473 uNewDrX = val32;
1474 }
1475
1476 if (RT_SUCCESS(rc))
1477 {
1478 if (DestRegDrx == 6)
1479 {
1480 uNewDrX |= X86_DR6_RA1_MASK;
1481 uNewDrX &= ~X86_DR6_RAZ_MASK;
1482 }
1483 else if (DestRegDrx == 7)
1484 {
1485 uNewDrX |= X86_DR7_RA1_MASK;
1486 uNewDrX &= ~X86_DR7_RAZ_MASK;
1487 }
1488
1489 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1490 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1491 if (RT_SUCCESS(rc))
1492 return rc;
1493 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1494 }
1495 return VERR_EM_INTERPRETER;
1496}
1497
1498
1499/**
1500 * Interpret DRx read.
1501 *
1502 * @returns VBox status code.
1503 * @param pVM The cross context VM structure.
1504 * @param pVCpu The cross context virtual CPU structure.
1505 * @param pRegFrame The register frame.
1506 * @param DestRegGen General purpose register index (USE_REG_E**))
1507 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1508 */
1509VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1510{
1511 uint64_t val64;
1512 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1513 NOREF(pVM);
1514
1515 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1516 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1517 if (CPUMIsGuestIn64BitCode(pVCpu))
1518 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1519 else
1520 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1521
1522 if (RT_SUCCESS(rc))
1523 return VINF_SUCCESS;
1524
1525 return VERR_EM_INTERPRETER;
1526}
1527
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette