Changeset 78804 in vbox for trunk/src/VBox/VMM
- Timestamp:
- May 28, 2019 8:33:35 AM (6 years ago)
- svn:sync-xref-src-repo-rev:
- 130900
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r78798 r78804 410 410 /** @} */ 411 411 412 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 413 /** @name Nested-guest VM-exit handlers. 414 * @{ 415 */ 416 //static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi; 417 //static FNVMXEXITHANDLER hmR0VmxExitExtIntNested; 418 //static FNVMXEXITHANDLER hmR0VmxExitTripleFault; 419 static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindowNested; 420 //static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow; 421 static FNVMXEXITHANDLER hmR0VmxExitTaskSwitchNested; 422 //static FNVMXEXITHANDLER hmR0VmxExitCpuid; 423 //static FNVMXEXITHANDLER hmR0VmxExitGetsec; 424 static FNVMXEXITHANDLER hmR0VmxExitHltNested; 425 //static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd; 426 static FNVMXEXITHANDLER hmR0VmxExitInvlpgNested; 427 //static FNVMXEXITHANDLER hmR0VmxExitRdpmc; 428 //static FNVMXEXITHANDLER hmR0VmxExitVmcall; 429 //static FNVMXEXITHANDLER hmR0VmxExitVmclear; 430 //static FNVMXEXITHANDLER hmR0VmxExitVmlaunch; 431 //static FNVMXEXITHANDLER hmR0VmxExitVmptrld; 432 //static FNVMXEXITHANDLER hmR0VmxExitVmptrst; 433 //static FNVMXEXITHANDLER hmR0VmxExitVmread; 434 //static FNVMXEXITHANDLER hmR0VmxExitVmresume; 435 //static FNVMXEXITHANDLER hmR0VmxExitVmwrite; 436 //static FNVMXEXITHANDLER hmR0VmxExitVmxoff; 437 //static FNVMXEXITHANDLER hmR0VmxExitVmxon; 438 //static FNVMXEXITHANDLER hmR0VmxExitInvvpid; 439 static FNVMXEXITHANDLER hmR0VmxExitRdtscNested; 440 static FNVMXEXITHANDLER hmR0VmxExitMovCRxNested; 441 static FNVMXEXITHANDLER hmR0VmxExitMovDRxNested; 442 static FNVMXEXITHANDLER hmR0VmxExitIoInstrNested; 443 static FNVMXEXITHANDLER hmR0VmxExitRdmsrNested; 444 static FNVMXEXITHANDLER hmR0VmxExitWrmsrNested; 445 static FNVMXEXITHANDLER hmR0VmxExitMwaitNested; 446 static FNVMXEXITHANDLER hmR0VmxExitMtfNested; 447 static FNVMXEXITHANDLER hmR0VmxExitMonitorNested; 448 static FNVMXEXITHANDLER hmR0VmxExitPauseNested; 449 //static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold; 450 //static FNVMXEXITHANDLER hmR0VmxExitApicAccess; 451 static FNVMXEXITHANDLER 
hmR0VmxExitXdtrAccessNested; 452 //static FNVMXEXITHANDLER hmR0VmxExitEptViolation; 453 //static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig; 454 static FNVMXEXITHANDLER hmR0VmxExitRdtscpNested; 455 //static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer; 456 static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvdNested; 457 //static FNVMXEXITHANDLER hmR0VmxExitXsetbv; 458 //static FNVMXEXITHANDLER hmR0VmxExitRdrand; 459 static FNVMXEXITHANDLER hmR0VmxExitInvpcidNested; 460 //static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD; 461 //static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState; 462 //static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUnexpected; 463 static FNVMXEXITHANDLER hmR0VmxExitInstrNested; 464 static FNVMXEXITHANDLER hmR0VmxExitInstrWithInfoNested; 465 /** @} */ 466 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ 467 412 468 /** @name Helpers for hardware exceptions VM-exit handlers. 413 469 * @{ … … 420 476 static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 421 477 static VBOXSTRICTRC hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 422 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst);423 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, uint8_t cbInstr);424 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);425 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);478 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst); 479 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr); 480 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg); 481 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg); 426 482 /** @} */ 
427 483 … … 2949 3005 Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT); 2950 3006 Assert(pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT); 2951 fVal |= VMX_PIN_CTL _POSTED_INT;3007 fVal |= VMX_PIN_CTLS_POSTED_INT; 2952 3008 } 2953 3009 #endif … … 12451 12507 # define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr 12452 12508 #endif 12453 uint32_t const rcReason = pVmxTransient->uExitReason;12454 switch ( rcReason)12509 uint32_t const uExitReason = pVmxTransient->uExitReason; 12510 switch (uExitReason) 12455 12511 { 12456 12512 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient)); … … 12552 12608 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12553 12609 { 12554 VBOXSTRICTRC rcStrict;12555 12610 uint32_t const uExitReason = pVmxTransient->uExitReason; 12556 12611 switch (uExitReason) 12557 12612 { 12558 case VMX_EXIT_EPT_MISCONFIG: 12559 rcStrict = hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient); 12560 break; 12561 12562 case VMX_EXIT_EPT_VIOLATION: 12563 rcStrict = hmR0VmxExitEptViolation(pVCpu, pVmxTransient); 12564 break; 12565 12566 case VMX_EXIT_IO_INSTR: 12567 { 12568 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12569 AssertRCReturn(rc, rc); 12570 12571 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual); 12572 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual); 12573 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1); 12574 12575 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */ 12576 uint8_t const cbAccess = s_aIOSizes[uIOSize]; 12577 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess)) 12578 { 12579 /* 12580 * IN/OUT instruction: 12581 * - Provides VM-exit instruction length. 12582 * 12583 * INS/OUTS instruction: 12584 * - Provides VM-exit instruction length. 12585 * - Provides Guest-linear address. 
12586 * - Optionally provides VM-exit instruction info (depends on CPU feature). 12587 */ 12588 PVM pVM = pVCpu->CTX_SUFF(pVM); 12589 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12590 AssertRCReturn(rc, rc); 12591 12592 /* Make sure we don't use stale VMX-transient info. */ 12593 pVmxTransient->ExitInstrInfo.u = 0; 12594 pVmxTransient->uGuestLinearAddr = 0; 12595 12596 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo; 12597 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual); 12598 if (fIOString) 12599 { 12600 rc |= hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient); 12601 if (fVmxInsOutsInfo) 12602 { 12603 Assert(RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */ 12604 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 12605 } 12606 } 12607 AssertRCReturn(rc, rc); 12608 12609 VMXVEXITINFO ExitInfo; 12610 RT_ZERO(ExitInfo); 12611 ExitInfo.uReason = uExitReason; 12612 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12613 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12614 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo; 12615 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr; 12616 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12617 } 12618 else 12619 rcStrict = hmR0VmxExitIoInstr(pVCpu, pVmxTransient); 12620 break; 12621 } 12622 12623 case VMX_EXIT_HLT: 12624 { 12625 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12626 AssertRCReturn(rc, rc); 12627 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT)) 12628 rcStrict = IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 12629 else 12630 rcStrict = hmR0VmxExitHlt(pVCpu, pVmxTransient); 12631 break; 12632 } 12633 12634 case VMX_EXIT_RDTSC: 12635 { 12636 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12637 AssertRCReturn(rc, rc); 12638 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT)) 12639 rcStrict = 
IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 12640 else 12641 rcStrict = hmR0VmxExitRdtsc(pVCpu, pVmxTransient); 12642 break; 12643 } 12644 12645 case VMX_EXIT_RDTSCP: 12646 { 12647 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12648 AssertRCReturn(rc, rc); 12649 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT)) 12650 rcStrict = IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 12651 else 12652 rcStrict = hmR0VmxExitRdtsc(pVCpu, pVmxTransient); 12653 break; 12654 } 12613 case VMX_EXIT_EPT_MISCONFIG: return hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient); 12614 case VMX_EXIT_EPT_VIOLATION: return hmR0VmxExitEptViolation(pVCpu, pVmxTransient); 12615 case VMX_EXIT_IO_INSTR: return hmR0VmxExitIoInstrNested(pVCpu, pVmxTransient); 12616 case VMX_EXIT_HLT: return hmR0VmxExitHltNested(pVCpu, pVmxTransient); 12617 case VMX_EXIT_RDTSC: return hmR0VmxExitRdtscNested(pVCpu, pVmxTransient); 12618 case VMX_EXIT_RDTSCP: return hmR0VmxExitRdtscpNested(pVCpu, pVmxTransient); 12655 12619 12656 12620 /* … … 12666 12630 case VMX_EXIT_VMRESUME: 12667 12631 case VMX_EXIT_VMXOFF: 12668 { 12669 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12670 AssertRCReturn(rc, rc); 12671 rcStrict = IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 12672 break; 12673 } 12632 return hmR0VmxExitInstrNested(pVCpu, pVmxTransient); 12674 12633 12675 12634 /* … … 12691 12650 case VMX_EXIT_VMPTRST: 12692 12651 case VMX_EXIT_VMXON: 12693 { 12694 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12695 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12696 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 12697 AssertRCReturn(rc, rc); 12698 12699 VMXVEXITINFO ExitInfo; 12700 RT_ZERO(ExitInfo); 12701 ExitInfo.uReason = uExitReason; 12702 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12703 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12704 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo; 12705 rcStrict = 
IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12706 break; 12707 } 12708 12709 case VMX_EXIT_INVLPG: 12710 { 12711 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT)) 12712 { 12713 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12714 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12715 AssertRCReturn(rc, rc); 12716 12717 VMXVEXITINFO ExitInfo; 12718 RT_ZERO(ExitInfo); 12719 ExitInfo.uReason = uExitReason; 12720 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12721 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12722 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12723 } 12724 else 12725 rcStrict = hmR0VmxExitInvlpg(pVCpu, pVmxTransient); 12726 break; 12727 } 12728 12729 case VMX_EXIT_INVPCID: 12730 { 12731 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT)) 12732 { 12733 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12734 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12735 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 12736 AssertRCReturn(rc, rc); 12737 12738 VMXVEXITINFO ExitInfo; 12739 RT_ZERO(ExitInfo); 12740 ExitInfo.uReason = uExitReason; 12741 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12742 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12743 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo; 12744 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12745 } 12746 else 12747 rcStrict = hmR0VmxExitInvpcid(pVCpu, pVmxTransient); 12748 break; 12749 } 12750 12751 case VMX_EXIT_RDMSR: 12752 { 12753 uint32_t fMsrpm; 12754 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS)) 12755 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), pVCpu->cpum.GstCtx.ecx); 12756 else 12757 fMsrpm = VMXMSRPM_EXIT_RD; 12758 12759 if (fMsrpm & VMXMSRPM_EXIT_RD) 12760 { 12761 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12762 AssertRCReturn(rc, rc); 12763 rcStrict = IEMExecVmxVmexitInstr(pVCpu, 
uExitReason, pVmxTransient->cbInstr); 12764 } 12765 else 12766 rcStrict = hmR0VmxExitRdmsr(pVCpu, pVmxTransient); 12767 break; 12768 } 12769 12770 case VMX_EXIT_WRMSR: 12771 { 12772 uint32_t fMsrpm; 12773 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS)) 12774 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), pVCpu->cpum.GstCtx.ecx); 12775 else 12776 fMsrpm = VMXMSRPM_EXIT_WR; 12777 12778 if (fMsrpm & VMXMSRPM_EXIT_WR) 12779 { 12780 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12781 AssertRCReturn(rc, rc); 12782 rcStrict = IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 12783 } 12784 else 12785 rcStrict = hmR0VmxExitWrmsr(pVCpu, pVmxTransient); 12786 break; 12787 } 12788 12789 case VMX_EXIT_TASK_SWITCH: 12790 { 12791 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12792 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12793 rc |= hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); 12794 rc |= hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient); 12795 AssertRCReturn(rc, rc); 12796 12797 VMXVEXITINFO ExitInfo; 12798 RT_ZERO(ExitInfo); 12799 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12800 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12801 12802 VMXVEXITEVENTINFO ExitEventInfo; 12803 RT_ZERO(ExitInfo); 12804 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo; 12805 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode; 12806 12807 rcStrict = IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo); 12808 break; 12809 } 12810 12811 case VMX_EXIT_WBINVD: 12812 { 12813 if (CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT)) 12814 { 12815 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12816 AssertRCReturn(rc, rc); 12817 rcStrict = IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 12818 } 12819 else 12820 rcStrict = hmR0VmxExitInvpcid(pVCpu, pVmxTransient); 12821 break; 
12822 } 12823 12824 case VMX_EXIT_MTF: 12825 { 12826 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */ 12827 rcStrict = IEMExecVmxVmexit(pVCpu, uExitReason); 12828 break; 12829 } 12652 return hmR0VmxExitInstrWithInfoNested(pVCpu, pVmxTransient); 12653 12654 /* 12655 * We shouldn't direct host physical interrupts to the nested-guest. 12656 */ 12657 case VMX_EXIT_EXT_INT: 12658 return hmR0VmxExitExtInt(pVCpu, pVmxTransient); 12659 12660 case VMX_EXIT_RDMSR: return hmR0VmxExitRdmsrNested(pVCpu, pVmxTransient); 12661 case VMX_EXIT_WRMSR: return hmR0VmxExitWrmsrNested(pVCpu, pVmxTransient); 12662 case VMX_EXIT_INVLPG: return hmR0VmxExitInvlpgNested(pVCpu, pVmxTransient); 12663 case VMX_EXIT_INVPCID: return hmR0VmxExitInvpcidNested(pVCpu, pVmxTransient); 12664 case VMX_EXIT_TASK_SWITCH: return hmR0VmxExitTaskSwitchNested(pVCpu, pVmxTransient); 12665 case VMX_EXIT_WBINVD: return hmR0VmxExitWbinvdNested(pVCpu, pVmxTransient); 12666 case VMX_EXIT_MTF: return hmR0VmxExitMtfNested(pVCpu, pVmxTransient); 12830 12667 12831 12668 case VMX_EXIT_APIC_ACCESS: … … 12833 12670 { 12834 12671 /** @todo NSTVMX: APIC-access, Xcpt or NMI, Mov CRx. 
*/ 12835 rcStrict = hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient); 12836 break; 12837 } 12838 12839 case VMX_EXIT_MOV_CRX: 12840 { 12841 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12842 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12843 AssertRCReturn(rc, rc); 12844 12845 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual); 12846 switch (uAccessType) 12847 { 12848 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: 12849 { 12850 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 12851 Assert(pVmcsNstGst); 12852 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual); 12853 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual); 12854 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs)); 12855 uint64_t const uNewCrx = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64; 12856 if (CPUMIsGuestVmxMovToCr0Cr4InterceptSet(pVCpu, &pVCpu->cpum.GstCtx, iCrReg, uNewCrx)) 12857 { 12858 VMXVEXITINFO ExitInfo; 12859 RT_ZERO(ExitInfo); 12860 ExitInfo.uReason = uExitReason; 12861 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12862 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12863 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12864 } 12865 else 12866 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->cbInstr, iGReg, iCrReg); 12867 break; 12868 } 12869 12870 case VMX_EXIT_QUAL_CRX_ACCESS_READ: 12871 { 12872 /* 12873 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking). 12874 * CR2 reads do not cause a VM-exit. 12875 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control. 12876 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control. 
12877 */ 12878 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual); 12879 if ( iCrReg == 3 12880 || iCrReg == 8) 12881 { 12882 static const uint32_t s_aCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0, 12883 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT }; 12884 uint32_t const uIntercept = s_aCrXReadIntercepts[iCrReg]; 12885 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, uIntercept)) 12886 { 12887 VMXVEXITINFO ExitInfo; 12888 RT_ZERO(ExitInfo); 12889 ExitInfo.uReason = uExitReason; 12890 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12891 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12892 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12893 } 12894 else 12895 { 12896 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual); 12897 rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmxTransient->cbInstr, iGReg, iCrReg); 12898 } 12899 } 12900 else 12901 { 12902 pVCpu->hm.s.u32HMError = iCrReg; 12903 AssertMsgFailedReturn(("MOV from CR%d VM-exit must not happen\n", iCrReg), VERR_VMX_UNEXPECTED_EXIT); 12904 } 12905 break; 12906 } 12907 12908 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: 12909 { 12910 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 12911 Assert(pVmcsNstGst); 12912 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u; 12913 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u; 12914 if ( (uGstHostMask & X86_CR0_TS) 12915 && (uReadShadow & X86_CR0_TS)) 12916 { 12917 VMXVEXITINFO ExitInfo; 12918 RT_ZERO(ExitInfo); 12919 ExitInfo.uReason = uExitReason; 12920 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12921 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12922 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12923 } 12924 else 12925 rcStrict = hmR0VmxExitClts(pVCpu, pVmxTransient->cbInstr); 12926 break; 12927 } 12928 12929 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */ 12930 { 12931 RTGCPTR GCPtrEffDst; 12932 uint16_t const uNewMsw = 
VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual); 12933 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual); 12934 if (fMemOperand) 12935 { 12936 rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient); 12937 AssertRCReturn(rc, rc); 12938 GCPtrEffDst = pVmxTransient->uGuestLinearAddr; 12939 } 12940 else 12941 GCPtrEffDst = NIL_RTGCPTR; 12942 12943 if (CPUMIsGuestVmxLmswInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, uNewMsw)) 12944 { 12945 VMXVEXITINFO ExitInfo; 12946 RT_ZERO(ExitInfo); 12947 ExitInfo.uReason = uExitReason; 12948 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12949 ExitInfo.u64GuestLinearAddr = GCPtrEffDst; 12950 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12951 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12952 } 12953 else 12954 rcStrict = hmR0VmxExitLmsw(pVCpu, pVmxTransient->cbInstr, uNewMsw, GCPtrEffDst); 12955 break; 12956 } 12957 12958 default: 12959 { 12960 pVCpu->hm.s.u32HMError = uAccessType; 12961 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType), 12962 VERR_VMX_UNEXPECTED_EXIT); 12963 } 12964 } 12965 break; 12966 } 12967 12968 case VMX_EXIT_EXT_INT: 12969 { 12970 /* We shouldn't direct physical interrupts to the nested-guest. */ 12971 rcStrict = hmR0VmxExitExtInt(pVCpu, pVmxTransient); 12972 break; 12973 } 12974 12975 case VMX_EXIT_INT_WINDOW: 12672 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient); 12673 } 12674 12675 case VMX_EXIT_MOV_CRX: return hmR0VmxExitMovCRxNested(pVCpu, pVmxTransient); 12676 case VMX_EXIT_INT_WINDOW: return hmR0VmxExitIntWindowNested(pVCpu, pVmxTransient); 12677 12976 12678 case VMX_EXIT_TPR_BELOW_THRESHOLD: 12977 12679 { 12978 /** @todo NSTVMX: Interrupt window, TPR below threshold. 
*/ 12979 rcStrict = hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient); 12980 break; 12981 } 12982 12983 case VMX_EXIT_MWAIT: 12984 { 12985 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT)) 12986 { 12987 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12988 AssertRCReturn(rc, rc); 12989 rcStrict = IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 12990 } 12991 else 12992 rcStrict = hmR0VmxExitMwait(pVCpu, pVmxTransient); 12993 break; 12994 } 12995 12996 case VMX_EXIT_MONITOR: 12997 { 12998 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT)) 12999 { 13000 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13001 AssertRCReturn(rc, rc); 13002 rcStrict = IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 13003 } 13004 else 13005 rcStrict = hmR0VmxExitMonitor(pVCpu, pVmxTransient); 13006 break; 13007 } 13008 13009 case VMX_EXIT_PAUSE: 13010 { 13011 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */ 13012 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept 13013 * PAUSE when executing a nested-guest? If it does not, we would not need 13014 * to check for the intercepts here. Just call VM-exit... */ 13015 if ( CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT) 13016 || CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)) 13017 { 13018 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13019 AssertRCReturn(rc, rc); 13020 rcStrict = IEMExecVmxVmexitInstr(pVCpu, uExitReason, pVmxTransient->cbInstr); 13021 } 13022 else 13023 rcStrict = hmR0VmxExitPause(pVCpu, pVmxTransient); 13024 break; 13025 } 12680 /** @todo NSTVMX: TPR below threshold. 
*/ 12681 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient); 12682 } 12683 12684 case VMX_EXIT_MWAIT: return hmR0VmxExitMwaitNested(pVCpu, pVmxTransient); 12685 case VMX_EXIT_MONITOR: return hmR0VmxExitMonitorNested(pVCpu, pVmxTransient); 12686 case VMX_EXIT_PAUSE: return hmR0VmxExitPauseNested(pVCpu, pVmxTransient); 13026 12687 13027 12688 case VMX_EXIT_PREEMPT_TIMER: 13028 12689 { 13029 12690 /** @todo NSTVMX: Preempt timer. */ 13030 rcStrict = hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient); 13031 break; 13032 } 13033 13034 case VMX_EXIT_MOV_DRX: 13035 { 13036 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT)) 13037 { 13038 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 13039 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13040 AssertRCReturn(rc, rc); 13041 13042 VMXVEXITINFO ExitInfo; 13043 RT_ZERO(ExitInfo); 13044 ExitInfo.cbInstr = pVmxTransient->cbInstr; 13045 ExitInfo.u64Qual = pVmxTransient->uExitQual; 13046 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 13047 } 13048 else 13049 rcStrict = hmR0VmxExitMovDRx(pVCpu, pVmxTransient); 13050 break; 13051 } 13052 12691 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient); 12692 } 12693 12694 case VMX_EXIT_MOV_DRX: return hmR0VmxExitMovDRxNested(pVCpu, pVmxTransient); 13053 12695 case VMX_EXIT_GDTR_IDTR_ACCESS: 13054 case VMX_EXIT_LDTR_TR_ACCESS: 13055 { 13056 if (CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT)) 13057 { 13058 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 13059 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13060 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13061 AssertRCReturn(rc, rc); 13062 13063 VMXVEXITINFO ExitInfo; 13064 RT_ZERO(ExitInfo); 13065 ExitInfo.cbInstr = pVmxTransient->cbInstr; 13066 ExitInfo.u64Qual = pVmxTransient->uExitQual; 13067 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo; 13068 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 13069 } 13070 
else 13071 rcStrict = hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient); 13072 break; 13073 } 12696 case VMX_EXIT_LDTR_TR_ACCESS: return hmR0VmxExitXdtrAccessNested(pVCpu, pVmxTransient); 13074 12697 13075 12698 case VMX_EXIT_RDRAND: … … 13098 12721 case VMX_EXIT_APIC_WRITE: 13099 12722 default: 13100 rcStrict = hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient); 13101 break; 13102 } 13103 13104 if (rcStrict == VINF_IEM_RAISED_XCPT) 13105 { 13106 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13107 rcStrict = VINF_SUCCESS; 13108 } 13109 13110 return rcStrict; 12723 { 12724 /** @todo NSTVMX: implement me! */ 12725 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient); 12726 } 12727 } 13111 12728 } 13112 12729 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ … … 14424 14041 * @retval VINF_SUCCESS when guest execution can continue. 14425 14042 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3. 14426 * @retval VERR_EM_ INTERPRETER when something unexpected happened, fallbackto14427 * in terpreter.14043 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to 14044 * incompatible guest state for VMX execution (real-on-v86 case). 
14428 14045 */ 14429 14046 HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) … … 14435 14052 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 14436 14053 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 14437 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);14438 14054 AssertRCReturn(rc, rc); 14439 14055 … … 14446 14062 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: /* MOV to CRx */ 14447 14063 { 14064 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 14065 AssertRCReturn(rc, rc); 14066 14067 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 14448 14068 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0; 14449 14069 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual); … … 14465 14085 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)); 14466 14086 14467 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVm xTransient->cbInstr, iGReg, iCrReg);14087 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmcsInfo, pVmxTransient->cbInstr, iGReg, iCrReg); 14468 14088 AssertMsg( rcStrict == VINF_SUCCESS 14469 || rcStrict == VINF_IEM_RAISED_XCPT14470 14089 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 14471 14090 … … 14477 14096 * 14478 14097 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the 14479 * latter is an alias for VINF_IEM_RAISED_XCPT which is converted to VINF_SUCCESs14480 * at the end ofthis function.14098 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of 14099 * this function. 14481 14100 */ 14482 14101 if ( iCrReg == 0 … … 14515 14134 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)); 14516 14135 14517 rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVm xTransient->cbInstr, iGReg, iCrReg);14136 rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbInstr, iGReg, iCrReg); 14518 14137 break; 14519 14138 } … … 14524 14143 * CLTS (Clear Task-Switch Flag in CR0). 
14525 14144 */ 14526 rcStrict = hmR0VmxExitClts(pVCpu, pVm xTransient->cbInstr);14145 rcStrict = hmR0VmxExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbInstr); 14527 14146 break; 14528 14147 } … … 14546 14165 else 14547 14166 GCPtrEffDst = NIL_RTGCPTR; 14548 rcStrict = hmR0VmxExitLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);14167 rcStrict = hmR0VmxExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst); 14549 14168 break; 14550 14169 } … … 14554 14173 pVCpu->hm.s.u32HMError = uAccessType; 14555 14174 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType), 14556 VERR_VMX_UNEXPECTED_EXCEPTION); 14557 } 14558 } 14559 14560 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS)) 14561 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS)); 14562 if (rcStrict == VINF_IEM_RAISED_XCPT) 14563 { 14564 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 14565 rcStrict = VINF_SUCCESS; 14566 } 14175 VERR_VMX_UNEXPECTED_EXIT); 14176 } 14177 } 14178 14179 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS)) 14180 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS)); 14181 Assert(rcStrict != VINF_IEM_RAISED_XCPT); 14567 14182 14568 14183 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2); … … 15654 15269 * VM-exit helper for LMSW. 
15655 15270 */ 15656 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst) 15657 { 15271 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, 15272 RTGCPTR GCPtrEffDst) 15273 { 15274 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 15275 AssertRCReturn(rc, rc); 15276 15658 15277 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst); 15659 15278 AssertMsg( rcStrict == VINF_SUCCESS 15660 15279 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 15280 15281 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0); 15282 if (rcStrict == VINF_IEM_RAISED_XCPT) 15283 { 15284 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15285 rcStrict = VINF_SUCCESS; 15286 } 15287 15661 15288 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw); 15662 15289 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 15663 15664 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);15665 15290 return rcStrict; 15666 15291 } … … 15670 15295 * VM-exit helper for CLTS. 
15671 15296 */ 15672 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, uint8_t cbInstr) 15673 { 15297 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr) 15298 { 15299 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 15300 AssertRCReturn(rc, rc); 15301 15674 15302 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr); 15675 15303 AssertMsg( rcStrict == VINF_SUCCESS 15676 15304 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 15677 15305 15306 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0); 15307 if (rcStrict == VINF_IEM_RAISED_XCPT) 15308 { 15309 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15310 rcStrict = VINF_SUCCESS; 15311 } 15312 15678 15313 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts); 15679 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);15680 15314 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 15681 15315 return rcStrict; … … 15686 15320 * VM-exit helper for MOV from CRx (CRx read). 
15687 15321 */ 15688 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)15322 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg) 15689 15323 { 15690 15324 Assert(iCrReg < 16); 15691 15325 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs)); 15326 15327 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 15328 AssertRCReturn(rc, rc); 15692 15329 15693 15330 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg); … … 15699 15336 else 15700 15337 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 15701 15702 15338 #ifdef VBOX_WITH_STATISTICS 15703 15339 switch (iCrReg) … … 15718 15354 * VM-exit helper for MOV to CRx (CRx write). 15719 15355 */ 15720 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg) 15721 { 15356 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg) 15357 { 15358 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 15359 AssertRCReturn(rc, rc); 15360 15722 15361 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg); 15723 15362 AssertMsg( rcStrict == VINF_SUCCESS … … 15772 15411 } 15773 15412 15413 if (rcStrict == VINF_IEM_RAISED_XCPT) 15414 { 15415 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15416 rcStrict = VINF_SUCCESS; 15417 } 15774 15418 return rcStrict; 15775 15419 } … … 16116 15760 16117 15761 /** @} */ 15762 15763 /** @name Nested-guest VM-exit handlers. 
15764 * @{ 15765 */ 15766 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 15767 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 15768 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 15769 15770 /** 15771 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW). 15772 */ 15773 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindowNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 15774 { 15775 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT)) 15776 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason); 15777 return hmR0VmxExitIntWindow(pVCpu, pVmxTransient); 15778 } 15779 15780 15781 /** 15782 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). 15783 * Unconditional VM-exit. 15784 */ 15785 HMVMX_EXIT_DECL hmR0VmxExitTaskSwitchNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 15786 { 15787 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 15788 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 15789 rc |= hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); 15790 rc |= hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient); 15791 AssertRCReturn(rc, rc); 15792 15793 VMXVEXITINFO ExitInfo; 15794 RT_ZERO(ExitInfo); 15795 ExitInfo.cbInstr = pVmxTransient->cbInstr; 15796 ExitInfo.u64Qual = pVmxTransient->uExitQual; 15797 15798 VMXVEXITEVENTINFO ExitEventInfo; 15799 RT_ZERO(ExitInfo); 15800 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo; 15801 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode; 15802 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo); 15803 } 15804 15805 15806 /** 15807 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit. 
15808 */ 15809 HMVMX_EXIT_DECL hmR0VmxExitHltNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 15810 { 15811 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT)) 15812 { 15813 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 15814 AssertRCReturn(rc, rc); 15815 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 15816 } 15817 return hmR0VmxExitHlt(pVCpu, pVmxTransient); 15818 } 15819 15820 15821 /** 15822 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit. 15823 */ 15824 HMVMX_EXIT_DECL hmR0VmxExitInvlpgNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 15825 { 15826 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT)) 15827 { 15828 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 15829 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 15830 AssertRCReturn(rc, rc); 15831 15832 VMXVEXITINFO ExitInfo; 15833 RT_ZERO(ExitInfo); 15834 ExitInfo.uReason = pVmxTransient->uExitReason; 15835 ExitInfo.cbInstr = pVmxTransient->cbInstr; 15836 ExitInfo.u64Qual = pVmxTransient->uExitQual; 15837 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 15838 } 15839 return hmR0VmxExitInvlpg(pVCpu, pVmxTransient); 15840 } 15841 15842 15843 /** 15844 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit. 15845 */ 15846 HMVMX_EXIT_DECL hmR0VmxExitRdtscNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 15847 { 15848 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT)) 15849 { 15850 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 15851 AssertRCReturn(rc, rc); 15852 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 15853 } 15854 15855 return hmR0VmxExitRdtsc(pVCpu, pVmxTransient); 15856 } 15857 15858 15859 /** 15860 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). 15861 * Conditional VM-exit. 
15862 */ 15863 HMVMX_EXIT_DECL hmR0VmxExitMovCRxNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 15864 { 15865 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 15866 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 15867 AssertRCReturn(rc, rc); 15868 15869 VBOXSTRICTRC rcStrict; 15870 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual); 15871 switch (uAccessType) 15872 { 15873 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: 15874 { 15875 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 15876 Assert(pVmcsNstGst); 15877 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual); 15878 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual); 15879 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs)); 15880 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64; 15881 if (CPUMIsGuestVmxMovToCr0Cr4InterceptSet(pVCpu, &pVCpu->cpum.GstCtx, iCrReg, uNewCrX)) 15882 { 15883 VMXVEXITINFO ExitInfo; 15884 RT_ZERO(ExitInfo); 15885 ExitInfo.uReason = pVmxTransient->uExitReason; 15886 ExitInfo.cbInstr = pVmxTransient->cbInstr; 15887 ExitInfo.u64Qual = pVmxTransient->uExitQual; 15888 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 15889 } 15890 else 15891 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbInstr, iGReg, iCrReg); 15892 break; 15893 } 15894 15895 case VMX_EXIT_QUAL_CRX_ACCESS_READ: 15896 { 15897 /* 15898 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking). 15899 * CR2 reads do not cause a VM-exit. 15900 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control. 15901 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control. 
15902 */ 15903 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual); 15904 if ( iCrReg == 3 15905 || iCrReg == 8) 15906 { 15907 static const uint32_t s_aCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0, 15908 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT }; 15909 uint32_t const uIntercept = s_aCrXReadIntercepts[iCrReg]; 15910 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, uIntercept)) 15911 { 15912 VMXVEXITINFO ExitInfo; 15913 RT_ZERO(ExitInfo); 15914 ExitInfo.uReason = pVmxTransient->uExitReason; 15915 ExitInfo.cbInstr = pVmxTransient->cbInstr; 15916 ExitInfo.u64Qual = pVmxTransient->uExitQual; 15917 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 15918 } 15919 else 15920 { 15921 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual); 15922 rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbInstr, iGReg, iCrReg); 15923 } 15924 } 15925 else 15926 { 15927 pVCpu->hm.s.u32HMError = iCrReg; 15928 AssertMsgFailedReturn(("MOV from CR%d VM-exit must not happen\n", iCrReg), VERR_VMX_UNEXPECTED_EXIT); 15929 } 15930 break; 15931 } 15932 15933 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: 15934 { 15935 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 15936 Assert(pVmcsNstGst); 15937 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u; 15938 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u; 15939 if ( (uGstHostMask & X86_CR0_TS) 15940 && (uReadShadow & X86_CR0_TS)) 15941 { 15942 VMXVEXITINFO ExitInfo; 15943 RT_ZERO(ExitInfo); 15944 ExitInfo.uReason = pVmxTransient->uExitReason; 15945 ExitInfo.cbInstr = pVmxTransient->cbInstr; 15946 ExitInfo.u64Qual = pVmxTransient->uExitQual; 15947 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 15948 } 15949 else 15950 rcStrict = hmR0VmxExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbInstr); 15951 break; 15952 } 15953 15954 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status 
Word into CR0) */ 15955 { 15956 RTGCPTR GCPtrEffDst; 15957 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual); 15958 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual); 15959 if (fMemOperand) 15960 { 15961 rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient); 15962 AssertRCReturn(rc, rc); 15963 GCPtrEffDst = pVmxTransient->uGuestLinearAddr; 15964 } 15965 else 15966 GCPtrEffDst = NIL_RTGCPTR; 15967 15968 if (CPUMIsGuestVmxLmswInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, uNewMsw)) 15969 { 15970 VMXVEXITINFO ExitInfo; 15971 RT_ZERO(ExitInfo); 15972 ExitInfo.uReason = pVmxTransient->uExitReason; 15973 ExitInfo.cbInstr = pVmxTransient->cbInstr; 15974 ExitInfo.u64GuestLinearAddr = GCPtrEffDst; 15975 ExitInfo.u64Qual = pVmxTransient->uExitQual; 15976 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 15977 } 15978 else 15979 rcStrict = hmR0VmxExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbInstr, uNewMsw, GCPtrEffDst); 15980 break; 15981 } 15982 15983 default: 15984 { 15985 pVCpu->hm.s.u32HMError = uAccessType; 15986 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType), 15987 VERR_VMX_UNEXPECTED_EXIT); 15988 } 15989 } 15990 15991 if (rcStrict == VINF_IEM_RAISED_XCPT) 15992 { 15993 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15994 rcStrict = VINF_SUCCESS; 15995 } 15996 return rcStrict; 15997 } 15998 15999 16000 /** 16001 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). 16002 * Conditional VM-exit. 
16003 */ 16004 HMVMX_EXIT_DECL hmR0VmxExitMovDRxNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16005 { 16006 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT)) 16007 { 16008 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 16009 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16010 AssertRCReturn(rc, rc); 16011 16012 VMXVEXITINFO ExitInfo; 16013 RT_ZERO(ExitInfo); 16014 ExitInfo.cbInstr = pVmxTransient->cbInstr; 16015 ExitInfo.u64Qual = pVmxTransient->uExitQual; 16016 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 16017 } 16018 return hmR0VmxExitMovDRx(pVCpu, pVmxTransient); 16019 } 16020 16021 16022 /** 16023 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). 16024 * Conditional VM-exit. 16025 */ 16026 HMVMX_EXIT_DECL hmR0VmxExitIoInstrNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16027 { 16028 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 16029 AssertRCReturn(rc, rc); 16030 16031 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual); 16032 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual); 16033 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1); 16034 16035 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */ 16036 uint8_t const cbAccess = s_aIOSizes[uIOSize]; 16037 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess)) 16038 { 16039 /* 16040 * IN/OUT instruction: 16041 * - Provides VM-exit instruction length. 16042 * 16043 * INS/OUTS instruction: 16044 * - Provides VM-exit instruction length. 16045 * - Provides Guest-linear address. 16046 * - Optionally provides VM-exit instruction info (depends on CPU feature). 16047 */ 16048 PVM pVM = pVCpu->CTX_SUFF(pVM); 16049 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16050 AssertRCReturn(rc, rc); 16051 16052 /* Make sure we don't use stale VMX-transient info. 
*/ 16053 pVmxTransient->ExitInstrInfo.u = 0; 16054 pVmxTransient->uGuestLinearAddr = 0; 16055 16056 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo; 16057 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual); 16058 if (fIOString) 16059 { 16060 rc |= hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient); 16061 if (fVmxInsOutsInfo) 16062 { 16063 Assert(RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */ 16064 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 16065 } 16066 } 16067 AssertRCReturn(rc, rc); 16068 16069 VMXVEXITINFO ExitInfo; 16070 RT_ZERO(ExitInfo); 16071 ExitInfo.uReason = pVmxTransient->uExitReason; 16072 ExitInfo.cbInstr = pVmxTransient->cbInstr; 16073 ExitInfo.u64Qual = pVmxTransient->uExitQual; 16074 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo; 16075 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr; 16076 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 16077 } 16078 return hmR0VmxExitIoInstr(pVCpu, pVmxTransient); 16079 } 16080 16081 16082 /** 16083 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR). 16084 */ 16085 HMVMX_EXIT_DECL hmR0VmxExitRdmsrNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16086 { 16087 uint32_t fMsrpm; 16088 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS)) 16089 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), pVCpu->cpum.GstCtx.ecx); 16090 else 16091 fMsrpm = VMXMSRPM_EXIT_RD; 16092 16093 if (fMsrpm & VMXMSRPM_EXIT_RD) 16094 { 16095 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16096 AssertRCReturn(rc, rc); 16097 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 16098 } 16099 return hmR0VmxExitRdmsr(pVCpu, pVmxTransient); 16100 } 16101 16102 16103 /** 16104 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR). 
16105 */ 16106 HMVMX_EXIT_DECL hmR0VmxExitWrmsrNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16107 { 16108 uint32_t fMsrpm; 16109 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS)) 16110 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), pVCpu->cpum.GstCtx.ecx); 16111 else 16112 fMsrpm = VMXMSRPM_EXIT_WR; 16113 16114 if (fMsrpm & VMXMSRPM_EXIT_WR) 16115 { 16116 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16117 AssertRCReturn(rc, rc); 16118 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 16119 } 16120 return hmR0VmxExitWrmsr(pVCpu, pVmxTransient); 16121 } 16122 16123 16124 /** 16125 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit. 16126 */ 16127 HMVMX_EXIT_DECL hmR0VmxExitMwaitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16128 { 16129 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT)) 16130 { 16131 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16132 AssertRCReturn(rc, rc); 16133 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 16134 } 16135 return hmR0VmxExitMwait(pVCpu, pVmxTransient); 16136 } 16137 16138 16139 /** 16140 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional 16141 * VM-exit. 16142 */ 16143 HMVMX_EXIT_DECL hmR0VmxExitMtfNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16144 { 16145 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */ 16146 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason); 16147 } 16148 16149 16150 /** 16151 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit. 
16152 */ 16153 HMVMX_EXIT_DECL hmR0VmxExitMonitorNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16154 { 16155 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT)) 16156 { 16157 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16158 AssertRCReturn(rc, rc); 16159 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 16160 } 16161 return hmR0VmxExitMonitor(pVCpu, pVmxTransient); 16162 } 16163 16164 16165 /** 16166 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit. 16167 */ 16168 HMVMX_EXIT_DECL hmR0VmxExitPauseNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16169 { 16170 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept 16171 * PAUSE when executing a nested-guest? If it does not, we would not need 16172 * to check for the intercepts here. Just call VM-exit... */ 16173 16174 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */ 16175 if ( CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT) 16176 || CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)) 16177 { 16178 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16179 AssertRCReturn(rc, rc); 16180 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 16181 } 16182 return hmR0VmxExitPause(pVCpu, pVmxTransient); 16183 } 16184 16185 16186 /** 16187 * Nested-guest VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses 16188 * (VMX_EXIT_GDTR_IDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR). 16189 * Conditional VM-exit. 
16190 */ 16191 HMVMX_EXIT_DECL hmR0VmxExitXdtrAccessNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16192 { 16193 if (CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT)) 16194 { 16195 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 16196 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16197 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 16198 AssertRCReturn(rc, rc); 16199 16200 VMXVEXITINFO ExitInfo; 16201 RT_ZERO(ExitInfo); 16202 ExitInfo.cbInstr = pVmxTransient->cbInstr; 16203 ExitInfo.u64Qual = pVmxTransient->uExitQual; 16204 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo; 16205 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 16206 } 16207 return hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient); 16208 } 16209 16210 16211 /** 16212 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit. 16213 */ 16214 HMVMX_EXIT_DECL hmR0VmxExitRdtscpNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16215 { 16216 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT)) 16217 { 16218 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16219 AssertRCReturn(rc, rc); 16220 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 16221 } 16222 return hmR0VmxExitRdtscp(pVCpu, pVmxTransient); 16223 } 16224 16225 16226 /** 16227 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit. 
16228 */ 16229 HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvdNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16230 { 16231 if (CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT)) 16232 { 16233 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16234 AssertRCReturn(rc, rc); 16235 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 16236 } 16237 return hmR0VmxExitInvpcid(pVCpu, pVmxTransient); 16238 } 16239 16240 16241 /** 16242 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit. 16243 */ 16244 HMVMX_EXIT_DECL hmR0VmxExitInvpcidNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16245 { 16246 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT)) 16247 { 16248 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16249 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 16250 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 16251 AssertRCReturn(rc, rc); 16252 16253 VMXVEXITINFO ExitInfo; 16254 RT_ZERO(ExitInfo); 16255 ExitInfo.uReason = pVmxTransient->uExitReason; 16256 ExitInfo.cbInstr = pVmxTransient->cbInstr; 16257 ExitInfo.u64Qual = pVmxTransient->uExitQual; 16258 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo; 16259 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 16260 } 16261 return hmR0VmxExitInvpcid(pVCpu, pVmxTransient); 16262 } 16263 16264 16265 /** 16266 * Nested-guest VM-exit handler for instructions that cause VM-exits uncondtionally 16267 * and only provide the instruction length. 16268 * 16269 * Unconditional VM-exit. 
16270 */ 16271 HMVMX_EXIT_DECL hmR0VmxExitInstrNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16272 { 16273 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16274 AssertRCReturn(rc, rc); 16275 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr); 16276 } 16277 16278 16279 /** 16280 * Nested-guest VM-exit handler for instructions that cause VM-exits uncondtionally 16281 * but provide instruction length as well as more information. 16282 * 16283 * Unconditional VM-exit. 16284 */ 16285 HMVMX_EXIT_DECL hmR0VmxExitInstrWithInfoNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 16286 { 16287 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 16288 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 16289 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 16290 AssertRCReturn(rc, rc); 16291 16292 VMXVEXITINFO ExitInfo; 16293 RT_ZERO(ExitInfo); 16294 ExitInfo.uReason = pVmxTransient->uExitReason; 16295 ExitInfo.cbInstr = pVmxTransient->cbInstr; 16296 ExitInfo.u64Qual = pVmxTransient->uExitQual; 16297 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo; 16298 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 16299 } 16300 16301 /** @} */ 16302 16118 16303 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ 16119 16304
Note: See TracChangeset for help on using the changeset viewer.