Changeset 72744 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
- Timestamp: Jun 29, 2018 7:36:19 AM
- File: trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (1 edited)
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72655 r72744 42 42 #include "dtrace/VBoxVMM.h" 43 43 44 #define HMVMX_USE_IEM_EVENT_REFLECTION45 44 #ifdef DEBUG_ramshankar 46 45 # define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS … … 67 66 #define HMVMX_FLUSH_TAGGED_TLB_NONE 3 68 67 69 /** @name Updated-guest-state flags. 70 * @{ */ 71 #define HMVMX_UPDATED_GUEST_RIP RT_BIT(0) 72 #define HMVMX_UPDATED_GUEST_RSP RT_BIT(1) 73 #define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2) 74 #define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3) 75 #define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4) 76 #define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5) 77 #define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6) 78 #define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7) 79 #define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8) 80 #define HMVMX_UPDATED_GUEST_TR RT_BIT(9) 81 #define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10) 82 #define HMVMX_UPDATED_GUEST_DR7 RT_BIT(11) 83 #define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12) 84 #define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13) 85 #define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14) 86 #define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15) 87 #define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16) 88 #define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17) 89 #define HMVMX_UPDATED_GUEST_INTR_STATE RT_BIT(18) 90 #define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19) 91 #define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \ 92 | HMVMX_UPDATED_GUEST_RSP \ 93 | HMVMX_UPDATED_GUEST_RFLAGS \ 94 | HMVMX_UPDATED_GUEST_CR0 \ 95 | HMVMX_UPDATED_GUEST_CR3 \ 96 | HMVMX_UPDATED_GUEST_CR4 \ 97 | HMVMX_UPDATED_GUEST_GDTR \ 98 | HMVMX_UPDATED_GUEST_IDTR \ 99 | HMVMX_UPDATED_GUEST_LDTR \ 100 | HMVMX_UPDATED_GUEST_TR \ 101 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \ 102 | HMVMX_UPDATED_GUEST_DR7 \ 103 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \ 104 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \ 105 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \ 106 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \ 107 | HMVMX_UPDATED_GUEST_LAZY_MSRS \ 108 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \ 109 | HMVMX_UPDATED_GUEST_INTR_STATE \ 110 | HMVMX_UPDATED_GUEST_APIC_STATE) 68 /** @name HMVMX_READ_XXX 69 * Flags to skip redundant reads of some common VMCS fields that are not part of 70 * the guest-CPU or VCPU state but are needed while handling VM-exits. 71 */ 72 #define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0) 73 #define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1) 74 #define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2) 75 #define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3) 76 #define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4) 77 #define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5) 78 #define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6) 111 79 /** @} */ 112 80 113 /** @name 114 * Flags to skip redundant reads of some common VMCS fields that are not part of 115 * the guest-CPU state but are in the transient structure. 116 */ 117 #define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0) 118 #define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1) 119 #define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2) 120 #define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3) 121 #define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4) 122 #define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5) 123 #define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6) 124 /** @} */ 125 126 /** @name 81 /** 127 82 * States of the VMCS. 128 83 * … … 131 86 * are used. Maybe later this can be extended (i.e. Nested Virtualization). 
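Note on the HMVMX_READ_XXX flags above: they implement a read-once cache so a VM-exit handler never issues the same VMREAD twice. Each accessor checks the corresponding bit in the transient structure's fVmcsFieldsRead mask, performs the read only if the bit is clear, and then sets it. A minimal, self-contained sketch of the pattern (plain C; the vmread() stub and the field id are placeholders, not the VirtualBox API):

    #include <stdint.h>
    #include <stdio.h>

    #define READ_EXIT_QUALIFICATION  (1u << 0)

    typedef struct { uint32_t fFieldsRead; uint32_t uExitQual; } TRANSIENT;

    /* Stand-in for the real VMREAD; it only counts how often it runs. */
    static unsigned g_cVmreads = 0;
    static uint32_t vmread(uint32_t idField) { g_cVmreads++; return idField; }

    /* Read the exit qualification at most once per VM-exit. */
    static uint32_t ReadExitQual(TRANSIENT *pT)
    {
        if (!(pT->fFieldsRead & READ_EXIT_QUALIFICATION))
        {
            pT->uExitQual = vmread(0x6400 /* example field id */);
            pT->fFieldsRead |= READ_EXIT_QUALIFICATION;
        }
        return pT->uExitQual;
    }

    int main(void)
    {
        TRANSIENT T = { 0, 0 };
        ReadExitQual(&T);
        ReadExitQual(&T);                                  /* served from the cache */
        printf("VMREADs performed: %u\n", g_cVmreads);     /* prints 1 */
        return 0;
    }

In the changeset this caching mechanism is only renamed (HMVMX_UPDATED_TRANSIENT_* becomes HMVMX_READ_XXX); the behaviour stays the same.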
132 87 */ 133 #define HMVMX_VMCS_STATE_CLEAR RT_BIT(0) 134 #define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1) 135 #define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2) 136 /** @} */ 88 #define HMVMX_VMCS_STATE_CLEAR RT_BIT(0) 89 #define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1) 90 #define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2) 137 91 138 92 /** … … 144 98 * MSR which cannot be modified by the guest without causing a VM-exit. 145 99 */ 146 #define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \ 147 | CPUMCTX_EXTRN_RFLAGS \ 148 | CPUMCTX_EXTRN_SREG_MASK \ 149 | CPUMCTX_EXTRN_TABLE_MASK \ 150 | CPUMCTX_EXTRN_SYSENTER_MSRS \ 151 | CPUMCTX_EXTRN_SYSCALL_MSRS \ 152 | CPUMCTX_EXTRN_KERNEL_GS_BASE \ 153 | CPUMCTX_EXTRN_TSC_AUX \ 154 | CPUMCTX_EXTRN_OTHER_MSRS \ 155 | CPUMCTX_EXTRN_CR0 \ 156 | CPUMCTX_EXTRN_CR3 \ 157 | CPUMCTX_EXTRN_CR4 \ 158 | CPUMCTX_EXTRN_DR7) 100 #define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \ 101 | CPUMCTX_EXTRN_RFLAGS \ 102 | CPUMCTX_EXTRN_RSP \ 103 | CPUMCTX_EXTRN_SREG_MASK \ 104 | CPUMCTX_EXTRN_TABLE_MASK \ 105 | CPUMCTX_EXTRN_KERNEL_GS_BASE \ 106 | CPUMCTX_EXTRN_SYSCALL_MSRS \ 107 | CPUMCTX_EXTRN_SYSENTER_MSRS \ 108 | CPUMCTX_EXTRN_TSC_AUX \ 109 | CPUMCTX_EXTRN_OTHER_MSRS \ 110 | CPUMCTX_EXTRN_CR0 \ 111 | CPUMCTX_EXTRN_CR3 \ 112 | CPUMCTX_EXTRN_CR4 \ 113 | CPUMCTX_EXTRN_DR7 \ 114 | CPUMCTX_EXTRN_HM_VMX_MASK) 159 115 160 116 /** … … 217 173 * context. */ 218 174 #ifdef VMX_USE_CACHED_VMCS_ACCESSES 219 # define HMVMX_ SAVE_SREG(Sel, a_pCtxSelReg) \220 hmR0Vmx SaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \221 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))175 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \ 176 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \ 177 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg)) 222 178 #else 223 # define HMVMX_ SAVE_SREG(Sel, a_pCtxSelReg) \224 hmR0Vmx SaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \225 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))179 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \ 180 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \ 181 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg)) 226 182 #endif 227 183 … … 318 274 uint32_t uIdtVectoringErrorCode; 319 275 320 /** Mask of currently read VMCS fields; HMVMX_ UPDATED_TRANSIENT_*. */276 /** Mask of currently read VMCS fields; HMVMX_READ_XXX. 
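A side note on the HMVMX_IMPORT_SREG macro introduced above: it relies on preprocessor token pasting (##) so a single macro body can name the SEL/LIMIT/BASE/ACCESS_RIGHTS VMCS fields of any segment register. A hedged, stand-alone illustration of the same technique (the field ids and the ImportSeg() helper are invented for the example; they are not the VirtualBox definitions):

    #include <stdio.h>

    /* Invented VMCS field ids, one pair per segment register. */
    #define VMCS_GUEST_CS_SEL    0x0802
    #define VMCS_GUEST_CS_LIMIT  0x4802
    #define VMCS_GUEST_SS_SEL    0x0804
    #define VMCS_GUEST_SS_LIMIT  0x4804

    static void ImportSeg(unsigned idSel, unsigned idLimit, const char *pszName)
    {
        printf("%s: sel-field=%#x limit-field=%#x\n", pszName, idSel, idLimit);
    }

    /* Token pasting turns IMPORT_SREG(CS) into the CS field ids, and # stringizes the name. */
    #define IMPORT_SREG(Sel) \
        ImportSeg(VMCS_GUEST_##Sel##_SEL, VMCS_GUEST_##Sel##_LIMIT, #Sel)

    int main(void)
    {
        IMPORT_SREG(CS);
        IMPORT_SREG(SS);
        return 0;
    }

The cached and uncached variants of the real macro differ only in which BASE field id they pass, which is why the #ifdef VMX_USE_CACHED_VMCS_ACCESSES split stays this small.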
*/ 321 277 uint32_t fVmcsFieldsRead; 322 278 … … 417 373 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr); 418 374 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu); 419 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,420 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,421 bool fStepping, uint32_t *puIntState);375 static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat); 376 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode, 377 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState); 422 378 #if HC_ARCH_BITS == 32 423 379 static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu); … … 649 605 } 650 606 651 652 607 #ifdef VBOX_STRICT 653 608 /** … … 666 621 return VINF_SUCCESS; 667 622 } 668 #endif /* VBOX_STRICT */ 669 670 671 #ifdef VBOX_STRICT 623 624 672 625 /** 673 626 * Reads the VM-entry exception error code field from the VMCS into … … 697 650 DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient) 698 651 { 699 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))652 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO)) 700 653 { 701 654 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo); 702 AssertRCReturn(rc, 703 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;655 AssertRCReturn(rc,rc); 656 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO; 704 657 } 705 658 return VINF_SUCCESS; … … 716 669 DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient) 717 670 { 718 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))671 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)) 719 672 { 720 673 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode); 721 674 AssertRCReturn(rc, rc); 722 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;675 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE; 723 676 } 724 677 return VINF_SUCCESS; … … 735 688 DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient) 736 689 { 737 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_INSTR_LEN))690 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN)) 738 691 { 739 692 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr); 740 693 AssertRCReturn(rc, rc); 741 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_INSTR_LEN;694 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN; 742 695 } 743 696 return VINF_SUCCESS; … … 754 707 DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient) 755 708 { 756 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_INSTR_INFO))709 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO)) 757 710 { 758 711 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u); 759 712 AssertRCReturn(rc, rc); 760 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_INSTR_INFO;713 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO; 761 714 } 762 715 return VINF_SUCCESS; … … 775 728 DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT 
pVmxTransient) 776 729 { 777 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_QUALIFICATION))730 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION)) 778 731 { 779 732 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu); 780 733 AssertRCReturn(rc, rc); 781 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_QUALIFICATION;734 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION; 782 735 } 783 736 return VINF_SUCCESS; … … 796 749 DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient) 797 750 { 798 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_IDT_VECTORING_INFO))799 { 800 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ INFO, &pVmxTransient->uIdtVectoringInfo);751 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO)) 752 { 753 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo); 801 754 AssertRCReturn(rc, rc); 802 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_IDT_VECTORING_INFO;755 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO; 803 756 } 804 757 return VINF_SUCCESS; … … 815 768 DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient) 816 769 { 817 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))818 { 819 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);770 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE)) 771 { 772 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode); 820 773 AssertRCReturn(rc, rc); 821 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;774 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE; 822 775 } 823 776 return VINF_SUCCESS; … … 913 866 * allocation. 914 867 */ 915 DECLINLINE(int)hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)868 static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys) 916 869 { 917 870 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER); … … 938 891 * allocation as 0. 939 892 */ 940 DECLINLINE(void)hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)893 static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys) 941 894 { 942 895 AssertPtr(pMemObj); … … 1163 1116 1164 1117 /* 1165 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been using EPTPs) so 1166 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID. 1118 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been 1119 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get 1120 * invalidated when flushing by VPID. 1167 1121 */ 1168 1122 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs; … … 1303 1257 * @param cMsrs The number of MSRs. 1304 1258 */ 1305 DECLINLINE(int)hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)1259 static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs) 1306 1260 { 1307 1261 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */ … … 1382 1336 * Update the host MSR only when requested by the caller AND when we're 1383 1337 * adding it to the auto-load/store area. 
Otherwise, it would have been 1384 * updated by hmR0Vmx SaveHostMsrs(). We do this for performance reasons.1338 * updated by hmR0VmxExportHostMsrs(). We do this for performance reasons. 1385 1339 */ 1386 1340 bool fUpdatedMsrValue = false; … … 1451 1405 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE); 1452 1406 1453 Log4 (("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));1407 Log4Func(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs)); 1454 1408 return VINF_SUCCESS; 1455 1409 } … … 1572 1526 #endif 1573 1527 return false; 1574 }1575 1576 1577 /**1578 * Saves a set of guest MSRs back into the guest-CPU context.1579 *1580 * @param pVCpu The cross context virtual CPU structure.1581 * @param pMixedCtx Pointer to the guest-CPU context. The data may be1582 * out-of-sync. Make sure to update the required fields1583 * before using them.1584 *1585 * @remarks No-long-jump zone!!!1586 */1587 static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)1588 {1589 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));1590 Assert(!VMMRZCallRing3IsEnabled(pVCpu));1591 1592 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)1593 {1594 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);1595 #if HC_ARCH_BITS == 641596 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)1597 {1598 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);1599 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);1600 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);1601 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);1602 }1603 #else1604 NOREF(pMixedCtx);1605 #endif1606 }1607 1528 } 1608 1529 … … 1678 1599 * @remarks No-long-jump zone!!! 1679 1600 * @remarks The guest MSRs should have been saved back into the guest-CPU 1680 * context by hmR0Vmx SaveGuestLazyMsrs()!!!1601 * context by hmR0VmxImportGuestState()!!! 1681 1602 */ 1682 1603 static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu) … … 1926 1847 { 1927 1848 /* 1928 * We must invalidate the guest TLB entry in either case, we cannot ignore it even for the EPT case1929 * See @bugref{6043} and @bugref{6177}.1849 * We must invalidate the guest TLB entry in either case, we cannot ignore it even for 1850 * the EPT case. See @bugref{6043} and @bugref{6177}. 1930 1851 * 1931 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this1932 * function maybe called in a loop with individual addresses.1852 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() 1853 * as this function maybe called in a loop with individual addresses. 1933 1854 */ 1934 1855 if (pVM->hm.s.vmx.fVpid) … … 2024 1945 2025 1946 /* 2026 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last. 2027 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB 2028 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore. 1947 * Force a TLB flush for the first world-switch if the current CPU differs from the one we 1948 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID 1949 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we 1950 * cannot reuse the current ASID anymore. 
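The lazy-MSR hunks above (hmR0VmxLazySaveGuestMsrs being folded into the new import path, hmR0VmxLazyRestoreHostMsrs keeping its remark) all follow one small state machine: save the host values of the SYSCALL-related MSRs once and mark them saved, load the guest values and mark them loaded, and on the way back out restore the host values only if the guest values were actually loaded. A simplified sketch of that pattern with a single MSR and invented flag names (the rdmsr()/wrmsr() stubs stand in for the real accessors):

    #include <stdint.h>

    #define LAZY_SAVED_HOST    (1u << 0)
    #define LAZY_LOADED_GUEST  (1u << 1)

    /* Pretend hardware register plus stub accessors. */
    static uint64_t g_msrLstar;
    static uint64_t rdmsr(void)          { return g_msrLstar; }
    static void     wrmsr(uint64_t uVal) { g_msrLstar = uVal; }

    typedef struct { uint32_t fLazyMsrs; uint64_t uHostLstar; uint64_t uGuestLstar; } VCPUSTATE;

    static void LazyLoadGuestMsrs(VCPUSTATE *p)
    {
        if (!(p->fLazyMsrs & LAZY_SAVED_HOST))
        {
            p->uHostLstar = rdmsr();              /* save the host value exactly once */
            p->fLazyMsrs |= LAZY_SAVED_HOST;
        }
        if (!(p->fLazyMsrs & LAZY_LOADED_GUEST))
        {
            wrmsr(p->uGuestLstar);                /* switch the CPU to the guest value */
            p->fLazyMsrs |= LAZY_LOADED_GUEST;
        }
    }

    static void LazyRestoreHostMsrs(VCPUSTATE *p)
    {
        if (p->fLazyMsrs & LAZY_LOADED_GUEST)
        {
            p->uGuestLstar = rdmsr();             /* remember the guest's latest value */
            wrmsr(p->uHostLstar);                 /* put the host value back */
        }
        p->fLazyMsrs &= ~(LAZY_LOADED_GUEST | LAZY_SAVED_HOST);
    }

    int main(void)
    {
        VCPUSTATE S = { 0, 0, 0x1000 };
        g_msrLstar = 0xffff800000000000ULL;       /* host value */
        LazyLoadGuestMsrs(&S);
        LazyRestoreHostMsrs(&S);
        return g_msrLstar == 0xffff800000000000ULL ? 0 : 1;   /* host value restored */
    }

This is only an outline of the idea; the real code keys it off whether 64-bit guests are allowed and handles LSTAR, STAR, SFMASK and KERNEL_GS_BASE together.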
2029 1951 */ 2030 1952 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu … … 2057 1979 { 2058 1980 /* 2059 * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates 2060 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use. 2061 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings 2062 * but not guest-physical mappings. 2063 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}. 1981 * Changes to the EPT paging structure by VMM requires flushing-by-EPT as the CPU 1982 * creates guest-physical (ie. only EPT-tagged) mappings while traversing the EPT 1983 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only 1984 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical 1985 * mappings, see @bugref{6568}. 1986 * 1987 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". 2064 1988 */ 2065 1989 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt); … … 2246 2170 break; 2247 2171 } 2248 2249 2172 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */ 2250 2173 } … … 2331 2254 { 2332 2255 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */ 2333 Log4 (("hmR0VmxSetupTaggedTlb:VPID supported without INVEPT support. Ignoring VPID.\n"));2256 Log4Func(("VPID supported without INVEPT support. Ignoring VPID.\n")); 2334 2257 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED; 2335 2258 pVM->hm.s.vmx.fVpid = false; … … 2364 2287 AssertPtr(pVCpu); 2365 2288 2366 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */2367 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */2368 2369 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT/* External interrupts cause a VM-exit. */2370 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;/* Non-maskable interrupts (NMIs) cause a VM-exit. */2289 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */ 2290 uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */ 2291 2292 fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */ 2293 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */ 2371 2294 2372 2295 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI) 2373 val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */2296 fVal |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */ 2374 2297 2375 2298 /* Enable the VMX preemption timer. */ … … 2377 2300 { 2378 2301 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER); 2379 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;2302 fVal |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER; 2380 2303 } 2381 2304 … … 2386 2309 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR); 2387 2310 Assert(pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT); 2388 val |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR;2311 fVal |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR; 2389 2312 } 2390 2313 #endif 2391 2314 2392 if (( val & zap) != val)2393 { 2394 LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! 
cpu=%#RX64 val=%#RX64 zap=%#RX64\n",2395 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));2315 if ((fVal & fZap) != fVal) 2316 { 2317 LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", 2318 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap)); 2396 2319 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC; 2397 2320 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2398 2321 } 2399 2322 2400 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);2323 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal); 2401 2324 AssertRCReturn(rc, rc); 2402 2325 2403 pVCpu->hm.s.vmx.u32PinCtls = val;2326 pVCpu->hm.s.vmx.u32PinCtls = fVal; 2404 2327 return rc; 2405 2328 } … … 2419 2342 2420 2343 int rc = VERR_INTERNAL_ERROR_5; 2421 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;/* Bits set here must be set in the VMCS. */2422 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;/* Bits cleared here must be cleared in the VMCS. */2423 2424 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT/* HLT causes a VM-exit. */2425 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING/* Use TSC-offsetting. */2426 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT/* MOV DRx causes a VM-exit. */2427 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT/* All IO instructions cause a VM-exit. */2428 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT/* RDPMC causes a VM-exit. */2429 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT/* MONITOR causes a VM-exit. */2430 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT;/* MWAIT causes a VM-exit. */2344 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */ 2345 uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 2346 2347 fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */ 2348 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */ 2349 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */ 2350 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */ 2351 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */ 2352 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */ 2353 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */ 2431 2354 2432 2355 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */ … … 2442 2365 if (!pVM->hm.s.fNestedPaging) 2443 2366 { 2444 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); 2445 val |=VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT2446 |VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT2447 |VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;2367 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */ 2368 fVal |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT 2369 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT 2370 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT; 2448 2371 } 2449 2372 … … 2453 2376 { 2454 2377 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic); 2455 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); 2378 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */ 2456 2379 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0); 2457 2380 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic); 2458 2381 AssertRCReturn(rc, rc); 2459 2382 2460 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW;/* CR8 reads from the Virtual-APIC page. */2461 /* CR8 writes cause a VM-exit based on TPR threshold. 
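All of the control-field setup in this change (pin-based, processor-based, secondary, entry and exit controls) uses the same consistency check that the renamed fVal/fZap variables make a little clearer: "disallowed0" is the set of bits the CPU forces to 1, "allowed1" the set of bits it permits to be 1, and after OR-ing in the wanted features the result must satisfy (fVal & fZap) == fVal. A small self-contained sketch of that idiom with made-up capability values (an illustration only, not the real capability-MSR handling):

    #include <stdint.h>
    #include <stdio.h>

    #define CTRL_HLT_EXIT    (1u << 7)    /* feature the fake CPU supports */
    #define CTRL_TPR_SHADOW  (1u << 21)   /* feature the fake CPU lacks    */

    int main(void)
    {
        /* Pretend values derived from a VMX capability MSR. */
        uint32_t const fDisallowed0 = 0x0000000e;                  /* bits forced to 1   */
        uint32_t const fAllowed1    = 0x0000000e | CTRL_HLT_EXIT;  /* bits that may be 1 */

        uint32_t fVal = fDisallowed0;      /* must-be-one bits are always set */
        uint32_t fZap = fAllowed1;         /* anything outside this must be 0 */

        fVal |= CTRL_HLT_EXIT;             /* a feature we want and can have  */
        fVal |= CTRL_TPR_SHADOW;           /* a feature this CPU cannot do    */

        if ((fVal & fZap) != fVal)
        {
            printf("unsupported control bits: %#x\n", fVal & ~fZap);
            return 1;                      /* the real code fails VM setup here */
        }
        printf("controls OK: %#x\n", fVal);
        return 0;
    }

In the driver the failing path records the offending control group in u32HMError and returns VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO, which is exactly what the LogRel lines around this hunk report.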
*/2462 Assert(!( val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));2463 Assert(!( val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));2383 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */ 2384 /* CR8 writes cause a VM-exit based on TPR threshold. */ 2385 Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT)); 2386 Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT)); 2464 2387 } 2465 2388 else … … 2471 2394 if (pVM->hm.s.fAllow64BitGuests) 2472 2395 { 2473 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT/* CR8 reads cause a VM-exit. */2474 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;/* CR8 writes cause a VM-exit. */2396 fVal |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */ 2397 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */ 2475 2398 } 2476 2399 } … … 2479 2402 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 2480 2403 { 2481 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;2404 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS; 2482 2405 2483 2406 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap); 2484 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); 2407 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */ 2485 2408 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap); 2486 2409 AssertRCReturn(rc, rc); … … 2520 2443 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */ 2521 2444 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 2522 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;2523 2524 if (( val & zap) != val)2525 { 2526 LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",2527 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));2445 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL; 2446 2447 if ((fVal & fZap) != fVal) 2448 { 2449 LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", 2450 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap)); 2528 2451 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC; 2529 2452 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2530 2453 } 2531 2454 2532 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);2455 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal); 2533 2456 AssertRCReturn(rc, rc); 2534 2457 2535 pVCpu->hm.s.vmx.u32ProcCtls = val;2458 pVCpu->hm.s.vmx.u32ProcCtls = fVal; 2536 2459 2537 2460 /* … … 2540 2463 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)) 2541 2464 { 2542 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;/* Bits set here must be set in the VMCS. */2543 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;/* Bits cleared here must be cleared in the VMCS. */2465 fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */ 2466 fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 2544 2467 2545 2468 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT) 2546 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;/* WBINVD causes a VM-exit. */2469 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */ 2547 2470 2548 2471 if (pVM->hm.s.fNestedPaging) 2549 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;/* Enable EPT. 
*/2472 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */ 2550 2473 2551 2474 /* … … 2556 2479 && pVM->cpum.ro.GuestFeatures.fInvpcid) 2557 2480 { 2558 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;2481 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID; 2559 2482 } 2560 2483 2561 2484 if (pVM->hm.s.vmx.fVpid) 2562 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;/* Enable VPID. */2485 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */ 2563 2486 2564 2487 if (pVM->hm.s.vmx.fUnrestrictedGuest) 2565 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;/* Enable Unrestricted Execution. */2488 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */ 2566 2489 2567 2490 #if 0 … … 2569 2492 { 2570 2493 Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT); 2571 val |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;/* Enable APIC-register virtualization. */2494 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT; /* Enable APIC-register virtualization. */ 2572 2495 2573 2496 Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY); 2574 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;/* Enable virtual-interrupt delivery. */2497 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY; /* Enable virtual-interrupt delivery. */ 2575 2498 } 2576 2499 #endif … … 2582 2505 { 2583 2506 Assert(pVM->hm.s.vmx.HCPhysApicAccess); 2584 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); 2585 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;/* Virtualize APIC accesses. */2507 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */ 2508 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */ 2586 2509 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess); 2587 2510 AssertRCReturn(rc, rc); … … 2589 2512 2590 2513 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 2591 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;/* Enable RDTSCP support. */2514 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */ 2592 2515 2593 2516 if ( pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT … … 2595 2518 && pVM->hm.s.vmx.cPleWindowTicks) 2596 2519 { 2597 val |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;/* Enable pause-loop exiting. */2520 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT; /* Enable pause-loop exiting. */ 2598 2521 2599 2522 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks); … … 2602 2525 } 2603 2526 2604 if (( val & zap) != val)2527 if ((fVal & fZap) != fVal) 2605 2528 { 2606 2529 LogRel(("hmR0VmxSetupProcCtls: Invalid secondary processor-based VM-execution controls combo! " 2607 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));2530 "cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap)); 2608 2531 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2; 2609 2532 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2610 2533 } 2611 2534 2612 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);2535 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal); 2613 2536 AssertRCReturn(rc, rc); 2614 2537 2615 pVCpu->hm.s.vmx.u32ProcCtls2 = val;2538 pVCpu->hm.s.vmx.u32ProcCtls2 = fVal; 2616 2539 } 2617 2540 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest)) … … 2645 2568 /* All fields are zero-initialized during allocation; but don't remove the commented block below. 
*/ 2646 2569 #if 0 2647 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0Vmx LoadGuestCR3AndCR4())*/2570 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxExportGuestCR3AndCR4())*/ 2648 2571 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); 2649 2572 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); … … 2657 2580 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); 2658 2581 2659 /** @todo Explore possibility of using IO-bitmaps. */2660 2582 /* All IO & IOIO instructions cause VM-exits. */ 2661 2583 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); … … 2688 2610 #if 0 2689 2611 /* Setup debug controls */ 2690 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */2612 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); 2691 2613 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0); 2692 2614 AssertRCReturn(rc, rc); … … 2733 2655 AssertRCReturn(rc, rc); 2734 2656 return rc; 2735 }2736 2737 2738 /**2739 * Sets up the initial guest-state mask. The guest-state mask is consulted2740 * before reading guest-state fields from the VMCS as VMREADs can be expensive2741 * for the nested virtualization case (as it would cause a VM-exit).2742 *2743 * @param pVCpu The cross context virtual CPU structure.2744 */2745 static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)2746 {2747 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */2748 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);2749 return VINF_SUCCESS;2750 2657 } 2751 2658 … … 2806 2713 2807 2714 /* 2808 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated. 2809 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0Intel(). 2715 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be 2716 * allocated. We no longer support the highly unlikely case of UnrestrictedGuest without 2717 * pRealModeTSS, see hmR3InitFinalizeR0Intel(). 2810 2718 */ 2811 2719 if ( !pVM->hm.s.vmx.fUnrestrictedGuest … … 2852 2760 2853 2761 /* Log the VCPU pointers, useful for debugging SMP VMs. */ 2854 Log4 (("VMXR0SetupVM:pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));2762 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu)); 2855 2763 2856 2764 /* Set revision dword at the beginning of the VMCS structure. */ … … 2881 2789 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu); 2882 2790 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM), 2883 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);2884 2885 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);2886 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),2887 2791 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc); 2888 2792 … … 2912 2816 * 2913 2817 * @returns VBox status code. 2914 * @param pVM The cross context VM structure. 2915 * @param pVCpu The cross context virtual CPU structure. 
2916 */ 2917 DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu) 2918 { 2919 NOREF(pVM); NOREF(pVCpu); 2920 2818 */ 2819 static int hmR0VmxExportHostControlRegs(void) 2820 { 2921 2821 RTCCUINTREG uReg = ASMGetCR0(); 2922 2822 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg); … … 2934 2834 2935 2835 2836 /** 2837 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into 2838 * the host-state area in the VMCS. 2839 * 2840 * @returns VBox status code. 2841 * @param pVCpu The cross context virtual CPU structure. 2842 */ 2843 static int hmR0VmxExportHostSegmentRegs(PVMCPU pVCpu) 2844 { 2936 2845 #if HC_ARCH_BITS == 64 2937 2846 /** 2938 2847 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry 2939 * requirements. See hmR0Vmx SaveHostSegmentRegs().2848 * requirements. See hmR0VmxExportHostSegmentRegs(). 2940 2849 */ 2941 2850 # define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \ … … 2955 2864 (selValue) = 0; \ 2956 2865 } 2957 #endif 2958 2959 2960 /** 2961 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into 2962 * the host-state area in the VMCS. 2963 * 2964 * @returns VBox status code. 2965 * @param pVM The cross context VM structure. 2966 * @param pVCpu The cross context virtual CPU structure. 2967 */ 2968 DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu) 2969 { 2970 int rc = VERR_INTERNAL_ERROR_5; 2971 2972 #if HC_ARCH_BITS == 64 2866 2973 2867 /* 2974 2868 * If we've executed guest code using VT-x, the host-state bits will be messed up. We 2975 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}. 2869 * should -not- save the messed up state without restoring the original host-state, 2870 * see @bugref{7240}. 2976 2871 * 2977 * This apparently can happen (most likely the FPU changes), deal with it rather than asserting.2978 * Was observed booting Solaris10u10 32-bit guest.2872 * This apparently can happen (most likely the FPU changes), deal with it rather than 2873 * asserting. Was observed booting Solaris 10u10 32-bit guest. 2979 2874 */ 2980 2875 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED) … … 3018 2913 #if HC_ARCH_BITS == 64 3019 2914 /* 3020 * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to gain VM-entry and restore them 3021 * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers". 2915 * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to 2916 * gain VM-entry and restore them before we get preempted. 2917 * 2918 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers". 3022 2919 */ 3023 2920 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS); … … 3046 2943 3047 2944 /* Write these host selector fields into the host-state area in the VMCS. 
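The VMXLOCAL_ADJUST_HOST_SEG macro above encodes a VT-x host-state rule: a host data-segment selector that will be loaded from the VMCS on VM-exit must have RPL = 0 and the TI (LDT) bit clear, so selectors that violate this are written to the VMCS as 0 and flagged for manual restoration afterwards. A stand-alone sketch of just that test (selector bit layout per the x86 definition; the restore-flag names are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define X86_SEL_RPL  0x0003u   /* requested privilege level, bits 1:0 */
    #define X86_SEL_LDT  0x0004u   /* table indicator, bit 2 (1 = LDT)    */

    #define RESTORE_HOST_SEL_DS  (1u << 0)
    #define RESTORE_HOST_SEL_ES  (1u << 1)

    /* Returns the value for the VMCS host selector field and notes what to restore by hand. */
    static uint16_t AdjustHostSeg(uint16_t uSel, uint32_t fRestoreFlag, uint32_t *pfRestore)
    {
        if (uSel & (X86_SEL_RPL | X86_SEL_LDT))
        {
            *pfRestore |= fRestoreFlag;   /* reload the real selector ourselves after the VM-exit  */
            return 0;                     /* the null selector always satisfies the VM-entry checks */
        }
        return uSel;
    }

    int main(void)
    {
        uint32_t fRestore = 0;
        uint16_t uDs = AdjustHostSeg(0x002b /* RPL 3 selector */, RESTORE_HOST_SEL_DS, &fRestore);
        uint16_t uEs = AdjustHostSeg(0x0010 /* clean selector */, RESTORE_HOST_SEL_ES, &fRestore);
        printf("DS=%#x ES=%#x fRestore=%#x\n", uDs, uEs, fRestore);   /* DS zapped, ES kept */
        return 0;
    }

The fRestoreHostFlags bits visible a little further up are what drive that later manual restoration.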
*/ 3048 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);3049 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);2945 int rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS); 2946 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS); 3050 2947 #if HC_ARCH_BITS == 64 3051 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);3052 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);3053 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);3054 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);2948 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS); 2949 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES); 2950 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS); 2951 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS); 3055 2952 #else 3056 2953 NOREF(uSelDS); … … 3059 2956 NOREF(uSelGS); 3060 2957 #endif 3061 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);2958 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR); 3062 2959 AssertRCReturn(rc, rc); 3063 2960 … … 3077 2974 #if HC_ARCH_BITS == 64 3078 2975 /* 3079 * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps them to the3080 * maximum limit (0xffff) on every VM-exit.2976 * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps 2977 * them to the maximum limit (0xffff) on every VM-exit. 3081 2978 */ 3082 2979 if (Gdtr.cbGdt != 0xffff) … … 3085 2982 /* 3086 2983 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" 3087 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit as 0xfff, VT-x 3088 * bloating the limit to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insists 3089 * on 0xfff being the limit (Windows Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there 3090 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on 3091 * hosts where we are pretty sure it won't cause trouble. 2984 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the 2985 * limit as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU 2986 * behavior. However, several hosts either insists on 0xfff being the limit (Windows 2987 * Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there 2988 * but botches sidt alignment in at least one consumer). So, we're only allowing the 2989 * IDTR.LIMIT to be left at 0xffff on hosts where we are sure it won't cause trouble. 3092 2990 */ 3093 2991 # if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) … … 3104 3002 3105 3003 /* 3106 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits 3107 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases. 3004 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI 3005 * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and 3006 * RPL should be too in most cases. 3108 3007 */ 3109 3008 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt, 3110 ("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), 3111 VERR_VMX_INVALID_HOST_STATE); 3009 ("TR selector exceeds limit. 
TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE); 3112 3010 3113 3011 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK)); … … 3116 3014 3117 3015 /* 3118 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits. 3119 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else. 3120 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode. 3121 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0. 3016 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on 3017 * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual 3018 * restoration if the host has something else. Task switching is not supported in 64-bit 3019 * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the 3020 * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0. 3122 3021 * 3123 3022 * [1] See Intel spec. 3.5 "System Descriptor Types". 3124 3023 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode". 3125 3024 */ 3025 PVM pVM = pVCpu->CTX_SUFF(pVM); 3126 3026 Assert(pDesc->System.u4Type == 11); 3127 3027 if ( pDesc->System.u16LimitLow != 0x67 … … 3152 3052 } 3153 3053 #else 3154 NOREF(pVM);3155 3054 uintptr_t uTRBase = X86DESC_BASE(pDesc); 3156 3055 #endif … … 3174 3073 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase; 3175 3074 #endif 3176 return rc; 3177 } 3178 3179 3180 /** 3181 * Saves certain host MSRs in the VM-exit MSR-load area and some in the 3182 * host-state area of the VMCS. Theses MSRs will be automatically restored on 3183 * the host after every successful VM-exit. 3075 return VINF_SUCCESS; 3076 } 3077 3078 3079 /** 3080 * Exports certain host MSRs in the VM-exit MSR-load area and some in the 3081 * host-state area of the VMCS. 3082 * 3083 * Theses MSRs will be automatically restored on the host after every successful 3084 * VM-exit. 3184 3085 * 3185 3086 * @returns VBox status code. 3186 * @param pVM The cross context VM structure.3187 3087 * @param pVCpu The cross context virtual CPU structure. 3188 3088 * 3189 3089 * @remarks No-long-jump zone!!! 3190 3090 */ 3191 DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu) 3192 { 3193 NOREF(pVM); 3194 3091 static int hmR0VmxExportHostMsrs(PVMCPU pVCpu) 3092 { 3195 3093 AssertPtr(pVCpu); 3196 3094 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr); … … 3205 3103 * Host Sysenter MSRs. 3206 3104 */ 3207 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));3105 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)); 3208 3106 #if HC_ARCH_BITS == 32 3209 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, 3210 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, 3107 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)); 3108 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)); 3211 3109 #else 3212 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, 3213 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, 3110 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP)); 3111 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP)); 3214 3112 #endif 3215 3113 AssertRCReturn(rc, rc); … … 3217 3115 /* 3218 3116 * Host EFER MSR. 
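The TR handling in the preceding hunk looks the task register up in the GDT by masking off the selector's TI and RPL bits (the "scaling by 8" the comment mentions, since each descriptor is 8 bytes) and then reassembles the base address from the descriptor's scattered byte fields. A self-contained sketch of that reassembly for a legacy 8-byte descriptor (a 64-bit TSS descriptor additionally carries an upper 32-bit base in the following 8 bytes, which is the X86DESC64_BASE case in the real code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Legacy 8-byte segment descriptor with the base split over three fields. */
    typedef struct
    {
        uint16_t u16LimitLow;
        uint16_t u16BaseLow;
        uint8_t  u8BaseMid;
        uint8_t  u8Type;       /* type + S/DPL/P bits      */
        uint8_t  u8LimitHigh;  /* flags + limit bits 19:16 */
        uint8_t  u8BaseHigh;
    } GDTDESC;

    static uint32_t DescBase(const GDTDESC *pDesc)
    {
        return (uint32_t)pDesc->u16BaseLow
             | ((uint32_t)pDesc->u8BaseMid  << 16)
             | ((uint32_t)pDesc->u8BaseHigh << 24);
    }

    int main(void)
    {
        GDTDESC aGdt[8];
        memset(aGdt, 0, sizeof(aGdt));

        uint16_t const uSelTR  = 0x1b;               /* index 3, TI = 0, RPL = 3 */
        unsigned const offDesc = uSelTR & ~0x7u;     /* byte offset into the GDT */
        GDTDESC TssDesc = { 0x0067, 0x5000, 0x34, 0x8b /* busy 32-bit TSS */, 0x00, 0x12 };
        aGdt[offDesc / 8] = TssDesc;

        const GDTDESC *pDesc = &aGdt[offDesc / 8];
        printf("TR base=%#x limit(low)=%#x\n", DescBase(pDesc), pDesc->u16LimitLow);  /* 0x12345000, 0x67 */
        return 0;
    }

The 0x67 limit and type 11 in the example mirror the values the surrounding code asserts and, where needed, restores after VT-x zaps them on VM-exit.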
3219 * If the CPU supports the newer VMCS controls for managing EFER, use it. 3220 * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs(). 3117 * 3118 * If the CPU supports the newer VMCS controls for managing EFER, use it. Otherwise it's 3119 * done as part of auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs(). 3221 3120 */ 3121 PVM pVM = pVCpu->CTX_SUFF(pVM); 3222 3122 if (pVM->hm.s.vmx.fSupportsVmcsEfer) 3223 3123 { … … 3226 3126 } 3227 3127 3228 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see 3229 * hmR0VmxLoadGuestExitCtls() !! */ 3230 3231 return rc; 3128 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see hmR0VmxExportGuestExitCtls(). */ 3129 3130 return VINF_SUCCESS; 3232 3131 } 3233 3132 … … 3237 3136 * 3238 3137 * We check all relevant bits. For now, that's everything besides LMA/LME, as 3239 * these two bits are handled by VM-entry, see hmR0Vmx LoadGuestExitCtls() and3240 * hmR0VMx LoadGuestEntryCtls().3138 * these two bits are handled by VM-entry, see hmR0VmxExportGuestExitCtls() and 3139 * hmR0VMxExportGuestEntryCtls(). 3241 3140 * 3242 3141 * @returns true if we need to load guest EFER, false otherwise. … … 3249 3148 * @remarks No-long-jump zone!!! 3250 3149 */ 3251 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PC PUMCTX pMixedCtx)3150 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3252 3151 { 3253 3152 #ifdef HMVMX_ALWAYS_SWAP_EFER … … 3257 3156 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 3258 3157 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */ 3259 if (CPUMIsGuestInLongMode (pVCpu))3158 if (CPUMIsGuestInLongModeEx(pMixedCtx)) 3260 3159 return false; 3261 3160 #endif … … 3266 3165 3267 3166 /* 3268 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the3269 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.3167 * For 64-bit guests, if EFER.SCE bit differs, we need to swap EFER to ensure that the 3168 * guest's SYSCALL behaviour isn't broken, see @bugref{7386}. 3270 3169 */ 3271 if ( CPUMIsGuestInLongMode (pVCpu)3170 if ( CPUMIsGuestInLongModeEx(pMixedCtx) 3272 3171 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE)) 3273 3172 { … … 3289 3188 } 3290 3189 3291 /** @todo Check the latest Intel spec. for any other bits,3292 * like SMEP/SMAP? */3293 3190 return false; 3294 3191 } … … 3296 3193 3297 3194 /** 3298 * Sets up VM-entry controls in the VMCS. These controls can affect things done 3299 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry 3300 * controls". 3195 * Exports the guest state with appropriate VM-entry controls in the VMCS. 3196 * 3197 * These controls can affect things done on VM-exit; e.g. "load debug controls", 3198 * see Intel spec. 24.8.1 "VM-entry controls". 3301 3199 * 3302 3200 * @returns VBox status code. … … 3309 3207 * @remarks No-long-jump zone!!! 3310 3208 */ 3311 DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3312 { 3313 int rc = VINF_SUCCESS; 3314 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS)) 3315 { 3316 PVM pVM = pVCpu->CTX_SUFF(pVM); 3317 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */ 3318 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. 
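hmR0VmxShouldSwapEferMsr in the preceding hunk reduces to comparing a couple of EFER bits between host and guest: for a 64-bit guest the SCE bit must match what the guest expects or SYSCALL breaks (the @bugref{7386} case mentioned above), and with nested paging the NXE bit matters because the guest's page tables are walked with the loaded EFER. A hedged sketch of that decision (EFER bit positions per the architectural definition; the two checks shown are a simplified reading of the function, not a verbatim copy):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_SCE  (1ull << 0)    /* SYSCALL enable    */
    #define EFER_NXE  (1ull << 11)   /* no-execute enable */

    static bool ShouldSwapEfer(uint64_t uHostEfer, uint64_t uGuestEfer,
                               bool fGuestInLongMode, bool fNestedPaging)
    {
        /* 64-bit guest: SYSCALL must see the guest's SCE setting. */
        if (fGuestInLongMode && ((uGuestEfer ^ uHostEfer) & EFER_SCE))
            return true;

        /* Nested paging: NX semantics of the guest page tables depend on the loaded EFER.NXE. */
        if (fNestedPaging && ((uGuestEfer ^ uHostEfer) & EFER_NXE))
            return true;

        return false;   /* identical where it matters: keep the host EFER loaded */
    }

    int main(void)
    {
        uint64_t const uHost  = EFER_SCE | EFER_NXE;
        uint64_t const uGuest = EFER_NXE;            /* guest runs with SYSCALL disabled */
        printf("swap=%d\n", ShouldSwapEfer(uHost, uGuest, true, true));   /* 1 */
        return 0;
    }

Whether the swap then happens through the dedicated VMCS entry/exit controls or through the auto-load/store MSR area is decided separately by fSupportsVmcsEfer, as the entry-controls hunk below shows.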
*/ 3209 static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3210 { 3211 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS) 3212 { 3213 PVM pVM = pVCpu->CTX_SUFF(pVM); 3214 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */ 3215 uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 3319 3216 3320 3217 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */ 3321 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;3218 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG; 3322 3219 3323 3220 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */ 3324 3221 if (CPUMIsGuestInLongModeEx(pMixedCtx)) 3325 3222 { 3326 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;3327 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));3223 fVal |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST; 3224 Log4Func(("VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n")); 3328 3225 } 3329 3226 else 3330 Assert(!( val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));3227 Assert(!(fVal & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST)); 3331 3228 3332 3229 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */ … … 3334 3231 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx)) 3335 3232 { 3336 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;3337 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));3233 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR; 3234 Log4Func(("VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n")); 3338 3235 } 3339 3236 … … 3347 3244 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */ 3348 3245 3349 if (( val & zap) != val)3350 { 3351 Log Rel(("hmR0VmxLoadGuestEntryCtls: Invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",3352 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));3246 if ((fVal & fZap) != fVal) 3247 { 3248 Log4Func(("Invalid VM-entry controls combo! Cpu=%RX64 fVal=%RX64 fZap=%RX64\n", 3249 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, fVal, fZap)); 3353 3250 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY; 3354 3251 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3355 3252 } 3356 3253 3357 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);3254 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal); 3358 3255 AssertRCReturn(rc, rc); 3359 3256 3360 pVCpu->hm.s.vmx.u32EntryCtls = val;3361 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_VMX_ENTRY_CTLS);3362 } 3363 return rc;3364 } 3365 3366 3367 /** 3368 * Sets up the VM-exit controls in the VMCS.3257 pVCpu->hm.s.vmx.u32EntryCtls = fVal; 3258 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS); 3259 } 3260 return VINF_SUCCESS; 3261 } 3262 3263 3264 /** 3265 * Exports the guest state with appropriate VM-exit controls in the VMCS. 3369 3266 * 3370 3267 * @returns VBox status code. … … 3376 3273 * @remarks Requires EFER. 3377 3274 */ 3378 DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3379 { 3380 NOREF(pMixedCtx); 3381 3382 int rc = VINF_SUCCESS; 3383 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS)) 3384 { 3385 PVM pVM = pVCpu->CTX_SUFF(pVM); 3386 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */ 3387 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. 
*/ 3275 static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3276 { 3277 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS) 3278 { 3279 PVM pVM = pVCpu->CTX_SUFF(pVM); 3280 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */ 3281 uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 3388 3282 3389 3283 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */ 3390 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;3284 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG; 3391 3285 3392 3286 /* 3393 3287 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. 3394 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs(). 3288 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in 3289 * hmR0VmxExportHostMsrs(). 3395 3290 */ 3396 3291 #if HC_ARCH_BITS == 64 3397 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;3398 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));3292 fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; 3293 Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n")); 3399 3294 #else 3400 3295 Assert( pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64 … … 3404 3299 { 3405 3300 /* The switcher returns to long mode, EFER is managed by the switcher. */ 3406 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;3407 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));3301 fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; 3302 Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n")); 3408 3303 } 3409 3304 else 3410 Assert(!( val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));3305 Assert(!(fVal & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE)); 3411 3306 #endif 3412 3307 … … 3415 3310 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx)) 3416 3311 { 3417 val |=VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR3418 |VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;3419 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));3312 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR 3313 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR; 3314 Log4Func(("VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR and VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n")); 3420 3315 } 3421 3316 3422 3317 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */ 3423 Assert(!( val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));3318 Assert(!(fVal & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)); 3424 3319 3425 3320 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR, … … 3429 3324 if ( pVM->hm.s.vmx.fUsePreemptTimer 3430 3325 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)) 3431 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;3432 3433 if (( val & zap) != val)3434 { 3435 LogRel(("hmR0VmxSetupProcCtls: Invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",3436 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));3326 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER; 3327 3328 if ((fVal & fZap) != fVal) 3329 { 3330 LogRel(("hmR0VmxSetupProcCtls: Invalid VM-exit controls combo! 
cpu=%RX64 fVal=%RX64 fZap=%RX64\n", 3331 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap)); 3437 3332 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT; 3438 3333 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3439 3334 } 3440 3335 3441 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);3336 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal); 3442 3337 AssertRCReturn(rc, rc); 3443 3338 3444 pVCpu->hm.s.vmx.u32ExitCtls = val;3445 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_VMX_EXIT_CTLS);3446 } 3447 return rc;3339 pVCpu->hm.s.vmx.u32ExitCtls = fVal; 3340 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS); 3341 } 3342 return VINF_SUCCESS; 3448 3343 } 3449 3344 … … 3465 3360 3466 3361 /** 3467 * Loads the guest APIC and related state.3362 * Exports the guest APIC TPR state into the VMCS. 3468 3363 * 3469 3364 * @returns VBox status code. 3470 3365 * @param pVCpu The cross context virtual CPU structure. 3471 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3472 * out-of-sync. Make sure to update the required fields3473 * before using them.3474 3366 * 3475 3367 * @remarks No-long-jump zone!!! 3476 3368 */ 3477 DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3478 { 3479 NOREF(pMixedCtx); 3480 3481 int rc = VINF_SUCCESS; 3482 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE)) 3369 static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu) 3370 { 3371 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR) 3483 3372 { 3484 3373 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM)) … … 3495 3384 uint8_t u8Tpr = 0; 3496 3385 uint8_t u8PendingIntr = 0; 3497 rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);3386 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr); 3498 3387 AssertRCReturn(rc, rc); 3499 3388 3500 3389 /* 3501 * If there are interrupts pending but masked by the TPR, instruct VT-x to cause a TPR-below-threshold VM-exit 3502 * when the guest lowers its TPR below the priority of the pending interrupt so we can deliver the interrupt. 3503 * If there are no interrupts pending, set threshold to 0 to not cause any TPR-below-threshold VM-exits. 3390 * If there are interrupts pending but masked by the TPR, instruct VT-x to 3391 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the 3392 * priority of the pending interrupt so we can deliver the interrupt. If there 3393 * are no interrupts pending, set threshold to 0 to not cause any 3394 * TPR-below-threshold VM-exits. 3504 3395 */ 3505 3396 pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR] = u8Tpr; … … 3518 3409 } 3519 3410 } 3520 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 3521 } 3522 3523 return rc; 3411 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR); 3412 } 3413 return VINF_SUCCESS; 3524 3414 } 3525 3415 … … 3536 3426 * @remarks No-long-jump zone!!! 3537 3427 */ 3538 DECLINLINE(uint32_t)hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)3428 static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3539 3429 { 3540 3430 /* 3541 3431 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS. 3542 3432 */ 3543 uint32_t uIntrState = 0;3433 uint32_t fIntrState = 0; 3544 3434 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3545 3435 { 3546 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). 
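The hmR0VmxExportGuestApicTpr hunk a little further up implements the rule spelled out in its comment: when an interrupt is pending but masked by the guest's current TPR, the TPR-threshold field is set to that interrupt's priority class so the CPU exits the moment the guest lowers its TPR far enough; with nothing pending (or nothing masked) the threshold stays 0. A small sketch of the computation, assuming the usual local-APIC convention that the priority class is the vector's upper nibble (the exact VirtualBox arithmetic lives in lines elided from this hunk):

    #include <stdint.h>
    #include <stdio.h>

    /* Value for the VMCS TPR-threshold field (only bits 3:0 are meaningful). */
    static uint32_t TprThreshold(uint8_t u8Tpr, int fPendingIntr, uint8_t u8PendingVector)
    {
        if (fPendingIntr)
        {
            uint8_t const u8PendingPrio = u8PendingVector >> 4;  /* priority class of the interrupt    */
            uint8_t const u8TprPrio     = u8Tpr >> 4;            /* classes the guest currently blocks */
            if (u8PendingPrio <= u8TprPrio)                      /* masked by the current TPR?         */
                return u8PendingPrio;                            /* exit once TPR drops below this     */
        }
        return 0;                                                /* nothing masked: no TPR-below-threshold exits */
    }

    int main(void)
    {
        printf("threshold=%u\n", TprThreshold(0x50, 1, 0x41));   /* masked: prints 4          */
        printf("threshold=%u\n", TprThreshold(0x30, 1, 0x41));   /* deliverable now: prints 0 */
        return 0;
    }

APICGetTpr() in the hunk supplies exactly the three inputs this helper needs: the current TPR, whether an interrupt is pending, and the pending vector.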
*/ 3547 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), 3548 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu))); 3436 /* If inhibition is active, RIP & RFLAGS should've been accessed 3437 (i.e. read previously from the VMCS or from ring-3). */ 3438 #ifdef VBOX_STRICT 3439 uint64_t const fExtrn = ASMAtomicUoReadU64(&pMixedCtx->fExtrn); 3440 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn)); 3441 #endif 3549 3442 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu)) 3550 3443 { 3551 3444 if (pMixedCtx->eflags.Bits.u1IF) 3552 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;3445 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI; 3553 3446 else 3554 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;3447 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS; 3555 3448 } 3556 3449 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3557 3450 { 3558 3451 /* 3559 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in 3560 * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct. 3452 * We can clear the inhibit force flag as even if we go back to the recompiler 3453 * without executing guest code in VT-x, the flag's condition to be cleared is 3454 * met and thus the cleared state is correct. 3561 3455 */ 3562 3456 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); … … 3574 3468 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)) 3575 3469 { 3576 uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;3577 } 3578 3579 return uIntrState;3580 } 3581 3582 3583 /** 3584 * Loads the guest's interruptibility-state into the guest-state area in the3470 fIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI; 3471 } 3472 3473 return fIntrState; 3474 } 3475 3476 3477 /** 3478 * Exports the guest's interruptibility-state into the guest-state area in the 3585 3479 * VMCS. 3586 3480 * 3587 3481 * @returns VBox status code. 3588 3482 * @param pVCpu The cross context virtual CPU structure. 3589 * @param uIntrState The interruptibility-state to set.3590 */ 3591 static int hmR0Vmx LoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)3483 * @param fIntrState The interruptibility-state to set. 3484 */ 3485 static int hmR0VmxExportGuestIntrState(PVMCPU pVCpu, uint32_t fIntrState) 3592 3486 { 3593 3487 NOREF(pVCpu); 3594 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */ 3595 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */ 3596 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState); 3597 AssertRC(rc); 3598 return rc; 3599 } 3600 3601 3602 /** 3603 * Loads the exception intercepts required for guest execution in the VMCS. 3488 AssertMsg(!(fIntrState & 0xfffffff0), ("%#x\n", fIntrState)); /* Bits 31:4 MBZ. */ 3489 Assert((fIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */ 3490 return VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, fIntrState); 3491 } 3492 3493 3494 /** 3495 * Exports the exception intercepts required for guest execution in the VMCS. 3604 3496 * 3605 3497 * @returns VBox status code. 3606 3498 * @param pVCpu The cross context virtual CPU structure. 3607 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 3608 * out-of-sync. Make sure to update the required fields 3609 * before using them. 
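/* [Editorial sketch -- not part of changeset r72744] The interruptibility-state
 * word built above in hmR0VmxGetGuestIntrState() and written by
 * hmR0VmxExportGuestIntrState() is a 32-bit field: bit 0 is blocking-by-STI,
 * bit 1 blocking-by-MOV-SS, bit 3 blocking-by-NMI; bits 31:4 must be zero and
 * the STI/MOV-SS bits may never be set together, which is exactly what the
 * assertions check. A minimal standalone validity check (constants restated
 * here purely for illustration):
 */
#include <stdbool.h>
#include <stdint.h>

#define INTR_STATE_BLOCK_STI    UINT32_C(0x1)   /* blocking for one instruction after STI */
#define INTR_STATE_BLOCK_MOVSS  UINT32_C(0x2)   /* blocking after MOV SS / POP SS */
#define INTR_STATE_BLOCK_NMI    UINT32_C(0x8)   /* virtual-NMI blocking */

static bool IsValidGuestIntrState(uint32_t fIntrState)
{
    if (fIntrState & UINT32_C(0xfffffff0))      /* bits 31:4 MBZ */
        return false;
    /* Block-by-STI and block-by-MOV-SS cannot both be set. */
    return (fIntrState & (INTR_STATE_BLOCK_STI | INTR_STATE_BLOCK_MOVSS))
        != (INTR_STATE_BLOCK_STI | INTR_STATE_BLOCK_MOVSS);
}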
3610 */ 3611 static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3612 { 3613 NOREF(pMixedCtx); 3614 int rc = VINF_SUCCESS; 3615 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS)) 3616 { 3617 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */ 3499 * 3500 * @remarks No-long-jump zone!!! 3501 */ 3502 static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu) 3503 { 3504 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS) 3505 { 3506 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportSharedCR0(). */ 3618 3507 if (pVCpu->hm.s.fGIMTrapXcptUD) 3619 3508 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD); … … 3626 3515 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB)); 3627 3516 3628 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);3517 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap); 3629 3518 AssertRCReturn(rc, rc); 3630 3519 3631 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS); 3632 Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, 3633 pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu))); 3634 } 3635 return rc; 3636 } 3637 3638 3639 /** 3640 * Loads the guest's RIP into the guest-state area in the VMCS. 3520 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS); 3521 Log4Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64\n", pVCpu->hm.s.vmx.u32XcptBitmap)); 3522 } 3523 return VINF_SUCCESS; 3524 } 3525 3526 3527 /** 3528 * Exports the guest's RIP into the guest-state area in the VMCS. 3641 3529 * 3642 3530 * @returns VBox status code. … … 3648 3536 * @remarks No-long-jump zone!!! 3649 3537 */ 3650 static int hmR0Vmx LoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)3538 static int hmR0VmxExportGuestRip(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3651 3539 { 3652 3540 int rc = VINF_SUCCESS; 3653 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))3541 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP) 3654 3542 { 3655 3543 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip); 3656 3544 AssertRCReturn(rc, rc); 3657 3545 3658 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);3659 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,3660 HMCPU_CF_VALUE(pVCpu)));3661 3662 3546 /* Update the exit history entry with the correct CS.BASE + RIP or just RIP. */ 3663 if ( HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))3547 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS) 3664 3548 EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true); 3665 3549 else 3666 3550 EMR0HistoryUpdatePC(pVCpu, pMixedCtx->rip, false); 3551 3552 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP); 3553 Log4Func(("RIP=%#RX64\n", pMixedCtx->rip)); 3667 3554 } 3668 3555 return rc; … … 3671 3558 3672 3559 /** 3673 * Loads the guest's RSP into the guest-state area in the VMCS.3560 * Exports the guest's RSP into the guest-state area in the VMCS. 3674 3561 * 3675 3562 * @returns VBox status code. … … 3681 3568 * @remarks No-long-jump zone!!! 
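/* [Editorial sketch -- not part of changeset r72744] The RIP/RSP/RFLAGS
 * exporters around this hunk all follow the pattern this change introduces:
 * test an HM_CHANGED_* bit in the 64-bit fCtxChanged mask, write the VMCS
 * field only when it is set, then clear just that bit with an unordered
 * atomic AND so other pending bits survive. A generic standalone rendering of
 * the pattern; the names, bit assignment and write_vmcs() stub below are
 * illustrative stand-ins, not VirtualBox APIs:
 */
#include <stdatomic.h>
#include <stdint.h>

#define CHANGED_GUEST_RSP   (UINT64_C(1) << 3)          /* hypothetical bit assignment */
#define VMCS_GUEST_RSP      UINT32_C(0x681c)            /* natural-width guest RSP encoding */

static _Atomic uint64_t g_fCtxChanged;                  /* stands in for pVCpu->hm.s.fCtxChanged */

static int write_vmcs(uint32_t uField, uint64_t uValue) { (void)uField; (void)uValue; return 0; }

static int export_guest_rsp(uint64_t uGuestRsp)
{
    if (atomic_load_explicit(&g_fCtxChanged, memory_order_relaxed) & CHANGED_GUEST_RSP)
    {
        int rc = write_vmcs(VMCS_GUEST_RSP, uGuestRsp);
        if (rc != 0)
            return rc;
        /* Clear only the bit that was exported; other dirty bits stay pending. */
        atomic_fetch_and_explicit(&g_fCtxChanged, ~CHANGED_GUEST_RSP, memory_order_relaxed);
    }
    return 0;
}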
3682 3569 */ 3683 static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3684 { 3685 int rc = VINF_SUCCESS; 3686 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP)) 3687 { 3688 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp); 3570 static int hmR0VmxExportGuestRsp(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3571 { 3572 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP) 3573 { 3574 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp); 3689 3575 AssertRCReturn(rc, rc); 3690 3576 3691 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP); 3692 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp)); 3693 } 3694 return rc; 3695 } 3696 3697 3698 /** 3699 * Loads the guest's RFLAGS into the guest-state area in the VMCS. 3577 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP); 3578 } 3579 return VINF_SUCCESS; 3580 } 3581 3582 3583 /** 3584 * Exports the guest's RFLAGS into the guest-state area in the VMCS. 3700 3585 * 3701 3586 * @returns VBox status code. … … 3707 3592 * @remarks No-long-jump zone!!! 3708 3593 */ 3709 static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3710 { 3711 int rc = VINF_SUCCESS; 3712 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS)) 3594 static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3595 { 3596 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS) 3713 3597 { 3714 3598 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ). 3715 3599 Let us assert it as such and use 32-bit VMWRITE. */ 3716 3600 Assert(!(pMixedCtx->rflags.u64 >> 32)); 3717 X86EFLAGS Eflags = pMixedCtx->eflags; 3718 /** @todo r=bird: There shall be no need to OR in X86_EFL_1 here, nor 3719 * shall there be any reason for clearing bits 63:22, 15, 5 and 3. 3720 * These will never be cleared/set, unless some other part of the VMM 3721 * code is buggy - in which case we're better of finding and fixing 3722 * those bugs than hiding them. */ 3723 Assert(Eflags.u32 & X86_EFL_RA1_MASK); 3724 Assert(!(Eflags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK))); 3725 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */ 3726 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */ 3601 X86EFLAGS fEFlags = pMixedCtx->eflags; 3602 Assert(fEFlags.u32 & X86_EFL_RA1_MASK); 3603 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK))); 3727 3604 3728 3605 /* 3729 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit. 3730 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode. 3606 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so 3607 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x 3608 * can run the real-mode guest code under Virtual 8086 mode. 3731 3609 */ 3732 3610 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) … … 3734 3612 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS); 3735 3613 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM))); 3736 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */3737 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */3738 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. 
*/3739 } 3740 3741 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);3614 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */ 3615 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */ 3616 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */ 3617 } 3618 3619 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32); 3742 3620 AssertRCReturn(rc, rc); 3743 3621 3744 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS); 3745 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32)); 3746 } 3747 return rc; 3748 } 3749 3750 3751 /** 3752 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS. 3622 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS); 3623 Log4Func(("EFlags=%#RX32\n", fEFlags.u32)); 3624 } 3625 return VINF_SUCCESS; 3626 } 3627 3628 3629 /** 3630 * Exports the guest CR0 control register into the guest-state area in the VMCS. 3631 * 3632 * The guest FPU state is always pre-loaded hence we don't need to bother about 3633 * sharing FPU related CR0 bits between the guest and host. 3753 3634 * 3754 3635 * @returns VBox status code. … … 3760 3641 * @remarks No-long-jump zone!!! 3761 3642 */ 3762 DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3763 { 3764 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx); 3765 rc |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx); 3766 rc |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx); 3767 AssertRCReturn(rc, rc); 3768 return rc; 3769 } 3770 3771 3772 /** 3773 * Loads the guest CR0 control register into the guest-state area in the VMCS. 3774 * CR0 is partially shared with the host and we have to consider the FPU bits. 3775 * 3776 * @returns VBox status code. 3777 * @param pVCpu The cross context virtual CPU structure. 3778 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 3779 * out-of-sync. Make sure to update the required fields 3780 * before using them. 3781 * 3782 * @remarks No-long-jump zone!!! 3783 */ 3784 static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3785 { 3786 Assert(CPUMIsGuestFPUStateActive(pVCpu)); 3787 3788 /* 3789 * Guest CR0. 3790 * Guest FPU. 3791 */ 3792 int rc = VINF_SUCCESS; 3793 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0)) 3794 { 3795 Assert(!(pMixedCtx->cr0 >> 32)); 3796 uint32_t u32GuestCR0 = pMixedCtx->cr0; 3797 PVM pVM = pVCpu->CTX_SUFF(pVM); 3798 3799 /* The guest's view (read access) of its CR0 is unblemished. */ 3800 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0); 3801 AssertRCReturn(rc, rc); 3802 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0)); 3803 3804 /* Setup VT-x's view of the guest CR0. */ 3805 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */ 3643 static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3644 { 3645 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0) 3646 { 3647 PVM pVM = pVCpu->CTX_SUFF(pVM); 3648 Assert(!RT_HI_U32(pMixedCtx->cr0)); 3649 uint32_t const uShadowCR0 = pMixedCtx->cr0; 3650 uint32_t uGuestCR0 = pMixedCtx->cr0; 3651 3652 /* 3653 * Setup VT-x's view of the guest CR0. 3654 * Minimize VM-exits due to CR3 changes when we have NestedPaging. 
3655 */ 3656 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls; 3806 3657 if (pVM->hm.s.fNestedPaging) 3807 3658 { 3808 if (CPUMIsGuestPagingEnabled Ex(pMixedCtx))3659 if (CPUMIsGuestPagingEnabled(pVCpu)) 3809 3660 { 3810 3661 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */ 3811 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT3812 3662 uProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT 3663 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT); 3813 3664 } 3814 3665 else 3815 3666 { 3816 3667 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */ 3817 pVCpu->hm.s.vmx.u32ProcCtls |=VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT3818 |VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;3668 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT 3669 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT; 3819 3670 } 3820 3671 3821 3672 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */ 3822 3673 if (pVM->hm.s.vmx.fUnrestrictedGuest) 3823 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT; 3824 3825 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 3826 AssertRCReturn(rc, rc); 3674 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT; 3827 3675 } 3828 3676 else 3829 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */ 3677 { 3678 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */ 3679 uGuestCR0 |= X86_CR0_WP; 3680 } 3830 3681 3831 3682 /* 3832 3683 * Guest FPU bits. 3833 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first 3834 * CPUs to support VT-x and no mention of with regards to UX in VM-entry checks. 3684 * 3685 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state 3686 * using CR0.TS. 3687 * 3688 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be 3689 * set on the first CPUs to support VT-x and no mention of with regards to UX in VM-entry checks. 3835 3690 */ 3836 u32GuestCR0 |= X86_CR0_NE; 3837 3838 /* Catch floating point exceptions if we need to report them to the guest in a different way. */ 3839 bool fInterceptMF = false; 3840 if (!(pMixedCtx->cr0 & X86_CR0_NE)) 3841 fInterceptMF = true; 3842 3843 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */ 3691 uGuestCR0 |= X86_CR0_NE; 3692 3693 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */ 3694 bool const fInterceptMF = !(uShadowCR0 & X86_CR0_NE); 3695 3696 /* 3697 * Update exception intercepts. 3698 */ 3699 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap; 3844 3700 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 3845 3701 { 3846 3702 Assert(PDMVmmDevHeapIsEnabled(pVM)); 3847 3703 Assert(pVM->hm.s.vmx.pRealModeTSS); 3848 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;3704 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK; 3849 3705 } 3850 3706 else 3851 3707 { 3852 3708 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. 
*/ 3853 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;3709 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK; 3854 3710 if (fInterceptMF) 3855 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF); 3856 } 3857 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS); 3711 uXcptBitmap |= RT_BIT(X86_XCPT_MF); 3712 } 3858 3713 3859 3714 /* Additional intercepts for debugging, define these yourself explicitly. */ 3860 3715 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS 3861 pVCpu->hm.s.vmx.u32XcptBitmap |= 03862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3716 uXcptBitmap |= 0 3717 | RT_BIT(X86_XCPT_BP) 3718 | RT_BIT(X86_XCPT_DE) 3719 | RT_BIT(X86_XCPT_NM) 3720 | RT_BIT(X86_XCPT_TS) 3721 | RT_BIT(X86_XCPT_UD) 3722 | RT_BIT(X86_XCPT_NP) 3723 | RT_BIT(X86_XCPT_SS) 3724 | RT_BIT(X86_XCPT_GP) 3725 | RT_BIT(X86_XCPT_PF) 3726 | RT_BIT(X86_XCPT_MF) 3727 ; 3873 3728 #elif defined(HMVMX_ALWAYS_TRAP_PF) 3874 pVCpu->hm.s.vmx.u32XcptBitmap|= RT_BIT(X86_XCPT_PF);3729 uXcptBitmap |= RT_BIT(X86_XCPT_PF); 3875 3730 #endif 3876 3731 if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap) 3732 { 3733 pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap; 3734 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS); 3735 } 3877 3736 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF))); 3878 3737 3879 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */ 3880 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3881 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3882 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */ 3883 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG); 3738 /* 3739 * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). 3740 */ 3741 uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3742 uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3743 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */ 3744 fSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG); 3884 3745 else 3885 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)); 3886 3887 u32GuestCR0 |= uSetCR0; 3888 u32GuestCR0 &= uZapCR0; 3889 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */ 3890 3891 /* Write VT-x's view of the guest CR0 into the VMCS. */ 3892 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0); 3893 AssertRCReturn(rc, rc); 3894 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0, 3895 uZapCR0)); 3746 Assert((fSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)); 3747 3748 uGuestCR0 |= fSetCR0; 3749 uGuestCR0 &= fZapCR0; 3750 uGuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */ 3896 3751 3897 3752 /* … … 3900 3755 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables). 3901 3756 */ 3902 uint32_t u32CR0Mask = 0; 3903 u32CR0Mask = X86_CR0_PE 3904 | X86_CR0_NE 3905 | X86_CR0_WP 3906 | X86_CR0_PG 3907 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */ 3908 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */ 3909 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. 
Don't let the guest modify the host CR0.NW */ 3757 uint32_t uCR0Mask = X86_CR0_PE 3758 | X86_CR0_NE 3759 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP) 3760 | X86_CR0_PG 3761 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */ 3762 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */ 3763 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */ 3910 3764 3911 3765 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM … … 3914 3768 #if 0 3915 3769 if (pVM->hm.s.vmx.fUnrestrictedGuest) 3916 u 32CR0Mask &= ~X86_CR0_PE;3770 uCr0Mask &= ~X86_CR0_PE; 3917 3771 #endif 3918 if (pVM->hm.s.fNestedPaging) 3919 u32CR0Mask &= ~X86_CR0_WP; 3920 3921 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */ 3922 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask; 3923 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask); 3772 /* Update the HMCPU's copy of the CR0 mask. */ 3773 pVCpu->hm.s.vmx.u32CR0Mask = uCR0Mask; 3774 3775 /* 3776 * Finally, update VMCS fields with the CR0 values. 3777 */ 3778 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, uGuestCR0); 3779 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, uShadowCR0); 3780 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, uCR0Mask); 3781 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls) 3782 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls); 3924 3783 AssertRCReturn(rc, rc); 3925 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask)); 3926 3927 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0); 3928 } 3929 return rc; 3930 } 3931 3932 3933 /** 3934 * Loads the guest control registers (CR3, CR4) into the guest-state area 3784 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls; 3785 3786 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0); 3787 3788 Log4Func(("uCr0Mask=%#RX32 uShadowCR0=%#RX32 uGuestCR0=%#RX32 (fSetCR0=%#RX32 fZapCR0=%#RX32\n", uCR0Mask, uShadowCR0, 3789 uGuestCR0, fSetCR0, fZapCR0)); 3790 } 3791 3792 return VINF_SUCCESS; 3793 } 3794 3795 3796 /** 3797 * Exports the guest control registers (CR3, CR4) into the guest-state area 3935 3798 * in the VMCS. 3936 3799 * … … 3947 3810 * @remarks No-long-jump zone!!! 3948 3811 */ 3949 static VBOXSTRICTRC hmR0Vmx LoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)3812 static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3950 3813 { 3951 3814 int rc = VINF_SUCCESS; … … 3960 3823 * Guest CR3. 3961 3824 */ 3962 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))3825 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3) 3963 3826 { 3964 3827 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS; … … 3986 3849 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP); 3987 3850 AssertRCReturn(rc, rc); 3988 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));3989 3851 3990 3852 if ( pVM->hm.s.vmx.fUnrestrictedGuest … … 4003 3865 } 4004 3866 4005 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we 4006 have Unrestricted Execution to handle the guest when it's not using paging. */ 3867 /* 3868 * The guest's view of its CR3 is unblemished with Nested Paging when the 3869 * guest is using paging or we have unrestricted guest execution to handle 3870 * the guest when it's not using paging. 
3871 */ 4007 3872 GCPhysGuestCR3 = pMixedCtx->cr3; 4008 3873 } … … 4010 3875 { 4011 3876 /* 4012 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory 4013 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses. 4014 * EPT takes care of translating it to host-physical addresses. 3877 * The guest is not using paging, but the CPU (VT-x) has to. While the guest 3878 * thinks it accesses physical memory directly, we use our identity-mapped 3879 * page table to map guest-linear to guest-physical addresses. EPT takes care 3880 * of translating it to host-physical addresses. 4015 3881 */ 4016 3882 RTGCPHYS GCPhys; … … 4023 3889 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS) 4024 3890 { 4025 Log4 (("Load[%RU32]: VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n", pVCpu->idCpu));3891 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n")); 4026 3892 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */ 4027 3893 } … … 4032 3898 } 4033 3899 4034 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGp (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));3900 Log4Func(("uGuestCR3=%#RGp (GstN)\n", GCPhysGuestCR3)); 4035 3901 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3); 3902 AssertRCReturn(rc, rc); 4036 3903 } 4037 3904 else … … 4040 3907 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu); 4041 3908 4042 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));3909 Log4Func(("uGuestCR3=%#RHv (HstN)\n", HCPhysGuestCR3)); 4043 3910 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3); 4044 }4045 AssertRCReturn(rc, rc);4046 4047 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_CR3);3911 AssertRCReturn(rc, rc); 3912 } 3913 3914 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3); 4048 3915 } 4049 3916 … … 4052 3919 * ASSUMES this is done everytime we get in from ring-3! (XCR0) 4053 3920 */ 4054 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))4055 { 4056 Assert(! (pMixedCtx->cr4 >> 32));4057 uint32_t u 32GuestCR4 = pMixedCtx->cr4;3921 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4) 3922 { 3923 Assert(!RT_HI_U32(pMixedCtx->cr4)); 3924 uint32_t uGuestCR4 = pMixedCtx->cr4; 4058 3925 4059 3926 /* The guest's view of its CR4 is unblemished. */ 4060 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u 32GuestCR4);3927 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uGuestCR4); 4061 3928 AssertRCReturn(rc, rc); 4062 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4)); 4063 4064 /* Setup VT-x's view of the guest CR4. */ 3929 Log4Func(("uShadowCR4=%#RX32\n", uGuestCR4)); 3930 4065 3931 /* 4066 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program 4067 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0()) 3932 * Setup VT-x's view of the guest CR4. 3933 * 3934 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software 3935 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt 3936 * redirection bitmap is already all 0, see hmR3InitFinalizeR0()) 3937 * 4068 3938 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode". 
4069 3939 */ … … 4072 3942 Assert(pVM->hm.s.vmx.pRealModeTSS); 4073 3943 Assert(PDMVmmDevHeapIsEnabled(pVM)); 4074 u 32GuestCR4 &= ~X86_CR4_VME;3944 uGuestCR4 &= ~X86_CR4_VME; 4075 3945 } 4076 3946 … … 4081 3951 { 4082 3952 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */ 4083 u 32GuestCR4 |= X86_CR4_PSE;3953 uGuestCR4 |= X86_CR4_PSE; 4084 3954 /* Our identity mapping is a 32-bit page directory. */ 4085 u 32GuestCR4 &= ~X86_CR4_PAE;3955 uGuestCR4 &= ~X86_CR4_PAE; 4086 3956 } 4087 3957 /* else use guest CR4.*/ … … 4099 3969 case PGMMODE_32_BIT: /* 32-bit paging. */ 4100 3970 { 4101 u 32GuestCR4 &= ~X86_CR4_PAE;3971 uGuestCR4 &= ~X86_CR4_PAE; 4102 3972 break; 4103 3973 } … … 4106 3976 case PGMMODE_PAE_NX: /* PAE paging with NX. */ 4107 3977 { 4108 u 32GuestCR4 |= X86_CR4_PAE;3978 uGuestCR4 |= X86_CR4_PAE; 4109 3979 break; 4110 3980 } … … 4122 3992 4123 3993 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */ 4124 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);4125 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);4126 u 32GuestCR4 |= uSetCR4;4127 u 32GuestCR4 &= uZapCR4;3994 uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 3995 uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 3996 uGuestCR4 |= fSetCR4; 3997 uGuestCR4 &= fZapCR4; 4128 3998 4129 3999 /* Write VT-x's view of the guest CR4 into the VMCS. */ 4130 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));4131 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u 32GuestCR4);4000 Log4Func(("uGuestCR4=%#RX32 (fSetCR4=%#RX32 fZapCR4=%#RX32)\n", uGuestCR4, fSetCR4, fZapCR4)); 4001 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4); 4132 4002 AssertRCReturn(rc, rc); 4133 4003 … … 4149 4019 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0(); 4150 4020 4151 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_CR4);4021 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4); 4152 4022 } 4153 4023 return rc; … … 4156 4026 4157 4027 /** 4158 * Loads the guest debug registers into the guest-state area in the VMCS. 4028 * Exports the guest debug registers into the guest-state area in the VMCS. 4029 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3). 4159 4030 * 4160 4031 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits. 4161 *4162 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).4163 4032 * 4164 4033 * @returns VBox status code. … … 4170 4039 * @remarks No-long-jump zone!!! 
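/* [Editorial sketch -- not part of changeset r72744] The fSetCR4/fZapCR4 pair
 * computed in hmR0VmxExportGuestCR3AndCR4() above comes straight from the
 * IA32_VMX_CR4_FIXED0/FIXED1 MSRs: bits set in FIXED0 must be 1 in CR4 while
 * VMX is on, and bits clear in FIXED1 must be 0. A standalone helper showing
 * the same arithmetic; the MSR values in the example are illustrative only:
 */
#include <stdint.h>

static uint64_t ApplyCr4FixedBits(uint64_t uCr4, uint64_t uFixed0, uint64_t uFixed1)
{
    uint64_t const fSet = uFixed0 & uFixed1;    /* bits that must be 1 */
    uint64_t const fZap = uFixed0 | uFixed1;    /* bits allowed to be 1 */
    uCr4 |= fSet;
    uCr4 &= fZap;
    return uCr4;
}
/* Example: uFixed0=0x2000 (only CR4.VMXE forced), uFixed1=0x3767ff and a guest
 * CR4 of 0x20 (PAE) yields ApplyCr4FixedBits() == 0x2020. */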
4171 4040 */ 4172 static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 4173 { 4174 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG)) 4175 return VINF_SUCCESS; 4041 static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 4042 { 4043 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 4176 4044 4177 4045 #ifdef VBOX_STRICT … … 4185 4053 #endif 4186 4054 4187 int rc; 4188 PVM pVM = pVCpu->CTX_SUFF(pVM); 4189 bool fSteppingDB = false; 4190 bool fInterceptMovDRx = false; 4055 bool fSteppingDB = false; 4056 bool fInterceptMovDRx = false; 4057 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls; 4191 4058 if (pVCpu->hm.s.fSingleInstruction) 4192 4059 { 4193 4060 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */ 4061 PVM pVM = pVCpu->CTX_SUFF(pVM); 4194 4062 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG) 4195 4063 { 4196 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG; 4197 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 4198 AssertRCReturn(rc, rc); 4064 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG; 4199 4065 Assert(fSteppingDB == false); 4200 4066 } … … 4202 4068 { 4203 4069 pMixedCtx->eflags.u32 |= X86_EFL_TF; 4070 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS; 4204 4071 pVCpu->hm.s.fClearTrapFlag = true; 4205 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);4206 4072 fSteppingDB = true; 4207 4073 } 4208 4074 } 4209 4075 4076 uint32_t uGuestDR7; 4210 4077 if ( fSteppingDB 4211 4078 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK)) 4212 4079 { 4213 4080 /* 4214 * Use the combined guest and host DRx values found in the hypervisor 4215 * register set because the debugger has breakpoints active or someone4216 * is single stepping on thehost side without a monitor trap flag.4081 * Use the combined guest and host DRx values found in the hypervisor register set 4082 * because the debugger has breakpoints active or someone is single stepping on the 4083 * host side without a monitor trap flag. 4217 4084 * 4218 4085 * Note! DBGF expects a clean DR6 state before executing guest code. 4219 4086 */ 4220 4087 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 4221 if ( CPUMIsGuestInLongModeEx(pMixedCtx)4088 if ( CPUMIsGuestInLongModeEx(pMixedCtx) 4222 4089 && !CPUMIsHyperDebugStateActivePending(pVCpu)) 4223 4090 { … … 4235 4102 } 4236 4103 4237 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */ 4238 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu)); 4239 AssertRCReturn(rc, rc); 4240 4104 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */ 4105 uGuestDR7 = (uint32_t)CPUMGetHyperDR7(pVCpu); 4241 4106 pVCpu->hm.s.fUsingHyperDR7 = true; 4242 4107 fInterceptMovDRx = true; … … 4248 4113 * executing guest code so they'll trigger at the right time. 4249 4114 */ 4250 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */4115 if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK) 4251 4116 { 4252 4117 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 4253 if ( CPUMIsGuestInLongModeEx(pMixedCtx)4118 if ( CPUMIsGuestInLongModeEx(pMixedCtx) 4254 4119 && !CPUMIsGuestDebugStateActivePending(pVCpu)) 4255 4120 { … … 4286 4151 } 4287 4152 4288 /* Update guest DR7. 
*/ 4289 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]); 4290 AssertRCReturn(rc, rc); 4291 4153 /* Update DR7 with the actual guest value. */ 4154 uGuestDR7 = pMixedCtx->dr[7]; 4292 4155 pVCpu->hm.s.fUsingHyperDR7 = false; 4293 4156 } 4294 4157 4158 if (fInterceptMovDRx) 4159 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; 4160 else 4161 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; 4162 4295 4163 /* 4296 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.4164 * Update the processor-based VM-execution controls for MOV-DRx intercepts and the monitor-trap flag. 4297 4165 */ 4298 if (fInterceptMovDRx) 4299 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; 4300 else 4301 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; 4302 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 4166 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls) 4167 { 4168 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 4169 AssertRCReturn(rc2, rc2); 4170 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls; 4171 } 4172 4173 /* 4174 * Update guest DR7. 4175 */ 4176 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, uGuestDR7); 4303 4177 AssertRCReturn(rc, rc); 4304 4178 4305 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);4306 4179 return VINF_SUCCESS; 4307 4180 } … … 4312 4185 * Strict function to validate segment registers. 4313 4186 * 4314 * @remarks ASSUMES CR0 is up to date. 4315 */ 4316 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 4317 { 4318 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */ 4319 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg() 4320 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */ 4187 * @remarks Will import guest CR0 on strict builds during validation of 4188 * segments. 4189 */ 4190 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx) 4191 { 4192 /* 4193 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". 4194 * 4195 * The reason we check for attribute value 0 in this function and not just the unusable bit is 4196 * because hmR0VmxWriteSegmentReg() only updates the VMCS' copy of the value with the unusable bit 4197 * and doesn't change the guest-context value. 4198 */ 4199 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 4321 4200 if ( !pVM->hm.s.vmx.fUnrestrictedGuest 4322 4201 && ( !CPUMIsGuestInRealModeEx(pCtx) … … 4492 4371 */ 4493 4372 static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, 4494 uint32_t idxAccess, PC PUMSELREG pSelReg)4373 uint32_t idxAccess, PCCPUMSELREG pSelReg) 4495 4374 { 4496 4375 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */ … … 4510 4389 { 4511 4390 /* 4512 * The way to differentiate between whether this is really a null selector or was just a selector loaded with 0 in 4513 * real-mode is using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in 4514 * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures NULL selectors 4515 * loaded in protected-mode have their attribute as 0. 
4391 * The way to differentiate between whether this is really a null selector or was just 4392 * a selector loaded with 0 in real-mode is using the segment attributes. A selector 4393 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we 4394 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures 4395 * NULL selectors loaded in protected-mode have their attribute as 0. 4516 4396 */ 4517 4397 if (!u32Access) … … 4530 4410 4531 4411 /** 4532 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)4412 * Exports the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases) 4533 4413 * into the guest-state area in the VMCS. 4534 4414 * … … 4539 4419 * before using them. 4540 4420 * 4541 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation). 4421 * @remarks Will import guest CR0 on strict builds during validation of 4422 * segments. 4542 4423 * @remarks No-long-jump zone!!! 4543 4424 */ 4544 static int hmR0Vmx LoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)4425 static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 4545 4426 { 4546 4427 int rc = VERR_INTERNAL_ERROR_5; … … 4550 4431 * Guest Segment registers: CS, SS, DS, ES, FS, GS. 4551 4432 */ 4552 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))4433 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK) 4553 4434 { 4554 4435 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */ … … 4574 4455 in real-mode (e.g. OpenBSD 4.0) */ 4575 4456 REMFlushTBs(pVM); 4576 Log4 (("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));4457 Log4Func(("Switch to protected mode detected!\n")); 4577 4458 pVCpu->hm.s.vmx.fWasInRealMode = false; 4578 4459 } … … 4603 4484 #endif 4604 4485 4605 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);4606 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,4607 pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));4608 4609 4486 /* Update the exit history entry with the correct CS.BASE + RIP. */ 4610 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))4487 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP) 4611 4488 EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true); 4489 4490 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SREG_MASK); 4491 Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base, 4492 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u)); 4612 4493 } 4613 4494 … … 4615 4496 * Guest TR. 4616 4497 */ 4617 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))4498 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR) 4618 4499 { 4619 4500 /* … … 4675 4556 AssertRCReturn(rc, rc); 4676 4557 4677 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_TR);4678 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu,u64Base));4558 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR); 4559 Log4Func(("TR base=%#RX64\n", pMixedCtx->tr.u64Base)); 4679 4560 } 4680 4561 … … 4682 4563 * Guest GDTR. 
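/* [Editorial sketch -- not part of changeset r72744] hmR0VmxWriteSegmentReg()
 * above tells a genuinely null selector apart from a real-mode selector that
 * merely holds the value 0 by looking at the attribute bits: protected-mode
 * null selectors carry attribute 0 and must be flagged "unusable" (bit 16 of
 * the VMX access-rights field), while real-mode selectors keep their
 * attributes and stay usable. The same decision in isolation:
 */
#include <stdint.h>

#define SEG_ATTR_UNUSABLE   UINT32_C(0x10000)   /* bit 16 of the access-rights field */

static uint32_t MakeVmcsAccessRights(uint32_t uAttr)
{
    /* Attribute 0 can only come from a protected-mode null selector. */
    return uAttr ? uAttr : SEG_ATTR_UNUSABLE;
}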
4683 4564 */ 4684 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))4565 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR) 4685 4566 { 4686 4567 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); … … 4691 4572 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 4692 4573 4693 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_GDTR);4694 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));4574 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR); 4575 Log4Func(("GDTR base=%#RX64\n", pMixedCtx->gdtr.pGdt)); 4695 4576 } 4696 4577 … … 4698 4579 * Guest LDTR. 4699 4580 */ 4700 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))4581 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR) 4701 4582 { 4702 4583 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */ … … 4728 4609 } 4729 4610 4730 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_LDTR);4731 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));4611 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR); 4612 Log4Func(("LDTR base=%#RX64\n", pMixedCtx->ldtr.u64Base)); 4732 4613 } 4733 4614 … … 4735 4616 * Guest IDTR. 4736 4617 */ 4737 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))4618 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR) 4738 4619 { 4739 4620 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); … … 4744 4625 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 4745 4626 4746 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_IDTR);4747 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));4627 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR); 4628 Log4Func(("IDTR base=%#RX64\n", pMixedCtx->idtr.pIdt)); 4748 4629 } 4749 4630 … … 4753 4634 4754 4635 /** 4755 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store4636 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store 4756 4637 * areas. 4757 4638 * … … 4759 4640 * VM-entry and stored from the host CPU on every successful VM-exit. This also 4760 4641 * creates/updates MSR slots for the host MSRs. The actual host MSR values are 4761 * -not- updated here for performance reasons. See hmR0Vmx SaveHostMsrs().4762 * 4763 * Also loads thesysenter MSRs into the guest-state area in the VMCS.4642 * -not- updated here for performance reasons. See hmR0VmxExportHostMsrs(). 4643 * 4644 * Also exports the guest sysenter MSRs into the guest-state area in the VMCS. 4764 4645 * 4765 4646 * @returns VBox status code. … … 4771 4652 * @remarks No-long-jump zone!!! 4772 4653 */ 4773 static int hmR0Vmx LoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)4654 static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 4774 4655 { 4775 4656 AssertPtr(pVCpu); … … 4778 4659 /* 4779 4660 * MSRs that we use the auto-load/store MSR area in the VMCS. 4661 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). 4780 4662 */ 4781 4663 PVM pVM = pVCpu->CTX_SUFF(pVM); 4782 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS)) 4783 { 4784 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). 
*/ 4664 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS) 4665 { 4666 if (pVM->hm.s.fAllow64BitGuests) 4667 { 4785 4668 #if HC_ARCH_BITS == 32 4786 if (pVM->hm.s.fAllow64BitGuests)4787 {4788 4669 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL); 4789 4670 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL); … … 4792 4673 AssertRCReturn(rc, rc); 4793 4674 # ifdef LOG_ENABLED 4794 P VMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;4675 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)CpVCpu->hm.s.vmx.pvGuestMsr; 4795 4676 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++) 4796 { 4797 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr, 4798 pMsr->u64Value)); 4799 } 4677 Log4Func(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value)); 4800 4678 # endif 4801 }4802 4679 #endif 4803 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 4680 } 4681 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS); 4804 4682 } 4805 4683 … … 4809 4687 * VM-exits on WRMSRs for these MSRs. 4810 4688 */ 4811 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR)) 4812 { 4813 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc); 4814 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); 4815 } 4816 4817 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR)) 4818 { 4819 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc); 4820 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 4821 } 4822 4823 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR)) 4824 { 4825 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc); 4826 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 4827 } 4828 4829 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR)) 4689 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK) 4690 { 4691 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR) 4692 { 4693 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); 4694 AssertRCReturn(rc, rc); 4695 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR); 4696 } 4697 4698 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR) 4699 { 4700 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); 4701 AssertRCReturn(rc, rc); 4702 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 4703 } 4704 4705 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR) 4706 { 4707 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); 4708 AssertRCReturn(rc, rc); 4709 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 4710 } 4711 } 4712 4713 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR) 4830 4714 { 4831 4715 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx)) … … 4839 4723 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER); 4840 4724 AssertRCReturn(rc,rc); 4841 Log4 (("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));4725 Log4Func(("EFER=%#RX64\n", pMixedCtx->msrEFER)); 4842 4726 } 4843 4727 else … … 4850 
4734 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 4851 4735 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE); 4852 Log4 (("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,4853 pMixedCtx->msrEFER,pVCpu->hm.s.vmx.cMsrs));4736 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER, 4737 pVCpu->hm.s.vmx.cMsrs)); 4854 4738 } 4855 4739 } 4856 4740 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer) 4857 4741 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER); 4858 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_EFER_MSR);4742 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR); 4859 4743 } 4860 4744 … … 4863 4747 4864 4748 4865 /** 4866 * Loads the guest activity state into the guest-state area in the VMCS. 4749 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 4750 /** 4751 * Check if guest state allows safe use of 32-bit switcher again. 4752 * 4753 * Segment bases and protected mode structures must be 32-bit addressable 4754 * because the 32-bit switcher will ignore high dword when writing these VMCS 4755 * fields. See @bugref{8432} for details. 4756 * 4757 * @returns true if safe, false if must continue to use the 64-bit switcher. 4758 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 4759 * out-of-sync. Make sure to update the required fields 4760 * before using them. 4761 * 4762 * @remarks No-long-jump zone!!! 4763 */ 4764 static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pMixedCtx) 4765 { 4766 if (pMixedCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false; 4767 if (pMixedCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false; 4768 if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false; 4769 if (pMixedCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false; 4770 if (pMixedCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false; 4771 if (pMixedCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4772 if (pMixedCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false; 4773 if (pMixedCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false; 4774 if (pMixedCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4775 if (pMixedCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4776 4777 /* All good, bases are 32-bit. */ 4778 return true; 4779 } 4780 #endif 4781 4782 4783 /** 4784 * Selects up the appropriate function to run guest code. 4867 4785 * 4868 4786 * @returns VBox status code. … … 4874 4792 * @remarks No-long-jump zone!!! 4875 4793 */ 4876 static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 4877 { 4878 NOREF(pMixedCtx); 4879 /** @todo See if we can make use of other states, e.g. 4880 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */ 4881 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)) 4882 { 4883 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE); 4884 AssertRCReturn(rc, rc); 4885 4886 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE); 4887 } 4888 return VINF_SUCCESS; 4889 } 4890 4891 4892 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 4893 /** 4894 * Check if guest state allows safe use of 32-bit switcher again. 4895 * 4896 * Segment bases and protected mode structures must be 32-bit addressable 4897 * because the 32-bit switcher will ignore high dword when writing these VMCS 4898 * fields. 
See @bugref{8432} for details. 4899 * 4900 * @returns true if safe, false if must continue to use the 64-bit switcher. 4901 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 4902 * out-of-sync. Make sure to update the required fields 4903 * before using them. 4904 * 4905 * @remarks No-long-jump zone!!! 4906 */ 4907 static bool hmR0VmxIs32BitSwitcherSafe(PCPUMCTX pMixedCtx) 4908 { 4909 if (pMixedCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) 4910 return false; 4911 if (pMixedCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) 4912 return false; 4913 if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) 4914 return false; 4915 if (pMixedCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) 4916 return false; 4917 if (pMixedCtx->es.u64Base & UINT64_C(0xffffffff00000000)) 4918 return false; 4919 if (pMixedCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) 4920 return false; 4921 if (pMixedCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) 4922 return false; 4923 if (pMixedCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) 4924 return false; 4925 if (pMixedCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) 4926 return false; 4927 if (pMixedCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) 4928 return false; 4929 /* All good, bases are 32-bit. */ 4930 return true; 4931 } 4932 #endif 4933 4934 4935 /** 4936 * Sets up the appropriate function to run guest code. 4937 * 4938 * @returns VBox status code. 4939 * @param pVCpu The cross context virtual CPU structure. 4940 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 4941 * out-of-sync. Make sure to update the required fields 4942 * before using them. 4943 * 4944 * @remarks No-long-jump zone!!! 4945 */ 4946 static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 4794 static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 4947 4795 { 4948 4796 if (CPUMIsGuestInLongModeEx(pMixedCtx)) … … 4956 4804 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64) 4957 4805 { 4806 #ifdef VBOX_STRICT 4958 4807 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */ 4959 4808 { 4960 4809 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */ 4961 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS 4962 | HM_CHANGED_VMX_ENTRY_CTLS 4963 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu))); 4810 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 4811 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS 4812 | HM_CHANGED_VMX_ENTRY_CTLS 4813 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged)); 4964 4814 } 4815 #endif 4965 4816 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64; 4966 4817 … … 4968 4819 the rest of the VM run (until VM reset). See @bugref{8432#c7}. */ 4969 4820 pVCpu->hm.s.vmx.fSwitchedTo64on32 = true; 4970 Log4 (("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 64-bit switcher\n", pVCpu->idCpu));4821 Log4Func(("Selected 64-bit switcher\n")); 4971 4822 } 4972 4823 #else … … 4983 4834 && pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */ 4984 4835 { 4836 # ifdef VBOX_STRICT 4985 4837 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. 
*/ 4986 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS 4987 | HM_CHANGED_VMX_ENTRY_CTLS 4988 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu))); 4838 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 4839 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS 4840 | HM_CHANGED_VMX_ENTRY_CTLS 4841 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged)); 4842 # endif 4989 4843 } 4990 4844 # ifdef VBOX_ENABLE_64_BITS_GUESTS 4991 4845 /* 4992 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel design, see @bugref{8432#c7}. 4993 * If real-on-v86 mode is active, clear the 64-bit switcher flag because now we know the guest is in a sane 4994 * state where it's safe to use the 32-bit switcher. Otherwise check the guest state if it's safe to use 4846 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel 4847 * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit 4848 * switcher flag because now we know the guest is in a sane state where it's safe 4849 * to use the 32-bit switcher. Otherwise check the guest state if it's safe to use 4995 4850 * the much faster 32-bit switcher again. 4996 4851 */ … … 4998 4853 { 4999 4854 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32) 5000 Log4 (("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 32-bit switcher\n", pVCpu->idCpu));4855 Log4Func(("Selected 32-bit switcher\n")); 5001 4856 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32; 5002 4857 } … … 5009 4864 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false; 5010 4865 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32; 5011 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR5012 | HM_CHANGED_VMX_ENTRY_CTLS5013 | HM_CHANGED_VMX_EXIT_CTLS5014 | HM_CHANGED_HOST_CONTEXT);5015 Log4 (("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 32-bit switcher (safe)\n", pVCpu->idCpu));4866 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR 4867 | HM_CHANGED_VMX_ENTRY_CTLS 4868 | HM_CHANGED_VMX_EXIT_CTLS 4869 | HM_CHANGED_HOST_CONTEXT); 4870 Log4Func(("Selected 32-bit switcher (safe)\n")); 5016 4871 } 5017 4872 } … … 5040 4895 DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 5041 4896 { 4897 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */ 4898 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM; 4899 5042 4900 /* 5043 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations 5044 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper. 5045 * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details. 4901 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses 4902 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are 4903 * callee-saved and thus the need for this XMM wrapper. 4904 * 4905 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage". 5046 4906 */ 5047 4907 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED); … … 5075 4935 HMVMX_ASSERT_PREEMPT_SAFE(); 5076 4936 5077 Log4 (("VM-entry failure: %Rrc\n", rcVMRun));4937 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun)); 5078 4938 switch (rcVMRun) 5079 4939 { … … 5322 5182 * 5323 5183 * @returns VBox status code (no informational status codes). 
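/* [Editorial sketch -- not part of changeset r72744] The fResumeVM flag that
 * hmR0VmxRunGuest() derives from HMVMX_VMCS_STATE_LAUNCHED above encodes the
 * VMX rule that a VMCS in the "clear" state must be started with VMLAUNCH and
 * one already in the "launched" state continued with VMRESUME; using the wrong
 * instruction makes VM-entry fail with a VM-instruction error. In isolation
 * (the flag value below is illustrative, not the VirtualBox constant):
 */
#include <stdbool.h>
#include <stdint.h>

#define VMCS_STATE_LAUNCHED UINT32_C(0x2)   /* hypothetical flag value */

static bool ShouldUseVmresume(uint32_t fVmcsState)
{
    return (fVmcsState & VMCS_STATE_LAUNCHED) != 0;
}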
5324 * @param pVM The cross context VM structure.5325 5184 * @param pVCpu The cross context virtual CPU structure. 5326 * @param pCtx Pointer to the guest CPU context.5327 5185 * @param enmOp The operation to perform. 5328 5186 * @param cParams Number of parameters. 5329 5187 * @param paParam Array of 32-bit parameters. 5330 5188 */ 5331 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,5189 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, 5332 5190 uint32_t cParams, uint32_t *paParam) 5333 5191 { 5334 NOREF(pCtx); 5335 5192 PVM pVM = pVCpu->CTX_SUFF(pVM); 5336 5193 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 5337 5194 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END); … … 5453 5310 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1; 5454 5311 #endif 5455 int rc = VMXR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);5312 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]); 5456 5313 5457 5314 #ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 5537 5394 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP); 5538 5395 5539 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */ 5396 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for 5397 these 64-bit fields (using "FULL" and "HIGH" fields). */ 5540 5398 #if 0 5541 5399 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL); … … 5774 5632 5775 5633 5776 #ifdef HMVMX_USE_IEM_EVENT_REFLECTION5777 5634 /** 5778 5635 * Gets the IEM exception flags for the specified vector and IDT vectoring / … … 5832 5689 } 5833 5690 5834 #else5835 /**5836 * Determines if an exception is a contributory exception.5837 *5838 * Contributory exceptions are ones which can cause double-faults unless the5839 * original exception was a benign exception. 
Page-fault is intentionally not5840 * included here as it's a conditional contributory exception.5841 *5842 * @returns true if the exception is contributory, false otherwise.5843 * @param uVector The exception vector.5844 */5845 DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)5846 {5847 switch (uVector)5848 {5849 case X86_XCPT_GP:5850 case X86_XCPT_SS:5851 case X86_XCPT_NP:5852 case X86_XCPT_TS:5853 case X86_XCPT_DE:5854 return true;5855 default:5856 break;5857 }5858 return false;5859 }5860 #endif /* HMVMX_USE_IEM_EVENT_REFLECTION */5861 5862 5691 5863 5692 /** … … 5930 5759 uint32_t const uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo); 5931 5760 5932 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); AssertRCReturn(rc2, rc2); 5933 rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); AssertRCReturn(rc2, rc2); 5761 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); 5762 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 5763 AssertRCReturn(rc2, rc2); 5934 5764 5935 5765 VBOXSTRICTRC rcStrict = VINF_SUCCESS; … … 5938 5768 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo); 5939 5769 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo); 5940 #ifdef HMVMX_USE_IEM_EVENT_REFLECTION 5770 5941 5771 /* 5942 * If the event was a software interrupt (generated with INT n) or a software exception (generated 5943 * by INT3/INTO) or a privileged software exception (generated by INT1), we can handle the VM-exit 5944 * and continue guest execution which will re-execute the instruction rather than re-injecting the 5945 * exception, as that can cause premature trips to ring-3 before injection and involve TRPM which 5946 * currently has no way of storing that these exceptions were caused by these instructions 5947 * (ICEBP's #DB poses the problem). 5772 * If the event was a software interrupt (generated with INT n) or a software exception 5773 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we 5774 * can handle the VM-exit and continue guest execution which will re-execute the 5775 * instruction rather than re-injecting the exception, as that can cause premature 5776 * trips to ring-3 before injection and involve TRPM which currently has no way of 5777 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses 5778 * the problem). 5948 5779 */ 5949 5780 IEMXCPTRAISE enmRaise; … … 5965 5796 ("hmR0VmxCheckExitDueToEventDelivery: Unexpected VM-exit interruption info. 
%#x!\n", 5966 5797 uExitVectorType), VERR_VMX_IPE_5); 5798 5967 5799 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo); 5968 5800 … … 6009 5841 case IEMXCPTRAISE_CURRENT_XCPT: 6010 5842 { 6011 Log4 (("IDT: vcpu[%RU32] Pending secondary xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n", pVCpu->idCpu,6012 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));5843 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n", 5844 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo)); 6013 5845 Assert(rcStrict == VINF_SUCCESS); 6014 5846 break; … … 6032 5864 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2); 6033 5865 6034 Log4 (("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,6035 pVCpu->hm.s.Event.u32ErrCode));5866 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo, 5867 pVCpu->hm.s.Event.u32ErrCode)); 6036 5868 Assert(rcStrict == VINF_SUCCESS); 6037 5869 break; … … 6051 5883 { 6052 5884 pVmxTransient->fVectoringDoublePF = true; 6053 Log4 (("IDT: vcpu[%RU32] Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,5885 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo, 6054 5886 pMixedCtx->cr2)); 6055 5887 rcStrict = VINF_SUCCESS; … … 6059 5891 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect); 6060 5892 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx); 6061 Log4 (("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,6062 pVCpu->hm.s.Event.u64IntInfo,uIdtVector, uExitVector));5893 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo, 5894 uIdtVector, uExitVector)); 6063 5895 rcStrict = VINF_HM_DOUBLE_FAULT; 6064 5896 } … … 6068 5900 case IEMXCPTRAISE_TRIPLE_FAULT: 6069 5901 { 6070 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector, 6071 uExitVector)); 5902 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector)); 6072 5903 rcStrict = VINF_EM_RESET; 6073 5904 break; … … 6076 5907 case IEMXCPTRAISE_CPU_HANG: 6077 5908 { 6078 Log4 (("IDT: vcpu[%RU32] Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", pVCpu->idCpu, fRaiseInfo));5909 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo)); 6079 5910 rcStrict = VERR_EM_GUEST_CPU_HANG; 6080 5911 break; … … 6088 5919 } 6089 5920 } 6090 #else6091 typedef enum6092 {6093 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */6094 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */6095 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */6096 VMXREFLECTXCPT_HANG, /* Indicate bad VM trying to deadlock the CPU. */6097 VMXREFLECTXCPT_NONE /* Nothing to reflect. */6098 } VMXREFLECTXCPT;6099 6100 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". 
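Both the IEM-based evaluation above (IEMEvaluateRecursiveXcpt) and the open-coded logic being removed below encode the same double-fault rules from the Intel SDM. A simplified, self-contained sketch of just those rules, leaving out the software-interrupt, NMI, ICEBP and nested-#AC handling the real code also performs (the function and enum names below are illustrative; the X86_XCPT_* constants are the ones already used in this file):

    /* Illustrative sketch of the classic exception-on-exception rules; the real decision
       is made by IEM and covers many more cases. */
    typedef enum { XCPTACTION_REFLECT, XCPTACTION_DOUBLE_FAULT, XCPTACTION_TRIPLE_FAULT } XCPTACTION;

    static XCPTACTION EvaluateXcptOnXcpt(uint8_t uPrevVector, uint8_t uNewVector)
    {
        /* #DE, #TS, #NP, #SS and #GP are the contributory exceptions. */
        bool const fPrevContributory = uPrevVector == X86_XCPT_DE
                                    || (uPrevVector >= X86_XCPT_TS && uPrevVector <= X86_XCPT_GP);
        bool const fNewContributory  = uNewVector  == X86_XCPT_DE
                                    || (uNewVector  >= X86_XCPT_TS && uNewVector  <= X86_XCPT_GP);
        if (uPrevVector == X86_XCPT_DF)
            return XCPTACTION_TRIPLE_FAULT;     /* Any fault while delivering #DF: shutdown / triple fault. */
        if (   uPrevVector == X86_XCPT_PF
            && (uNewVector == X86_XCPT_PF || fNewContributory))
            return XCPTACTION_DOUBLE_FAULT;     /* #PF followed by #PF or a contributory exception. */
        if (fPrevContributory && fNewContributory)
            return XCPTACTION_DOUBLE_FAULT;     /* Contributory followed by contributory. */
        return XCPTACTION_REFLECT;              /* Benign combination: re-inject the original event. */
    }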
*/6101 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;6102 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))6103 {6104 if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)6105 {6106 enmReflect = VMXREFLECTXCPT_XCPT;6107 #ifdef VBOX_STRICT6108 if ( hmR0VmxIsContributoryXcpt(uIdtVector)6109 && uExitVector == X86_XCPT_PF)6110 {6111 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));6112 }6113 #endif6114 if ( uExitVector == X86_XCPT_PF6115 && uIdtVector == X86_XCPT_PF)6116 {6117 pVmxTransient->fVectoringDoublePF = true;6118 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));6119 }6120 else if ( uExitVector == X86_XCPT_AC6121 && uIdtVector == X86_XCPT_AC)6122 {6123 enmReflect = VMXREFLECTXCPT_HANG;6124 Log4(("IDT: Nested #AC - Bad guest\n"));6125 }6126 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)6127 && hmR0VmxIsContributoryXcpt(uExitVector)6128 && ( hmR0VmxIsContributoryXcpt(uIdtVector)6129 || uIdtVector == X86_XCPT_PF))6130 {6131 enmReflect = VMXREFLECTXCPT_DF;6132 }6133 else if (uIdtVector == X86_XCPT_DF)6134 enmReflect = VMXREFLECTXCPT_TF;6135 }6136 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT6137 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)6138 {6139 /*6140 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and6141 * privileged software exception (#DB from ICEBP) as they reoccur when restarting the instruction.6142 */6143 enmReflect = VMXREFLECTXCPT_XCPT;6144 6145 if (uExitVector == X86_XCPT_PF)6146 {6147 pVmxTransient->fVectoringPF = true;6148 Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));6149 }6150 }6151 }6152 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT6153 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT6154 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)6155 {6156 /*6157 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit6158 * interruption-information will not be valid as it's not an exception and we end up here. In such cases,6159 * it is sufficient to reflect the original exception to the guest after handling the VM-exit.6160 */6161 enmReflect = VMXREFLECTXCPT_XCPT;6162 }6163 6164 /*6165 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred6166 * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before6167 * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.6168 *6169 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". 
See @bugref{7445}.6170 */6171 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI6172 && enmReflect == VMXREFLECTXCPT_XCPT6173 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)6174 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))6175 {6176 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);6177 }6178 6179 switch (enmReflect)6180 {6181 case VMXREFLECTXCPT_XCPT:6182 {6183 Assert( uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT6184 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT6185 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);6186 6187 uint32_t u32ErrCode = 0;6188 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))6189 {6190 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);6191 AssertRCReturn(rc2, rc2);6192 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;6193 }6194 6195 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */6196 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);6197 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),6198 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);6199 rcStrict = VINF_SUCCESS;6200 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,6201 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));6202 6203 break;6204 }6205 6206 case VMXREFLECTXCPT_DF:6207 {6208 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);6209 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);6210 rcStrict = VINF_HM_DOUBLE_FAULT;6211 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,6212 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));6213 6214 break;6215 }6216 6217 case VMXREFLECTXCPT_TF:6218 {6219 rcStrict = VINF_EM_RESET;6220 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,6221 uExitVector));6222 break;6223 }6224 6225 case VMXREFLECTXCPT_HANG:6226 {6227 rcStrict = VERR_EM_GUEST_CPU_HANG;6228 break;6229 }6230 6231 default:6232 Assert(rcStrict == VINF_SUCCESS);6233 break;6234 }6235 #endif /* HMVMX_USE_IEM_EVENT_REFLECTION */6236 5921 } 6237 5922 else if ( VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo) … … 6247 5932 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6248 5933 { 6249 Log4 (("hmR0VmxCheckExitDueToEventDelivery: vcpu[%RU32] Setting VMCPU_FF_BLOCK_NMIS.Valid=%RTbool uExitReason=%u\n",6250 pVCpu->idCpu,VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));5934 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n", 5935 VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason)); 6251 5936 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6252 5937 } … … 6260 5945 6261 5946 /** 6262 * Saves the guest's CR0 register from the VMCS into the guest-CPU context. 6263 * 6264 * @returns VBox status code. 6265 * @param pVCpu The cross context virtual CPU structure. 6266 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6267 * out-of-sync. Make sure to update the required fields 6268 * before using them. 6269 * 6270 * @remarks No-long-jump zone!!! 6271 */ 6272 static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6273 { 6274 NOREF(pMixedCtx); 6275 6276 /* 6277 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook, 6278 * see hmR0VmxLeave(). 
Safer to just make this code non-preemptible. 6279 */ 6280 VMMRZCallRing3Disable(pVCpu); 6281 HM_DISABLE_PREEMPT(); 6282 6283 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0)) 6284 { 6285 #ifndef DEBUG_bird /** @todo this triggers running bs3-cpu-generated-1.img with --debug-command-line 6286 * and 'dbgc-init' containing: 6287 * sxe "xcpt_de" 6288 * sxe "xcpt_bp" 6289 * sxi "xcpt_gp" 6290 * sxi "xcpt_ss" 6291 * sxi "xcpt_np" 6292 */ 6293 /** @todo r=ramshankar: Should be fixed after r119291. */ 6294 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0)); 6295 #endif 6296 uint32_t uVal = 0; 6297 uint32_t uShadow = 0; 6298 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal); 6299 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow); 6300 AssertRCReturn(rc, rc); 6301 6302 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask); 6303 CPUMSetGuestCR0(pVCpu, uVal); 6304 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0); 6305 } 6306 6307 HM_RESTORE_PREEMPT(); 6308 VMMRZCallRing3Enable(pVCpu); 6309 return VINF_SUCCESS; 6310 } 6311 6312 6313 /** 6314 * Saves the guest's CR4 register from the VMCS into the guest-CPU context. 6315 * 6316 * @returns VBox status code. 6317 * @param pVCpu The cross context virtual CPU structure. 6318 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6319 * out-of-sync. Make sure to update the required fields 6320 * before using them. 6321 * 6322 * @remarks No-long-jump zone!!! 6323 */ 6324 static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6325 { 6326 NOREF(pMixedCtx); 6327 6328 int rc = VINF_SUCCESS; 6329 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4)) 6330 { 6331 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4)); 6332 uint32_t uVal = 0; 6333 uint32_t uShadow = 0; 6334 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal); 6335 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow); 6336 AssertRCReturn(rc, rc); 6337 6338 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask); 6339 CPUMSetGuestCR4(pVCpu, uVal); 6340 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4); 6341 } 6342 return rc; 6343 } 6344 6345 6346 /** 6347 * Saves the guest's RIP register from the VMCS into the guest-CPU context. 6348 * 6349 * @returns VBox status code. 6350 * @param pVCpu The cross context virtual CPU structure. 6351 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6352 * out-of-sync. Make sure to update the required fields 6353 * before using them. 6354 * 6355 * @remarks No-long-jump zone!!! 6356 */ 6357 static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6358 { 6359 int rc = VINF_SUCCESS; 6360 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP)) 6361 { 6362 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP)); 6363 uint64_t u64Val = 0; 6364 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); 6365 AssertRCReturn(rc, rc); 6366 6367 pMixedCtx->rip = u64Val; 6368 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP); 6369 } 6370 return rc; 6371 } 6372 6373 6374 /** 6375 * Saves the guest's RSP register from the VMCS into the guest-CPU context. 6376 * 6377 * @returns VBox status code. 6378 * @param pVCpu The cross context virtual CPU structure. 6379 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6380 * out-of-sync. Make sure to update the required fields 6381 * before using them. 6382 * 6383 * @remarks No-long-jump zone!!! 
6384 */ 6385 static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6386 { 6387 int rc = VINF_SUCCESS; 6388 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP)) 6389 { 6390 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP)); 6391 uint64_t u64Val = 0; 6392 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); 6393 AssertRCReturn(rc, rc); 6394 6395 pMixedCtx->rsp = u64Val; 6396 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP); 6397 } 6398 return rc; 6399 } 6400 6401 6402 /** 6403 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context. 6404 * 6405 * @returns VBox status code. 6406 * @param pVCpu The cross context virtual CPU structure. 6407 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6408 * out-of-sync. Make sure to update the required fields 6409 * before using them. 6410 * 6411 * @remarks No-long-jump zone!!! 6412 */ 6413 static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6414 { 6415 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS)) 6416 { 6417 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS)); 6418 uint32_t uVal = 0; 6419 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal); 6420 AssertRCReturn(rc, rc); 6421 6422 pMixedCtx->eflags.u32 = uVal; 6423 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */ 6424 { 6425 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS); 6426 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32)); 6427 6428 pMixedCtx->eflags.Bits.u1VM = 0; 6429 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL; 6430 } 6431 6432 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS); 6433 } 6434 return VINF_SUCCESS; 6435 } 6436 6437 6438 /** 6439 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the 6440 * guest-CPU context. 6441 */ 6442 DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6443 { 6444 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 6445 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx); 6446 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 6447 return rc; 6448 } 6449 6450 6451 /** 6452 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it) 6453 * from the guest-state area in the VMCS. 6454 * 6455 * @param pVCpu The cross context virtual CPU structure. 6456 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6457 * out-of-sync. Make sure to update the required fields 6458 * before using them. 6459 * 6460 * @remarks No-long-jump zone!!! 6461 */ 6462 static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6463 { 6464 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE)) 6465 { 6466 uint32_t uIntrState = 0; 6467 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState); 6468 AssertRC(rc); 6469 6470 if (!uIntrState) 6471 { 6472 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6473 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6474 6475 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6476 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6477 } 6478 else 6479 { 6480 if (uIntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS 6481 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)) 6482 { 6483 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 6484 AssertRC(rc); 6485 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). 
*/ 6486 AssertRC(rc); 6487 6488 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip); 6489 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 6490 } 6491 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6492 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6493 6494 if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) 6495 { 6496 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6497 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6498 } 6499 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6500 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6501 } 6502 6503 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE); 6504 } 6505 } 6506 6507 6508 /** 6509 * Saves the guest's activity state. 6510 * 6511 * @returns VBox status code. 6512 * @param pVCpu The cross context virtual CPU structure. 6513 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6514 * out-of-sync. Make sure to update the required fields 6515 * before using them. 6516 * 6517 * @remarks No-long-jump zone!!! 6518 */ 6519 static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6520 { 6521 NOREF(pMixedCtx); 6522 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */ 6523 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE); 6524 return VINF_SUCCESS; 6525 } 6526 6527 6528 /** 6529 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from 6530 * the current VMCS into the guest-CPU context. 6531 * 6532 * @returns VBox status code. 6533 * @param pVCpu The cross context virtual CPU structure. 6534 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6535 * out-of-sync. Make sure to update the required fields 6536 * before using them. 6537 * 6538 * @remarks No-long-jump zone!!! 6539 */ 6540 static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6541 { 6542 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR)) 6543 { 6544 Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR)); 6545 uint32_t u32Val = 0; 6546 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc); 6547 pMixedCtx->SysEnter.cs = u32Val; 6548 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR); 6549 } 6550 6551 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR)) 6552 { 6553 Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR)); 6554 uint64_t u64Val = 0; 6555 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc); 6556 pMixedCtx->SysEnter.eip = u64Val; 6557 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR); 6558 } 6559 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR)) 6560 { 6561 Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR)); 6562 uint64_t u64Val = 0; 6563 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc); 6564 pMixedCtx->SysEnter.esp = u64Val; 6565 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR); 6566 } 6567 return VINF_SUCCESS; 6568 } 6569 6570 6571 /** 6572 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from 6573 * the CPU back into the guest-CPU context. 6574 * 6575 * @returns VBox status code. 6576 * @param pVCpu The cross context virtual CPU structure. 6577 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6578 * out-of-sync. 
Make sure to update the required fields 6579 * before using them. 6580 * 6581 * @remarks No-long-jump zone!!! 6582 */ 6583 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6584 { 6585 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */ 6586 VMMRZCallRing3Disable(pVCpu); 6587 HM_DISABLE_PREEMPT(); 6588 6589 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */ 6590 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS)) 6591 { 6592 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS)); 6593 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx); 6594 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS); 6595 } 6596 6597 HM_RESTORE_PREEMPT(); 6598 VMMRZCallRing3Enable(pVCpu); 6599 6600 return VINF_SUCCESS; 6601 } 6602 6603 6604 /** 6605 * Saves the auto load/store'd guest MSRs from the current VMCS into 5947 * Imports a guest segment register from the current VMCS into 6606 5948 * the guest-CPU context. 6607 *6608 * @returns VBox status code.6609 * @param pVCpu The cross context virtual CPU structure.6610 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe6611 * out-of-sync. Make sure to update the required fields6612 * before using them.6613 *6614 * @remarks No-long-jump zone!!!6615 */6616 static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)6617 {6618 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))6619 return VINF_SUCCESS;6620 6621 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS));6622 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;6623 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;6624 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));6625 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)6626 {6627 switch (pMsr->u32Msr)6628 {6629 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break;6630 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;6631 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;6632 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;6633 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;6634 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break;6635 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */6636 break;6637 6638 default:6639 {6640 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));6641 pVCpu->hm.s.u32HMError = pMsr->u32Msr;6642 return VERR_HM_UNEXPECTED_LD_ST_MSR;6643 }6644 }6645 }6646 6647 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);6648 return VINF_SUCCESS;6649 }6650 6651 6652 /**6653 * Saves the guest control registers from the current VMCS into the guest-CPU6654 * context.6655 *6656 * @returns VBox status code.6657 * @param pVCpu The cross context virtual CPU structure.6658 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe6659 * out-of-sync. Make sure to update the required fields6660 * before using them.6661 *6662 * @remarks No-long-jump zone!!!6663 */6664 static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)6665 {6666 /* Guest CR0. Guest FPU. */6667 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);6668 AssertRCReturn(rc, rc);6669 6670 /* Guest CR4. 
*/6671 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);6672 AssertRCReturn(rc, rc);6673 6674 /* Guest CR2 - updated always during the world-switch or in #PF. */6675 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */6676 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))6677 {6678 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3));6679 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));6680 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));6681 6682 PVM pVM = pVCpu->CTX_SUFF(pVM);6683 if ( pVM->hm.s.vmx.fUnrestrictedGuest6684 || ( pVM->hm.s.fNestedPaging6685 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))6686 {6687 uint64_t u64Val = 0;6688 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);6689 if (pMixedCtx->cr3 != u64Val)6690 {6691 CPUMSetGuestCR3(pVCpu, u64Val);6692 if (VMMRZCallRing3IsEnabled(pVCpu))6693 {6694 PGMUpdateCR3(pVCpu, u64Val);6695 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));6696 }6697 else6698 {6699 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/6700 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);6701 }6702 }6703 6704 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */6705 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */6706 {6707 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);6708 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);6709 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);6710 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);6711 AssertRCReturn(rc, rc);6712 6713 if (VMMRZCallRing3IsEnabled(pVCpu))6714 {6715 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);6716 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));6717 }6718 else6719 {6720 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */6721 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);6722 }6723 }6724 }6725 6726 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);6727 }6728 6729 /*6730 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()6731 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp6732 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.6733 *6734 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus6735 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that6736 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should6737 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!6738 *6739 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. 
We cover for it here.6740 */6741 if (VMMRZCallRing3IsEnabled(pVCpu))6742 {6743 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))6744 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));6745 6746 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))6747 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);6748 6749 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));6750 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));6751 }6752 6753 return rc;6754 }6755 6756 6757 /**6758 * Saves a guest segment register from the current VMCS into the guest-CPU6759 * context.6760 5949 * 6761 5950 * @returns VBox status code. … … 6768 5957 * 6769 5958 * @remarks No-long-jump zone!!! 5959 * 6770 5960 * @remarks Never call this function directly!!! Use the 6771 * HMVMX_ SAVE_SREG() macro as that takes care of whether to read6772 * from the VMCS cache or not.6773 */ 6774 static int hmR0Vmx SaveSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,6775 PCPUMSELREG pSelReg)5961 * HMVMX_IMPORT_SREG() macro as that takes care 5962 * of whether to read from the VMCS cache or not. 5963 */ 5964 static int hmR0VmxImportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess, 5965 PCPUMSELREG pSelReg) 6776 5966 { 6777 5967 NOREF(pVCpu); 6778 5968 6779 uint32_t u32Val = 0; 6780 int rc = VMXReadVmcs32(idxSel, &u32Val); 5969 uint32_t u32Sel; 5970 uint32_t u32Limit; 5971 uint32_t u32Attr; 5972 uint64_t u64Base; 5973 int rc = VMXReadVmcs32(idxSel, &u32Sel); 5974 rc |= VMXReadVmcs32(idxLimit, &u32Limit); 5975 rc |= VMXReadVmcs32(idxAccess, &u32Attr); 5976 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base); 6781 5977 AssertRCReturn(rc, rc); 6782 pSelReg->Sel = (uint16_t)u32Val; 6783 pSelReg->ValidSel = (uint16_t)u32Val; 5978 5979 pSelReg->Sel = (uint16_t)u32Sel; 5980 pSelReg->ValidSel = (uint16_t)u32Sel; 6784 5981 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID; 6785 6786 rc = VMXReadVmcs32(idxLimit, &u32Val); 6787 AssertRCReturn(rc, rc); 6788 pSelReg->u32Limit = u32Val; 6789 6790 uint64_t u64Val = 0; 6791 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val); 6792 AssertRCReturn(rc, rc); 6793 pSelReg->u64Base = u64Val; 6794 6795 rc = VMXReadVmcs32(idxAccess, &u32Val); 6796 AssertRCReturn(rc, rc); 6797 pSelReg->Attr.u = u32Val; 5982 pSelReg->u32Limit = u32Limit; 5983 pSelReg->u64Base = u64Base; 5984 pSelReg->Attr.u = u32Attr; 6798 5985 6799 5986 /* … … 6820 6007 6821 6008 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. 
*/ 6822 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D| X86DESCATTR_G6823 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;6824 6825 Log4 (("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));6009 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G 6010 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT; 6011 6012 Log4Func(("Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Sel, pSelReg->Attr.u)); 6826 6013 #ifdef DEBUG_bird 6827 AssertMsg((u32 Val& ~X86DESCATTR_P) == pSelReg->Attr.u,6014 AssertMsg((u32Attr & ~X86DESCATTR_P) == pSelReg->Attr.u, 6828 6015 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n", 6829 6016 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit)); … … 6833 6020 } 6834 6021 6835 /** 6836 * Saves the guest segment registers from the current VMCS into the guest-CPU 6837 * context. 6838 * 6839 * @returns VBox status code. 6840 * @param pVCpu The cross context virtual CPU structure. 6841 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6842 * out-of-sync. Make sure to update the required fields 6843 * before using them. 6844 * 6845 * @remarks No-long-jump zone!!! 6846 */ 6847 static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6848 { 6849 /* Guest segment registers. */ 6850 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS)) 6851 { 6852 /** @todo r=ramshankar: Why do we save CR0 here? */ 6853 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS)); 6854 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 6855 AssertRCReturn(rc, rc); 6856 6857 rc = HMVMX_SAVE_SREG(CS, &pMixedCtx->cs); 6858 rc |= HMVMX_SAVE_SREG(SS, &pMixedCtx->ss); 6859 rc |= HMVMX_SAVE_SREG(DS, &pMixedCtx->ds); 6860 rc |= HMVMX_SAVE_SREG(ES, &pMixedCtx->es); 6861 rc |= HMVMX_SAVE_SREG(FS, &pMixedCtx->fs); 6862 rc |= HMVMX_SAVE_SREG(GS, &pMixedCtx->gs); 6863 AssertRCReturn(rc, rc); 6864 6865 /* Restore segment attributes for real-on-v86 mode hack. */ 6866 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6867 { 6868 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u; 6869 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u; 6870 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u; 6871 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u; 6872 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u; 6873 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u; 6874 } 6875 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS); 6876 } 6877 6878 return VINF_SUCCESS; 6879 } 6880 6881 6882 /** 6883 * Saves the guest SS register from the current VMCS into the guest-CPU context. 6884 * 6885 * @returns VBox status code. 6886 * @param pVCpu The cross context virtual CPU structure. 6887 * @remarks No-long-jump zone!!! 6888 */ 6889 static int hmR0VmxSaveGuestCs(PVMCPU pVCpu) 6890 { 6891 /** @todo optimize this? */ 6892 return hmR0VmxSaveGuestSegmentRegs(pVCpu, &pVCpu->cpum.GstCtx); 6893 } 6894 6895 6896 /** 6897 * Saves the guest descriptor table registers and task register from the current 6898 * VMCS into the guest-CPU context. 6899 * 6900 * @returns VBox status code. 6901 * @param pVCpu The cross context virtual CPU structure. 6902 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6903 * out-of-sync. Make sure to update the required fields 6904 * before using them. 6905 * 6906 * @remarks No-long-jump zone!!! 
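As background for the unusable-segment handling in hmR0VmxImportGuestSegmentReg() above: in the VT-x access-rights format read from the VMCS, bit 16 is the "segment unusable" flag (Intel SDM, guest segment access rights), which is the bit X86DESCATTR_UNUSABLE corresponds to. A minimal stand-alone check, using an illustrative constant name rather than the real VMX header define:

    /* Illustrative: VT-x segment access-rights word, bit 16 set => segment register is unusable. */
    #define MY_VMX_SEL_UNUSABLE     RT_BIT_32(16)
    bool const fUnusable = RT_BOOL(u32Attr & MY_VMX_SEL_UNUSABLE);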
6907 */ 6908 static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6909 { 6910 int rc = VINF_SUCCESS; 6911 6912 /* Guest LDTR. */ 6913 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR)) 6914 { 6915 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR)); 6916 rc = HMVMX_SAVE_SREG(LDTR, &pMixedCtx->ldtr); 6917 AssertRCReturn(rc, rc); 6918 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR); 6919 } 6920 6921 /* Guest GDTR. */ 6922 uint64_t u64Val = 0; 6923 uint32_t u32Val = 0; 6924 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR)) 6925 { 6926 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR)); 6927 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 6928 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc); 6929 pMixedCtx->gdtr.pGdt = u64Val; 6930 pMixedCtx->gdtr.cbGdt = u32Val; 6931 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR); 6932 } 6933 6934 /* Guest IDTR. */ 6935 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR)) 6936 { 6937 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR)); 6938 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 6939 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc); 6940 pMixedCtx->idtr.pIdt = u64Val; 6941 pMixedCtx->idtr.cbIdt = u32Val; 6942 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR); 6943 } 6944 6945 /* Guest TR. */ 6946 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR)) 6947 { 6948 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR)); 6949 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 6950 AssertRCReturn(rc, rc); 6951 6952 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */ 6953 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6954 { 6955 rc = HMVMX_SAVE_SREG(TR, &pMixedCtx->tr); 6956 AssertRCReturn(rc, rc); 6957 } 6958 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR); 6959 } 6960 return rc; 6961 } 6962 6963 6964 /** 6965 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU 6966 * context. 6967 * 6968 * @returns VBox status code. 6969 * @param pVCpu The cross context virtual CPU structure. 6970 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6971 * out-of-sync. Make sure to update the required fields 6972 * before using them. 6973 * 6974 * @remarks No-long-jump zone!!! 6975 */ 6976 static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6977 { 6978 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DR7)) 6979 { 6980 if (!pVCpu->hm.s.fUsingHyperDR7) 6981 { 6982 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */ 6983 uint32_t u32Val; 6984 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc); 6985 pMixedCtx->dr[7] = u32Val; 6986 } 6987 6988 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DR7); 6989 } 6990 return VINF_SUCCESS; 6991 } 6992 6993 6994 /** 6995 * Saves the guest APIC state from the current VMCS into the guest-CPU context. 6996 * 6997 * @returns VBox status code. 6998 * @param pVCpu The cross context virtual CPU structure. 6999 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 7000 * out-of-sync. Make sure to update the required fields 7001 * before using them. 7002 * 7003 * @remarks No-long-jump zone!!! 
7004 */ 7005 static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7006 { 7007 NOREF(pMixedCtx); 7008 7009 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */ 7010 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE); 7011 return VINF_SUCCESS; 7012 } 7013 7014 7015 /** 7016 * Worker for VMXR0ImportStateOnDemand. 6022 6023 /** 6024 * Imports the guest RIP from the VMCS back into the guest-CPU context. 7017 6025 * 7018 6026 * @returns VBox status code. 7019 6027 * @param pVCpu The cross context virtual CPU structure. 7020 * @param pCtx Pointer to the guest-CPU context. 6028 * 6029 * @remarks Called with interrupts and/or preemption disabled, should not assert! 6030 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState() 6031 * instead!!! 6032 */ 6033 DECLINLINE(int) hmR0VmxImportGuestRip(PVMCPU pVCpu) 6034 { 6035 uint64_t u64Val; 6036 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6037 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP) 6038 { 6039 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); 6040 if (RT_SUCCESS(rc)) 6041 { 6042 pCtx->rip = u64Val; 6043 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP; 6044 } 6045 return rc; 6046 } 6047 return VINF_SUCCESS; 6048 } 6049 6050 6051 /** 6052 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context. 6053 * 6054 * @returns VBox status code. 6055 * @param pVCpu The cross context virtual CPU structure. 6056 * 6057 * @remarks Called with interrupts and/or preemption disabled, should not assert! 6058 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState() 6059 * instead!!! 6060 */ 6061 DECLINLINE(int) hmR0VmxImportGuestRFlags(PVMCPU pVCpu) 6062 { 6063 uint32_t u32Val; 6064 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6065 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS) 6066 { 6067 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); 6068 if (RT_SUCCESS(rc)) 6069 { 6070 pCtx->eflags.u32 = u32Val; 6071 6072 /* Restore eflags for real-on-v86-mode hack. */ 6073 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6074 { 6075 pCtx->eflags.Bits.u1VM = 0; 6076 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL; 6077 } 6078 } 6079 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS; 6080 return rc; 6081 } 6082 return VINF_SUCCESS; 6083 } 6084 6085 6086 /** 6087 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU 6088 * context. 6089 * 6090 * @returns VBox status code. 6091 * @param pVCpu The cross context virtual CPU structure. 6092 * 6093 * @remarks Called with interrupts and/or preemption disabled, should not assert! 6094 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState() 6095 * instead!!! 6096 */ 6097 DECLINLINE(int) hmR0VmxImportGuestIntrState(PVMCPU pVCpu) 6098 { 6099 uint32_t u32Val; 6100 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6101 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val); 6102 if (RT_SUCCESS(rc)) 6103 { 6104 /* 6105 * We additionally have a requirement to import RIP, RFLAGS depending on whether we 6106 * might need them in hmR0VmxEvaluatePendingEvent(). 
6107 */ 6108 if (!u32Val) 6109 { 6110 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6111 { 6112 rc = hmR0VmxImportGuestRip(pVCpu); 6113 rc |= hmR0VmxImportGuestRFlags(pVCpu); 6114 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6115 } 6116 6117 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6118 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6119 } 6120 else 6121 { 6122 rc = hmR0VmxImportGuestRip(pVCpu); 6123 rc |= hmR0VmxImportGuestRFlags(pVCpu); 6124 6125 if (u32Val & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS 6126 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)) 6127 { 6128 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 6129 } 6130 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6131 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6132 6133 if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) 6134 { 6135 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6136 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6137 } 6138 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6139 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6140 } 6141 } 6142 return rc; 6143 } 6144 6145 6146 /** 6147 * Worker for VMXR0ImportStateOnDemand. 6148 * 6149 * @returns VBox status code. 6150 * @param pVCpu The cross context virtual CPU structure. 7021 6151 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 7022 6152 */ 7023 static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 7024 { 7025 int rc = VINF_SUCCESS; 7026 PVM pVM = pVCpu->CTX_SUFF(pVM); 6153 static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat) 6154 { 6155 #define VMXLOCAL_BREAK_RC(a_rc) \ 6156 if (RT_FAILURE(a_rc)) \ 6157 break 6158 6159 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x); 6160 6161 int rc = VINF_SUCCESS; 6162 PVM pVM = pVCpu->CTX_SUFF(pVM); 6163 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7027 6164 uint64_t u64Val; 7028 6165 uint32_t u32Val; 7029 uint32_t u32Shadow; 6166 6167 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat)); 7030 6168 7031 6169 /* 7032 * Though we can longjmp to ring-3 due to log-flushes here and get re-invoked7033 * on the ring-3 callback path, there is no real need to.6170 * We disable interrupts to make the updating of the state and in particular 6171 * the fExtrn modification atomic wrt to preemption hooks. 
7034 6172 */ 7035 if (VMMRZCallRing3IsEnabled(pVCpu)) 7036 VMMR0LogFlushDisable(pVCpu); 6173 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 6174 6175 fWhat &= pCtx->fExtrn; 6176 if (fWhat & pCtx->fExtrn) 6177 { 6178 do 6179 { 6180 if (fWhat & CPUMCTX_EXTRN_RIP) 6181 { 6182 rc = hmR0VmxImportGuestRip(pVCpu); 6183 VMXLOCAL_BREAK_RC(rc); 6184 } 6185 6186 if (fWhat & CPUMCTX_EXTRN_RFLAGS) 6187 { 6188 rc = hmR0VmxImportGuestRFlags(pVCpu); 6189 VMXLOCAL_BREAK_RC(rc); 6190 } 6191 6192 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE) 6193 { 6194 rc = hmR0VmxImportGuestIntrState(pVCpu); 6195 VMXLOCAL_BREAK_RC(rc); 6196 } 6197 6198 if (fWhat & CPUMCTX_EXTRN_RSP) 6199 { 6200 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); 6201 VMXLOCAL_BREAK_RC(rc); 6202 pCtx->rsp = u64Val; 6203 } 6204 6205 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 6206 { 6207 if (fWhat & CPUMCTX_EXTRN_CS) 6208 { 6209 rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs); 6210 VMXLOCAL_BREAK_RC(rc); 6211 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6212 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u; 6213 } 6214 if (fWhat & CPUMCTX_EXTRN_SS) 6215 { 6216 rc = HMVMX_IMPORT_SREG(SS, &pCtx->ss); 6217 VMXLOCAL_BREAK_RC(rc); 6218 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6219 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u; 6220 } 6221 if (fWhat & CPUMCTX_EXTRN_DS) 6222 { 6223 rc = HMVMX_IMPORT_SREG(DS, &pCtx->ds); 6224 VMXLOCAL_BREAK_RC(rc); 6225 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6226 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u; 6227 } 6228 if (fWhat & CPUMCTX_EXTRN_ES) 6229 { 6230 rc = HMVMX_IMPORT_SREG(ES, &pCtx->es); 6231 VMXLOCAL_BREAK_RC(rc); 6232 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6233 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u; 6234 } 6235 if (fWhat & CPUMCTX_EXTRN_FS) 6236 { 6237 rc = HMVMX_IMPORT_SREG(FS, &pCtx->fs); 6238 VMXLOCAL_BREAK_RC(rc); 6239 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6240 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u; 6241 } 6242 if (fWhat & CPUMCTX_EXTRN_GS) 6243 { 6244 rc = HMVMX_IMPORT_SREG(GS, &pCtx->gs); 6245 VMXLOCAL_BREAK_RC(rc); 6246 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6247 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u; 6248 } 6249 } 6250 6251 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK) 6252 { 6253 if (fWhat & CPUMCTX_EXTRN_LDTR) 6254 { 6255 rc = HMVMX_IMPORT_SREG(LDTR, &pCtx->ldtr); 6256 VMXLOCAL_BREAK_RC(rc); 6257 } 6258 6259 if (fWhat & CPUMCTX_EXTRN_GDTR) 6260 { 6261 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 6262 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); 6263 VMXLOCAL_BREAK_RC(rc); 6264 pCtx->gdtr.pGdt = u64Val; 6265 pCtx->gdtr.cbGdt = u32Val; 6266 } 6267 6268 /* Guest IDTR. */ 6269 if (fWhat & CPUMCTX_EXTRN_IDTR) 6270 { 6271 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 6272 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); 6273 VMXLOCAL_BREAK_RC(rc); 6274 pCtx->idtr.pIdt = u64Val; 6275 pCtx->idtr.cbIdt = u32Val; 6276 } 6277 6278 /* Guest TR. */ 6279 if (fWhat & CPUMCTX_EXTRN_TR) 6280 { 6281 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. 
*/ 6282 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6283 { 6284 rc = HMVMX_IMPORT_SREG(TR, &pCtx->tr); 6285 VMXLOCAL_BREAK_RC(rc); 6286 } 6287 } 6288 } 6289 6290 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 6291 { 6292 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); 6293 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); 6294 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); 6295 pCtx->SysEnter.cs = u32Val; 6296 VMXLOCAL_BREAK_RC(rc); 6297 } 6298 6299 #if HC_ARCH_BITS == 64 6300 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 6301 { 6302 if ( pVM->hm.s.fAllow64BitGuests 6303 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)) 6304 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); 6305 } 6306 6307 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 6308 { 6309 if ( pVM->hm.s.fAllow64BitGuests 6310 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)) 6311 { 6312 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); 6313 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR); 6314 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK); 6315 } 6316 } 6317 #endif 6318 6319 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)) 6320 #if HC_ARCH_BITS == 32 6321 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS)) 6322 #endif 6323 ) 6324 { 6325 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 6326 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs; 6327 for (uint32_t i = 0; i < cMsrs; i++, pMsr++) 6328 { 6329 switch (pMsr->u32Msr) 6330 { 6331 #if HC_ARCH_BITS == 32 6332 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break; 6333 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break; 6334 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break; 6335 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value; break; 6336 #endif 6337 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break; 6338 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break; 6339 default: 6340 { 6341 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, 6342 cMsrs)); 6343 pVCpu->hm.s.u32HMError = pMsr->u32Msr; 6344 return VERR_HM_UNEXPECTED_LD_ST_MSR; 6345 } 6346 } 6347 } 6348 } 6349 6350 if (fWhat & CPUMCTX_EXTRN_DR7) 6351 { 6352 if (!pVCpu->hm.s.fUsingHyperDR7) 6353 { 6354 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */ 6355 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); 6356 VMXLOCAL_BREAK_RC(rc); 6357 pCtx->dr[7] = u32Val; 6358 } 6359 } 6360 6361 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 6362 { 6363 uint32_t u32Shadow; 6364 /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */ 6365 if (fWhat & CPUMCTX_EXTRN_CR0) 6366 { 6367 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); 6368 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow); 6369 VMXLOCAL_BREAK_RC(rc); 6370 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask) 6371 | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask); 6372 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */ 6373 CPUMSetGuestCR0(pVCpu, u32Val); 6374 VMMRZCallRing3Enable(pVCpu); 6375 } 6376 6377 /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). 
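The CR0 merge just above (and the CR4 merge that follows) reads as: bits the guest owns come straight from the VMCS guest CR field, while bits intercepted via the CR guest/host mask come from the read shadow, i.e. the value the guest last wrote. As a stand-alone sketch (helper name is illustrative; fHostOwnedBits corresponds to u32CR0Mask / u32CR4Mask in the code):

    /* Illustrative: combine the live VMCS CR value with the read shadow. */
    static uint32_t hmR0VmxMergeWithReadShadow(uint32_t uVmcsCr, uint32_t uReadShadow, uint32_t fHostOwnedBits)
    {
        return (uVmcsCr     & ~fHostOwnedBits)   /* Guest-owned bits: current hardware value.     */
             | (uReadShadow &  fHostOwnedBits);  /* Host-owned bits: the value the guest sees.    */
    }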
*/ 6378 if (fWhat & CPUMCTX_EXTRN_CR4) 6379 { 6380 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val); 6381 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow); 6382 VMXLOCAL_BREAK_RC(rc); 6383 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask) 6384 | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask); 6385 CPUMSetGuestCR4(pVCpu, u32Val); 6386 } 6387 6388 if (fWhat & CPUMCTX_EXTRN_CR3) 6389 { 6390 if ( pVM->hm.s.vmx.fUnrestrictedGuest 6391 || ( pVM->hm.s.fNestedPaging 6392 && CPUMIsGuestPagingEnabledEx(pCtx))) /* PG bit changes are always intercepted, so it's up to date. */ 6393 { 6394 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val); 6395 if (pCtx->cr3 != u64Val) 6396 { 6397 CPUMSetGuestCR3(pVCpu, u64Val); 6398 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3); 6399 } 6400 6401 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */ 6402 if (CPUMIsGuestInPAEModeEx(pCtx)) 6403 { 6404 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); 6405 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); 6406 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); 6407 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); 6408 VMXLOCAL_BREAK_RC(rc); 6409 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES); 6410 } 6411 } 6412 } 6413 } 6414 } while (0); 6415 6416 if (RT_SUCCESS(rc)) 6417 { 6418 /* Update fExtrn. */ 6419 pCtx->fExtrn &= ~fWhat; 6420 6421 /* If everything has been imported, clear the HM keeper bit. */ 6422 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL)) 6423 { 6424 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM; 6425 Assert(!pCtx->fExtrn); 6426 } 6427 } 6428 } 7037 6429 else 7038 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 7039 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat)); 6430 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn)); 6431 6432 ASMSetFlags(fEFlags); 6433 6434 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x); 7040 6435 7041 6436 /* 6437 * Honor any pending CR3 updates. 6438 * 7042 6439 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback() 7043 6440 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp … … 7047 6444 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that 7048 6445 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should 7049 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!6446 * -NOT- check if CPUMCTX_EXTRN_CR3 is set! 7050 6447 * 7051 6448 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here. … … 7053 6450 if (VMMRZCallRing3IsEnabled(pVCpu)) 7054 6451 { 6452 VMMR0LogFlushDisable(pVCpu); 6453 7055 6454 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 7056 6455 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu)); … … 7065 6464 } 7066 6465 7067 Assert(!(fWhat & CPUMCTX_EXTRN_KEEPER_HM));7068 fWhat &= pCtx->fExtrn;7069 7070 /* If there is nothing more to import, bail early. */7071 if (!(fWhat & HMVMX_CPUMCTX_EXTRN_ALL))7072 return VINF_SUCCESS;7073 7074 /* RIP required while saving interruptibility-state below, see EMSetInhibitInterruptsPC(). 
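Taken together, the import-on-demand scheme is driven by callers requesting only the state they actually need; the fExtrn bookkeeping then guarantees each VMCS field is read at most once until the next VM-entry marks everything external again. A hedged usage sketch (the flag combination is only an example, not quoted from a particular exit handler):

    /* Example (illustrative): an exit handler that only needs RIP and RFLAGS. */
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    AssertRCReturn(rc, rc);
    /* pVCpu->cpum.GstCtx.rip and .eflags are now valid and their CPUMCTX_EXTRN_* bits are
       clear in fExtrn, so a second call requesting the same state is effectively a no-op. */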
*/7075 if (fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_HM_VMX_INT_STATE))7076 {7077 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);7078 AssertRCReturn(rc, rc);7079 pCtx->rip = u64Val;7080 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RIP);7081 }7082 7083 /* RFLAGS and interruptibility-state required while re-evaluating interrupt injection, see hmR0VmxGetGuestIntrState(). */7084 if (fWhat & (CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_HM_VMX_INT_STATE))7085 {7086 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);7087 AssertRCReturn(rc, rc);7088 pCtx->eflags.u32 = u32Val;7089 /* Restore eflags for real-on-v86-mode hack. */7090 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7091 {7092 Assert(pVM->hm.s.vmx.pRealModeTSS);7093 pCtx->eflags.Bits.u1VM = 0;7094 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;7095 }7096 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RFLAGS);7097 }7098 7099 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)7100 {7101 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val);7102 AssertRCReturn(rc, rc);7103 if (!u32Val)7104 {7105 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))7106 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);7107 7108 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))7109 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);7110 }7111 else7112 {7113 if (u32Val & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS7114 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))7115 {7116 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);7117 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));7118 }7119 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))7120 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);7121 7122 if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)7123 {7124 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))7125 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);7126 }7127 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))7128 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);7129 }7130 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_VMX_INT_STATE);7131 }7132 7133 if (fWhat & CPUMCTX_EXTRN_RSP)7134 {7135 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);7136 AssertRCReturn(rc, rc);7137 pCtx->rsp = u64Val;7138 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP);7139 }7140 7141 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)7142 {7143 if (fWhat & CPUMCTX_EXTRN_CS)7144 {7145 rc = HMVMX_SAVE_SREG(CS, &pCtx->cs);7146 AssertRCReturn(rc, rc);7147 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7148 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;7149 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CS);7150 }7151 if (fWhat & CPUMCTX_EXTRN_SS)7152 {7153 rc = HMVMX_SAVE_SREG(SS, &pCtx->ss);7154 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7155 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;7156 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SS);7157 }7158 if (fWhat & CPUMCTX_EXTRN_DS)7159 {7160 rc = HMVMX_SAVE_SREG(DS, &pCtx->ds);7161 AssertRCReturn(rc, rc);7162 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7163 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;7164 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DS);7165 }7166 if (fWhat & CPUMCTX_EXTRN_ES)7167 {7168 rc = HMVMX_SAVE_SREG(ES, &pCtx->es);7169 AssertRCReturn(rc, rc);7170 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7171 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;7172 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_ES);7173 }7174 if (fWhat & CPUMCTX_EXTRN_FS)7175 {7176 rc = HMVMX_SAVE_SREG(FS, &pCtx->fs);7177 
AssertRCReturn(rc, rc);7178 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7179 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;7180 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_FS);7181 }7182 if (fWhat & CPUMCTX_EXTRN_GS)7183 {7184 rc = HMVMX_SAVE_SREG(GS, &pCtx->gs);7185 AssertRCReturn(rc, rc);7186 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7187 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;7188 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GS);7189 }7190 }7191 7192 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)7193 {7194 if (fWhat & CPUMCTX_EXTRN_LDTR)7195 {7196 rc = HMVMX_SAVE_SREG(LDTR, &pCtx->ldtr);7197 AssertRCReturn(rc, rc);7198 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_LDTR);7199 }7200 7201 if (fWhat & CPUMCTX_EXTRN_GDTR)7202 {7203 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);7204 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);7205 AssertRCReturn(rc, rc);7206 pCtx->gdtr.pGdt = u64Val;7207 pCtx->gdtr.cbGdt = u32Val;7208 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GDTR);7209 }7210 7211 /* Guest IDTR. */7212 if (fWhat & CPUMCTX_EXTRN_IDTR)7213 {7214 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);7215 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);7216 AssertRCReturn(rc, rc);7217 pCtx->idtr.pIdt = u64Val;7218 pCtx->idtr.cbIdt = u32Val;7219 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_IDTR);7220 }7221 7222 /* Guest TR. */7223 if (fWhat & CPUMCTX_EXTRN_TR)7224 {7225 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */7226 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7227 {7228 rc = HMVMX_SAVE_SREG(TR, &pCtx->tr);7229 AssertRCReturn(rc, rc);7230 }7231 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_TR);7232 }7233 }7234 7235 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)7236 {7237 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);7238 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);7239 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);7240 pCtx->SysEnter.cs = u32Val;7241 AssertRCReturn(rc, rc);7242 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSENTER_MSRS);7243 }7244 7245 #if HC_ARCH_BITS == 647246 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)7247 {7248 if ( pVM->hm.s.fAllow64BitGuests7249 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))7250 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);7251 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KERNEL_GS_BASE);7252 }7253 7254 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)7255 {7256 if ( pVM->hm.s.fAllow64BitGuests7257 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))7258 {7259 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);7260 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);7261 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);7262 }7263 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSCALL_MSRS);7264 }7265 #endif7266 7267 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))7268 #if HC_ARCH_BITS == 327269 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))7270 #endif7271 )7272 {7273 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;7274 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;7275 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)7276 {7277 switch (pMsr->u32Msr)7278 {7279 #if HC_ARCH_BITS == 327280 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break;7281 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break;7282 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break;7283 case MSR_K8_KERNEL_GS_BASE: 
pCtx->msrKERNELGSBASE = pMsr->u64Value; break;7284 #endif7285 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break;7286 case MSR_K8_TSC_AUX:7287 {7288 /* CPUMSetGuestTscAux alters fExtrn without using atomics, so disable preemption temporarily. */7289 HM_DISABLE_PREEMPT();7290 CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);7291 HM_RESTORE_PREEMPT();7292 break;7293 }7294 default:7295 {7296 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));7297 pVCpu->hm.s.u32HMError = pMsr->u32Msr;7298 return VERR_HM_UNEXPECTED_LD_ST_MSR;7299 }7300 }7301 }7302 ASMAtomicUoAndU64(&pCtx->fExtrn, ~( CPUMCTX_EXTRN_TSC_AUX7303 | CPUMCTX_EXTRN_OTHER_MSRS7304 #if HC_ARCH_BITS == 327305 | CPUMCTX_EXTRN_KERNEL_GS_BASE7306 | CPUMCTX_EXTRN_SYSCALL_MSRS7307 #endif7308 ));7309 }7310 7311 if (fWhat & CPUMCTX_EXTRN_DR7)7312 {7313 if (!pVCpu->hm.s.fUsingHyperDR7)7314 {7315 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */7316 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);7317 AssertRCReturn(rc, rc);7318 pCtx->dr[7] = u32Val;7319 }7320 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR7);7321 }7322 7323 if (fWhat & CPUMCTX_EXTRN_CR_MASK)7324 {7325 /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */7326 if (fWhat & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3))7327 {7328 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);7329 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);7330 AssertRCReturn(rc, rc);7331 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask)7332 | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask);7333 CPUMSetGuestCR0(pVCpu, u32Val);7334 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR0);7335 }7336 7337 /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). */7338 if (fWhat & (CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3))7339 {7340 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);7341 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);7342 AssertRCReturn(rc, rc);7343 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask)7344 | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask);7345 CPUMSetGuestCR4(pVCpu, u32Val);7346 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR4);7347 }7348 7349 if (fWhat & CPUMCTX_EXTRN_CR3)7350 {7351 if ( pVM->hm.s.vmx.fUnrestrictedGuest7352 || ( pVM->hm.s.fNestedPaging7353 && CPUMIsGuestPagingEnabledEx(pCtx)))7354 {7355 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);7356 if (pCtx->cr3 != u64Val)7357 {7358 CPUMSetGuestCR3(pVCpu, u64Val);7359 if (VMMRZCallRing3IsEnabled(pVCpu))7360 {7361 PGMUpdateCR3(pVCpu, u64Val);7362 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));7363 }7364 else7365 {7366 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/7367 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);7368 }7369 }7370 7371 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. 
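The auto-load/store MSR walk completed just above scans the guest MSR area the CPU filled on VM-exit and dispatches on the MSR index, failing hard on anything unexpected. A self-contained version of the same loop shape with a reduced, illustrative MSR set:

    #include <stdint.h>

    typedef struct AUTOMSR
    {
        uint32_t u32Msr;        /* MSR index. */
        uint32_t u32Reserved;
        uint64_t u64Value;      /* Value the CPU stored on VM-exit. */
    } AUTOMSR;

    #define MSR_STAR        0xC0000081u
    #define MSR_LSTAR       0xC0000082u
    #define MSR_TSC_AUX     0x00000103u

    /* Returns 0 on success, -1 on an MSR that was never placed in the area. */
    static int SyncAutoStoreMsrs(const AUTOMSR *paMsrs, uint32_t cMsrs,
                                 uint64_t *puStar, uint64_t *puLstar, uint64_t *puTscAux)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
        {
            switch (paMsrs[i].u32Msr)
            {
                case MSR_STAR:    *puStar   = paMsrs[i].u64Value; break;
                case MSR_LSTAR:   *puLstar  = paMsrs[i].u64Value; break;
                case MSR_TSC_AUX: *puTscAux = paMsrs[i].u64Value; break;
                default:          return -1;   /* Unexpected entry: flag an error, as the real code does. */
            }
        }
        return 0;
    }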
*/7372 if (CPUMIsGuestInPAEModeEx(pCtx))7373 {7374 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);7375 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);7376 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);7377 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);7378 AssertRCReturn(rc, rc);7379 7380 if (VMMRZCallRing3IsEnabled(pVCpu))7381 {7382 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);7383 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));7384 }7385 else7386 {7387 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */7388 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);7389 }7390 }7391 }7392 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR3);7393 }7394 }7395 7396 /* If everything has been imported, clear the HM keeper bit. */7397 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))7398 {7399 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KEEPER_HM);7400 Assert(!pCtx->fExtrn);7401 }7402 7403 6466 return VINF_SUCCESS; 6467 #undef VMXLOCAL_BREAK_RC 7404 6468 } 7405 6469 … … 7410 6474 * @returns VBox status code. 7411 6475 * @param pVCpu The cross context virtual CPU structure. 7412 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context.7413 6476 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 7414 6477 */ 7415 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 7416 { 7417 return hmR0VmxImportGuestState(pVCpu, pCtx, fWhat); 7418 } 7419 7420 7421 /** 7422 * Saves the entire guest state from the currently active VMCS into the 7423 * guest-CPU context. 7424 * 7425 * This essentially VMREADs all guest-data. 7426 * 7427 * @returns VBox status code. 7428 * @param pVCpu The cross context virtual CPU structure. 7429 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 7430 * out-of-sync. Make sure to update the required fields 7431 * before using them. 7432 */ 7433 static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7434 { 7435 Assert(pVCpu); 7436 Assert(pMixedCtx); 7437 7438 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL) 7439 return VINF_SUCCESS; 7440 7441 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled 7442 again on the ring-3 callback path, there is no real need to. */ 7443 if (VMMRZCallRing3IsEnabled(pVCpu)) 7444 VMMR0LogFlushDisable(pVCpu); 7445 else 7446 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 7447 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu)); 7448 7449 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 7450 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7451 7452 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 7453 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7454 7455 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 7456 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7457 7458 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx); 7459 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7460 7461 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx); 7462 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! 
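The CR3 and PDPTE paths above both use the same deferral idea: if ring-3 calls are currently enabled the PGM update runs immediately, otherwise a per-VCPU force flag records the pending work for the next safe point. A generic sketch of that pattern, with made-up names (not the real PGM/VMM interfaces):

    #include <stdbool.h>
    #include <stdint.h>

    #define MYVCPU_FF_UPDATE_CR3    UINT32_C(0x1)

    typedef struct MYVCPU
    {
        uint32_t fForceFlags;       /* Pending-work bits, checked at the next safe point. */
        uint64_t uGuestCr3;
    } MYVCPU;

    extern bool IsSafeToCallPgm(MYVCPU *pVCpu);            /* e.g. not inside a no-longjmp section. */
    extern void PgmUpdateCr3(MYVCPU *pVCpu, uint64_t uCr3);

    static void NoteGuestCr3Change(MYVCPU *pVCpu, uint64_t uNewCr3)
    {
        pVCpu->uGuestCr3 = uNewCr3;
        if (IsSafeToCallPgm(pVCpu))
            PgmUpdateCr3(pVCpu, uNewCr3);                  /* Do it now. */
        else
            pVCpu->fForceFlags |= MYVCPU_FF_UPDATE_CR3;    /* Defer to the next safe point. */
    }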
rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7463 7464 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx); 7465 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7466 7467 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx); 7468 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7469 7470 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 7471 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7472 7473 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx); 7474 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7475 7476 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx); 7477 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 7478 7479 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL, 7480 ("Missed guest state bits while saving state; missing %RX32 (got %RX32, want %RX32) - check log for any previous errors!\n", 7481 HMVMX_UPDATED_GUEST_ALL ^ HMVMXCPU_GST_VALUE(pVCpu), HMVMXCPU_GST_VALUE(pVCpu), HMVMX_UPDATED_GUEST_ALL)); 7482 7483 if (VMMRZCallRing3IsEnabled(pVCpu)) 7484 VMMR0LogFlushEnable(pVCpu); 7485 7486 return VINF_SUCCESS; 7487 } 7488 7489 7490 /** 7491 * Saves basic guest registers needed for IEM instruction execution. 7492 * 7493 * @returns VBox status code (OR-able). 7494 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 7495 * @param pMixedCtx Pointer to the CPU context of the guest. 7496 * @param fMemory Whether the instruction being executed operates on 7497 * memory or not. Only CR0 is synced up if clear. 7498 * @param fNeedRsp Need RSP (any instruction working on GPRs or stack). 7499 */ 7500 static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp) 7501 { 7502 /* 7503 * We assume all general purpose registers other than RSP are available. 7504 * 7505 * - RIP is a must, as it will be incremented or otherwise changed. 7506 * - RFLAGS are always required to figure the CPL. 7507 * - RSP isn't always required, however it's a GPR, so frequently required. 7508 * - SS and CS are the only segment register needed if IEM doesn't do memory 7509 * access (CPL + 16/32/64-bit mode), but we can only get all segment registers. 7510 * - CR0 is always required by IEM for the CPL, while CR3 and CR4 will only 7511 * be required for memory accesses. 7512 * 7513 * Note! Before IEM dispatches an exception, it will call us to sync in everything. 7514 */ 7515 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 7516 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 7517 if (fNeedRsp) 7518 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx); 7519 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /** @todo Only CS and SS are required here. */ 7520 if (!fMemory) 7521 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 7522 else 7523 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 7524 AssertRCReturn(rc, rc); 7525 return rc; 7526 } 7527 7528 7529 /** 7530 * Saves guest registers needed for IEM instruction interpretation. 7531 * 7532 * @returns VBox status code (OR-able). 7533 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 7534 */ 7535 static int hmR0VmxSaveGuestRegsForIemInterpreting(PVMCPU pVCpu) 7536 { 7537 /* 7538 * Our goal here is IEM_CPUMCTX_EXTRN_MUST_MASK. 7539 * 7540 * Note! 
Before IEM dispatches an exception, it will call us to sync in everything. 7541 */ 7542 #if 0 /* later with CPUMCTX_EXTRN_XXX */ 7543 int rc = hmR0VmxSaveGuestRip(pVCpu, &pVCpu->cpum.GstCtx); 7544 rc |= hmR0VmxSaveGuestRflags(pVCpu, &pVCpu->cpum.GstCtx); 7545 rc |= hmR0VmxSaveGuestRsp(pVCpu, &pVCpu->cpum.GstCtx); 7546 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, &pVCpu->cpum.GstCtx); /** @todo Only CS and SS are strictly required here. */ 7547 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, &pVCpu->cpum.GstCtx); /** @todo We don't need CR2 here. */ 7548 rc |= hmR0VmxSaveGuestApicState(pVCpu, &pVCpu->cpum.GstCtx); /** @todo Only TPR is needed here. */ 7549 rc |= hmR0VmxSaveGuestDR7(pVCpu, &pVCpu->cpum.GstCtx); 7550 /* EFER is always up to date. */ 7551 AssertRCReturn(rc, rc); 7552 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST - fixme); /** @todo fix me */ 7553 #else 7554 int rc = hmR0VmxSaveGuestState(pVCpu, &pVCpu->cpum.GstCtx); 7555 AssertRCReturn(rc, rc); 7556 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 7557 #endif 7558 7559 return rc; 7560 } 7561 7562 7563 /** 7564 * Ensures that we've got a complete basic guest-context. 7565 * 7566 * This excludes the FPU, SSE, AVX, and similar extended state. The interface 7567 * is for the interpreter. 7568 * 7569 * @returns VBox status code. 7570 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 7571 * @param pMixedCtx Pointer to the guest-CPU context which may have data 7572 * needing to be synced in. 7573 * @thread EMT(pVCpu) 7574 */ 7575 VMMR0_INT_DECL(int) HMR0EnsureCompleteBasicContext(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7576 { 7577 /* Note! Since this is only applicable to VT-x, the implementation is placed 7578 in the VT-x part of the sources instead of the generic stuff. */ 7579 int rc; 7580 PVM pVM = pVCpu->CTX_SUFF(pVM); 7581 if ( pVM->hm.s.vmx.fSupported 7582 && VM_IS_HM_ENABLED(pVM)) 7583 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 7584 else 7585 rc = VINF_SUCCESS; 7586 7587 /* 7588 * For now, imply that the caller might change everything too. Do this after 7589 * saving the guest state so as to not trigger assertions. 7590 * 7591 * This is required for AMD-V too as it too only selectively re-loads changed 7592 * guest state back in to the VMCB. 7593 */ 7594 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 7595 return rc; 6478 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat) 6479 { 6480 return hmR0VmxImportGuestState(pVCpu, fWhat); 7596 6481 } 7597 6482 … … 7634 6519 return VINF_SUCCESS; 7635 6520 6521 #if 0 7636 6522 /* We need the control registers now, make sure the guest-CPU context is updated. */ 7637 int rc3 = hmR0Vmx SaveGuestControlRegs(pVCpu, pMixedCtx);6523 int rc3 = hmR0VmxImportGuestStatae(pVCpu, CPUMCTX_EXTRN_CR0); 7638 6524 AssertRCReturn(rc3, rc3); 7639 6525 … … 7657 6543 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 7658 6544 } 6545 #endif 7659 6546 7660 6547 /* Pending PGM C3 sync. */ 7661 6548 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)) 7662 6549 { 6550 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4))); 7663 6551 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, 7664 6552 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); … … 7666 6554 { 7667 6555 AssertRC(VBOXSTRICTRC_VAL(rcStrict2)); 7668 Log4 (("hmR0VmxCheckForceFlags:PGMSyncCR3 forcing us back to ring-3. 
rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));6556 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2))); 7669 6557 return rcStrict2; 7670 6558 } … … 7677 6565 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF); 7678 6566 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3; 7679 Log4 (("hmR0VmxCheckForceFlags:HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));6567 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2)); 7680 6568 return rc2; 7681 6569 } … … 7685 6573 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST)) 7686 6574 { 7687 Log4 (("hmR0VmxCheckForceFlags:Pending VM request forcing us back to ring-3\n"));6575 Log4Func(("Pending VM request forcing us back to ring-3\n")); 7688 6576 return VINF_EM_PENDING_REQUEST; 7689 6577 } … … 7692 6580 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING)) 7693 6581 { 7694 Log4 (("hmR0VmxCheckForceFlags:PGM pool flush pending forcing us back to ring-3\n"));6582 Log4Func(("PGM pool flush pending forcing us back to ring-3\n")); 7695 6583 return VINF_PGM_POOL_FLUSH_PENDING; 7696 6584 } … … 7699 6587 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA)) 7700 6588 { 7701 Log4 (("hmR0VmxCheckForceFlags:Pending DMA request forcing us back to ring-3\n"));6589 Log4Func(("Pending DMA request forcing us back to ring-3\n")); 7702 6590 return VINF_EM_RAW_TO_R3; 7703 6591 } … … 7766 6654 AssertRC(rc); 7767 6655 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n", 7768 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));6656 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress)); 7769 6657 7770 6658 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress); … … 7850 6738 * 7851 6739 * @returns VBox status code. 7852 * @param pVCpu The cross context virtual CPU structure. 7853 * @param pMixedCtx Pointer to the guest-CPU context. The data may 7854 * be out-of-sync. Make sure to update the required 7855 * fields before using them. 7856 * @param fSaveGuestState Whether to save the guest state or not. 6740 * @param pVCpu The cross context virtual CPU structure. 6741 * @param fImportState Whether to import the guest state from the VMCS back 6742 * to the guest-CPU context. 7857 6743 * 7858 6744 * @remarks No-long-jmp zone!!! 7859 6745 */ 7860 static int hmR0VmxLeave(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)6746 static int hmR0VmxLeave(PVMCPU pVCpu, bool fImportState) 7861 6747 { 7862 6748 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 7872 6758 7873 6759 /* Save the guest state if necessary. */ 7874 if ( fSaveGuestState 7875 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL) 7876 { 7877 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 6760 if (fImportState) 6761 { 6762 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 7878 6763 AssertRCReturn(rc, rc); 7879 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL); 7880 } 7881 7882 /* Restore host FPU state if necessary and resync on next R0 reentry .*/ 7883 if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu)) 7884 { 7885 /* We shouldn't reload CR0 without saving it first. */ 7886 if (!fSaveGuestState) 7887 { 7888 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 7889 AssertRCReturn(rc, rc); 7890 } 7891 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 7892 } 7893 7894 /* Restore host debug registers if necessary and resync on next R0 reentry. 
*/ 6764 } 6765 6766 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */ 6767 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu); 6768 Assert(!CPUMIsGuestFPUStateActive(pVCpu)); 6769 6770 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */ 7895 6771 #ifdef VBOX_STRICT 7896 6772 if (CPUMIsHyperDebugStateActive(pVCpu)) 7897 6773 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT); 7898 6774 #endif 7899 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */)) 7900 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG); 6775 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */); 7901 6776 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu)); 7902 6777 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu)); … … 7914 6789 7915 6790 /* Restore the lazy host MSRs as we're leaving VT-x context. */ 7916 if (pVCpu->hm.s.vmx.fLazyMsrs) 7917 { 7918 /* We shouldn't reload the guest MSRs without saving it first. */ 7919 if (!fSaveGuestState) 7920 { 7921 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx); 6791 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST) 6792 { 6793 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */ 6794 if (!fImportState) 6795 { 6796 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE 6797 | CPUMCTX_EXTRN_SYSCALL_MSRS); 7922 6798 AssertRCReturn(rc, rc); 7923 6799 } 7924 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));7925 6800 hmR0VmxLazyRestoreHostMsrs(pVCpu); 7926 6801 Assert(!pVCpu->hm.s.vmx.fLazyMsrs); 7927 6802 } 6803 else 6804 pVCpu->hm.s.vmx.fLazyMsrs = 0; 7928 6805 7929 6806 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */ … … 7931 6808 7932 6809 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry); 7933 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState); 6810 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState); 6811 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState); 7934 6812 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1); 7935 6813 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2); … … 7972 6850 * @remarks No-long-jmp zone!!! 7973 6851 */ 7974 DECLINLINE(int)hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx)6852 static int hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7975 6853 { 7976 6854 HM_DISABLE_PREEMPT(); … … 7983 6861 if (!pVCpu->hm.s.fLeaveDone) 7984 6862 { 7985 int rc2 = hmR0VmxLeave(pVCpu, pMixedCtx, true /* fSaveGuestState */);6863 int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */); 7986 6864 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2); 7987 6865 pVCpu->hm.s.fLeaveDone = true; 7988 6866 } 7989 Assert( HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);6867 Assert(!pMixedCtx->fExtrn); 7990 6868 7991 6869 /* … … 8059 6937 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */ 8060 6938 VMMRZCallRing3Disable(pVCpu); 8061 Log4 (("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcExit)));6939 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit))); 8062 6940 8063 6941 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */ … … 8074 6952 and if we're injecting an event we should have a TRPM trap pending. 
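hmR0VmxLeave() above restores the lazily switched host MSRs only when the guest values were actually loaded. A generic sketch of lazy MSR switching under that assumption, shown for KERNEL_GS_BASE only (ReadMsr/WriteMsr are stand-ins for the RDMSR/WRMSR wrappers):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_KERNEL_GS_BASE  0xC0000102u

    extern uint64_t ReadMsr(uint32_t idMsr);
    extern void     WriteMsr(uint32_t idMsr, uint64_t uValue);

    typedef struct LAZYMSRS
    {
        bool     fGuestLoaded;        /* Guest values currently live in the MSRs. */
        uint64_t uHostKernelGsBase;   /* Saved host value. */
    } LAZYMSRS;

    static void LazyLoadGuestMsrs(LAZYMSRS *pLazy, uint64_t uGuestKernelGsBase)
    {
        if (!pLazy->fGuestLoaded)
        {
            pLazy->uHostKernelGsBase = ReadMsr(MSR_KERNEL_GS_BASE);  /* Save host value once. */
            WriteMsr(MSR_KERNEL_GS_BASE, uGuestKernelGsBase);
            pLazy->fGuestLoaded = true;
        }
    }

    static void LazyRestoreHostMsrs(LAZYMSRS *pLazy)
    {
        if (pLazy->fGuestLoaded)
        {
            WriteMsr(MSR_KERNEL_GS_BASE, pLazy->uHostKernelGsBase);  /* Put the host value back. */
            pLazy->fGuestLoaded = false;
        }
    }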
*/ 8075 6953 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit))); 8076 #ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a trip ple fault in progress. */6954 #ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */ 8077 6955 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit))); 8078 6956 #endif … … 8092 6970 | CPUM_CHANGED_TR 8093 6971 | CPUM_CHANGED_HIDDEN_SEL_REGS); 8094 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));8095 6972 if ( pVM->hm.s.fNestedPaging 8096 6973 && CPUMIsGuestPagingEnabledEx(pMixedCtx)) … … 8101 6978 Assert(!pVCpu->hm.s.fClearTrapFlag); 8102 6979 6980 /* Update the exit-to-ring 3 reason. */ 6981 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit); 6982 8103 6983 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */ 8104 6984 if (rcExit != VINF_EM_RAW_INTERRUPT) 8105 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);6985 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 8106 6986 8107 6987 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3); … … 8150 7030 pVCpu->hm.s.vmx.fRestoreHostFlags = 0; 8151 7031 #endif 7032 8152 7033 /* Restore the lazy host MSRs as we're leaving VT-x context. */ 8153 if (pVCpu->hm.s.vmx.fLazyMsrs )7034 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST) 8154 7035 hmR0VmxLazyRestoreHostMsrs(pVCpu); 8155 7036 … … 8178 7059 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 8179 7060 8180 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu, 8181 enmOperation)); 7061 Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation)); 8182 7062 8183 7063 int rc = hmR0VmxLongJmpToRing3(pVCpu, (PCPUMCTX)pvUser); … … 8204 7084 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 8205 7085 AssertRC(rc); 8206 Log4 (("Setup interrupt-window exiting\n"));7086 Log4Func(("Setup interrupt-window exiting\n")); 8207 7087 } 8208 7088 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */ … … 8221 7101 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 8222 7102 AssertRC(rc); 8223 Log4 (("Cleared interrupt-window exiting\n"));7103 Log4Func(("Cleared interrupt-window exiting\n")); 8224 7104 } 8225 7105 … … 8240 7120 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 8241 7121 AssertRC(rc); 8242 Log4 (("Setup NMI-window exiting\n"));7122 Log4Func(("Setup NMI-window exiting\n")); 8243 7123 } 8244 7124 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */ … … 8257 7137 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 8258 7138 AssertRC(rc); 8259 Log4 (("Cleared NMI-window exiting\n"));7139 Log4Func(("Cleared NMI-window exiting\n")); 8260 7140 } 8261 7141 … … 8274 7154 { 8275 7155 /* Get the current interruptibility-state of the guest and then figure out what can be injected. 
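Arming interrupt-window exiting, as done in the helpers above, is a read-modify-write of the primary processor-based execution controls followed by a VMWRITE. A sketch using the architectural bit position and field encoding from the Intel SDM (VmcsWrite32 is a hypothetical wrapper):

    #include <stdint.h>

    #define PROC_CTLS_INT_WINDOW_EXIT   UINT32_C(0x00000004)   /* Bit 2 of the primary controls. */
    #define VMCS_CTRL_PROC_EXEC         0x4002u                /* Field encoding of those controls. */

    extern int VmcsWrite32(uint32_t idField, uint32_t uValue); /* Stand-in for a VMWRITE wrapper. */

    /* Ask the CPU to exit as soon as the guest can accept an external interrupt. */
    static int SetIntWindowExiting(uint32_t *puProcCtls)
    {
        if (!(*puProcCtls & PROC_CTLS_INT_WINDOW_EXIT))
        {
            *puProcCtls |= PROC_CTLS_INT_WINDOW_EXIT;
            return VmcsWrite32(VMCS_CTRL_PROC_EXEC, *puProcCtls);
        }
        return 0;   /* Already armed. */
    }

Clearing the window works the same way with the bit masked off; the NMI-window control follows the identical pattern with its own bit.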
*/ 8276 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);8277 bool const fBlockMovSS = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);8278 bool const fBlockSti = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);8279 bool const fBlockNmi = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);8280 8281 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));8282 Assert(!( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/7156 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx); 7157 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS); 7158 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 7159 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI); 7160 7161 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7162 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 8283 7163 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */ 8284 7164 Assert(!TRPMHasTrap(pVCpu)); … … 8300 7180 && !fBlockMovSS) 8301 7181 { 8302 Log4 (("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));7182 Log4Func(("Pending NMI\n")); 8303 7183 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID; 8304 7184 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 8318 7198 { 8319 7199 Assert(!DBGFIsStepping(pVCpu)); 8320 int rc = hmR0Vmx SaveGuestRflags(pVCpu, pMixedCtx);8321 AssertRC (rc);7200 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 7201 AssertRCReturn(rc, 0); 8322 7202 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF); 8323 7203 if ( !pVCpu->hm.s.Event.fPending … … 8330 7210 if (RT_SUCCESS(rc)) 8331 7211 { 8332 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt)); 8333 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID; 8334 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 7212 Log4Func(("Pending external interrupt u8Interrupt=%#x\n", u8Interrupt)); 7213 uint32_t u32IntInfo = u8Interrupt 7214 | VMX_EXIT_INTERRUPTION_INFO_VALID 7215 | (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 8335 7216 8336 7217 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrfaultAddress */); … … 8355 7236 } 8356 7237 8357 return uIntrState;7238 return fIntrState; 8358 7239 } 8359 7240 … … 8364 7245 * 8365 7246 * @param pVCpu The cross context virtual CPU structure. 8366 */ 8367 DECLINLINE(void) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu) 8368 { 8369 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS)); NOREF(pVCpu); 8370 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS); 8371 AssertRC(rc); 7247 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 7248 * out-of-sync. Make sure to update the required fields 7249 * before using them. 
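The event evaluation above injects at most one thing per pass and observes the usual priority: NMIs before external interrupts, both gated by the guest interruptibility state and RFLAGS.IF. A reduced decision helper capturing that ordering, with hypothetical boolean inputs:

    #include <stdbool.h>

    typedef enum { INJECT_NOTHING, INJECT_NMI, INJECT_EXTINT } INJECTWHAT;

    static INJECTWHAT EvaluatePendingEvent(bool fNmiPending, bool fIntPending,
                                           bool fBlockNmi, bool fBlockSti, bool fBlockMovSS,
                                           bool fEflIf)
    {
        if (fNmiPending && !fBlockNmi && !fBlockSti && !fBlockMovSS)
            return INJECT_NMI;                 /* NMIs win, unless an NMI or interrupt shadow blocks them. */
        if (fIntPending && fEflIf && !fBlockSti && !fBlockMovSS)
            return INJECT_EXTINT;              /* External interrupts need IF=1 and no interrupt shadow. */
        return INJECT_NOTHING;                 /* Arm an interrupt/NMI window and retry after the next exit. */
    }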
7250 */ 7251 DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7252 { 7253 RT_NOREF(pVCpu); 7254 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7255 return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS); 8372 7256 } 8373 7257 … … 8382 7266 * out-of-sync. Make sure to update the required fields 8383 7267 * before using them. 8384 * @param uIntrState The VT-x guest-interruptibility state.7268 * @param fIntrState The VT-x guest-interruptibility state. 8385 7269 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should 8386 7270 * return VINF_EM_DBG_STEPPED if the event was 8387 7271 * dispatched directly. 8388 7272 */ 8389 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t uIntrState, bool fStepping)7273 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t fIntrState, bool fStepping) 8390 7274 { 8391 7275 HMVMX_ASSERT_PREEMPT_SAFE(); 8392 7276 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 8393 7277 8394 bool fBlockMovSS = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);8395 bool fBlockSti = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);8396 8397 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));8398 Assert(!( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/7278 bool fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS); 7279 bool fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 7280 7281 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7282 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 8399 7283 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */ 8400 7284 Assert(!TRPMHasTrap(pVCpu)); … … 8421 7305 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI) 8422 7306 { 8423 bool const fBlockNmi = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);7307 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI); 8424 7308 Assert(!fBlockSti); 8425 7309 Assert(!fBlockMovSS); … … 8429 7313 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo, 8430 7314 (uint8_t)uIntType)); 8431 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, p MixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,8432 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, 8433 fStepping, &uIntrState);7315 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr, 7316 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, 7317 &fIntrState); 8434 7318 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict); 8435 7319 8436 7320 /* Update the interruptibility-state as it could have been changed by 8437 7321 hmR0VmxInjectEventVmcs() (e.g. 
real-on-v86 guest injecting software interrupts) */ 8438 fBlockMovSS = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);8439 fBlockSti = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);7322 fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS); 7323 fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 8440 7324 8441 7325 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT) … … 8457 7341 */ 8458 7342 Assert(!DBGFIsStepping(pVCpu)); 8459 int rc 2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);8460 AssertRCReturn(rc 2, rc2);7343 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 7344 AssertRCReturn(rc, rc); 8461 7345 if (pMixedCtx->eflags.Bits.u1TF) 8462 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 7346 { 7347 int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 7348 AssertRCReturn(rc2, rc2); 7349 } 8463 7350 } 8464 7351 else if (pMixedCtx->eflags.Bits.u1TF) … … 8469 7356 */ 8470 7357 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)); 8471 uIntrState = 0;7358 fIntrState = 0; 8472 7359 } 8473 7360 } … … 8477 7364 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". 8478 7365 */ 8479 int rc 2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);8480 AssertRC (rc2);7366 int rc3 = hmR0VmxExportGuestIntrState(pVCpu, fIntrState); 7367 AssertRCReturn(rc3, rc3); 8481 7368 8482 7369 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping)); … … 8514 7401 * is injected directly (register modified by us, not 8515 7402 * by hardware on VM-entry). 8516 * @param p uIntrState Pointer to the current guest interruptibility-state.7403 * @param pfIntrState Pointer to the current guest interruptibility-state. 8517 7404 * This interruptibility-state will be updated if 8518 7405 * necessary. This cannot not be NULL. 8519 7406 */ 8520 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState) 8521 { 7407 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fStepping, uint32_t *pfIntrState) 7408 { 7409 NOREF(pMixedCtx); 8522 7410 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID; 8523 7411 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 8524 7412 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID; 8525 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,8526 fStepping, puIntrState);7413 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */, fStepping, 7414 pfIntrState); 8527 7415 } 8528 7416 … … 8580 7468 * directly (register modified by us, not by 8581 7469 * hardware on VM-entry). 8582 * @param p uIntrState Pointer to the current guest interruptibility-state.7470 * @param pfIntrState Pointer to the current guest interruptibility-state. 8583 7471 * This interruptibility-state will be updated if 8584 7472 * necessary. This cannot not be NULL. 
8585 7473 */ 8586 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode, 8587 bool fStepping, uint32_t *puIntrState) 8588 { 7474 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode, 7475 bool fStepping, uint32_t *pfIntrState) 7476 { 7477 NOREF(pMixedCtx); 8589 7478 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID; 8590 7479 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 8591 7480 if (fErrorCodeValid) 8592 7481 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID; 8593 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,8594 fStepping, puIntrState);7482 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */, fStepping, 7483 pfIntrState); 8595 7484 } 8596 7485 … … 8677 7566 * 8678 7567 * @param pVCpu The cross context virtual CPU structure. 8679 * @param pMixedCtx Pointer to the guest-CPU context. The data may8680 * be out-of-sync. Make sure to update the required8681 * fields before using them.8682 7568 * @param u64IntInfo The VM-entry interruption-information field. 8683 7569 * @param cbInstr The VM-entry instruction length in bytes (for … … 8686 7572 * @param u32ErrCode The VM-entry exception error code. 8687 7573 * @param GCPtrFaultAddress The page-fault address for \#PF exceptions. 8688 * @param p uIntrState Pointer to the current guest interruptibility-state.7574 * @param pfIntrState Pointer to the current guest interruptibility-state. 8689 7575 * This interruptibility-state will be updated if 8690 7576 * necessary. This cannot not be NULL. … … 8697 7583 * @remarks Requires CR0! 8698 7584 */ 8699 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr, 8700 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, 8701 uint32_t *puIntrState) 7585 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode, 7586 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState) 8702 7587 { 8703 7588 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */ 8704 7589 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo)); 8705 Assert(puIntrState); 8706 uint32_t u32IntInfo = (uint32_t)u64IntInfo; 8707 8708 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo); 8709 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo); 7590 Assert(pfIntrState); 7591 7592 PCPUMCTX pMixedCtx = &pVCpu->cpum.GstCtx; 7593 uint32_t u32IntInfo = (uint32_t)u64IntInfo; 7594 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo); 7595 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo); 8710 7596 8711 7597 #ifdef VBOX_STRICT … … 8737 7623 /* Cannot inject an NMI when block-by-MOV SS is in effect. */ 8738 7624 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI 8739 || !(*p uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));7625 || !(*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)); 8740 7626 8741 7627 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]); 8742 7628 8743 /* We require CR0 to check if the guest is in real-mode. 
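The injection helpers in this area all build the same 32-bit VM-entry interruption-information word. Its architectural layout (Intel SDM) is: bits 7:0 vector, bits 10:8 type, bit 11 deliver-error-code, bit 31 valid. A small self-contained builder for a hardware exception:

    #include <stdint.h>

    #define INT_INFO_TYPE_HW_XCPT     3u                      /* "Hardware exception" type. */
    #define INT_INFO_ERRCODE_VALID    (UINT32_C(1) << 11)
    #define INT_INFO_VALID            (UINT32_C(1) << 31)

    static uint32_t MakeXcptIntInfo(uint8_t uVector, int fHasErrCode)
    {
        uint32_t u = uVector
                   | (INT_INFO_TYPE_HW_XCPT << 8)
                   | INT_INFO_VALID;
        if (fHasErrCode)
            u |= INT_INFO_ERRCODE_VALID;
        return u;       /* e.g. #GP with error code -> 0x80000B0D */
    }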
*/ 8744 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 8745 AssertRCReturn(rc, rc); 8746 8747 /* 8748 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real 8749 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest. 8750 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes. 8751 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling. 8752 */ 8753 if (CPUMIsGuestInRealModeEx(pMixedCtx)) 8754 { 8755 PVM pVM = pVCpu->CTX_SUFF(pVM); 8756 if (!pVM->hm.s.vmx.fUnrestrictedGuest) 8757 { 7629 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest) 7630 { 7631 /* 7632 * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit. 7633 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields". 7634 */ 7635 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID; 7636 } 7637 else 7638 { 7639 /* We require CR0 to check if the guest is in real-mode. */ 7640 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 7641 AssertRCReturn(rc, rc); 7642 7643 /* 7644 * Hardware interrupts & exceptions cannot be delivered through the software interrupt 7645 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the 7646 * interrupt handler in the (real-mode) guest. 7647 * 7648 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode". 7649 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling. 7650 */ 7651 if (CPUMIsGuestInRealModeEx(pMixedCtx)) 7652 { 7653 PVM pVM = pVCpu->CTX_SUFF(pVM); 8758 7654 Assert(PDMVmmDevHeapIsEnabled(pVM)); 8759 7655 Assert(pVM->hm.s.vmx.pRealModeTSS); 8760 7656 8761 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */ 8762 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 8763 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx); 8764 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 7657 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */ 7658 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK 7659 | CPUMCTX_EXTRN_TABLE_MASK 7660 | CPUMCTX_EXTRN_RIP 7661 | CPUMCTX_EXTRN_RSP 7662 | CPUMCTX_EXTRN_RFLAGS); 8765 7663 AssertRCReturn(rc, rc); 8766 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));8767 7664 8768 7665 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */ … … 8776 7673 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */ 8777 7674 if (uVector == X86_XCPT_GP) 8778 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, p uIntrState);7675 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, pfIntrState); 8779 7676 8780 7677 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */ 8781 7678 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */ 8782 7679 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, 8783 fStepping, p uIntrState);7680 fStepping, pfIntrState); 8784 7681 } 8785 7682 … … 8803 7700 /* Construct the stack frame for the interrupt/exception handler. 
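When the guest runs in real mode without unrestricted execution, the code above dispatches the event through the guest IVT by hand: push FLAGS, CS and IP, clear IF and TF, then load CS:IP from IVT entry vector*4. A minimal model of that sequence; the guest memory accessors are hypothetical stand-ins:

    #include <stdint.h>

    extern uint16_t GuestReadU16(uint32_t GCPhys);
    extern void     GuestPush16(uint16_t *pSp, uint16_t uSs, uint16_t uValue);

    typedef struct REALCTX
    {
        uint16_t cs, ip, ss, sp, flags;
    } REALCTX;

    static void RealModeDispatch(REALCTX *pCtx, uint8_t uVector, uint32_t GCPhysIvtBase)
    {
        uint32_t const offEntry = GCPhysIvtBase + (uint32_t)uVector * 4;   /* Each IVT entry is IP:CS, 4 bytes. */
        uint16_t const uNewIp   = GuestReadU16(offEntry);
        uint16_t const uNewCs   = GuestReadU16(offEntry + 2);

        GuestPush16(&pCtx->sp, pCtx->ss, pCtx->flags);    /* Build the IRET frame: FLAGS, CS, IP. */
        GuestPush16(&pCtx->sp, pCtx->ss, pCtx->cs);
        GuestPush16(&pCtx->sp, pCtx->ss, pCtx->ip);

        pCtx->flags &= (uint16_t)~((1u << 9) | (1u << 8)); /* Clear IF and TF, like a real INT. */
        pCtx->cs = uNewCs;
        pCtx->ip = uNewIp;
    }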
*/ 8804 7701 VBOXSTRICTRC rcStrict; 8805 rcStrict 7702 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32); 8806 7703 if (rcStrict == VINF_SUCCESS) 8807 7704 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel); … … 8823 7720 /* If any other guest-state bits are changed here, make sure to update 8824 7721 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */ 8825 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS 8826 | HM_CHANGED_GUEST_RIP 8827 | HM_CHANGED_GUEST_RFLAGS 8828 | HM_CHANGED_GUEST_RSP); 7722 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS 7723 | HM_CHANGED_GUEST_CR2 7724 | HM_CHANGED_GUEST_RIP 7725 | HM_CHANGED_GUEST_RFLAGS 7726 | HM_CHANGED_GUEST_RSP); 8829 7727 8830 7728 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */ 8831 if (*p uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)7729 if (*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI) 8832 7730 { 8833 7731 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI 8834 7732 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT); 8835 Log4 (("Clearing inhibition due to STI.\n"));8836 *p uIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;7733 Log4Func(("Clearing inhibition due to STI\n")); 7734 *pfIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI; 8837 7735 } 8838 7736 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n", … … 8843 7741 pVCpu->hm.s.Event.fPending = false; 8844 7742 8845 /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */7743 /* Make hmR0VmxPreRunGuest() return if we're stepping since we've changed cs:rip. */ 8846 7744 if (fStepping) 8847 7745 rcStrict = VINF_EM_DBG_STEPPED; … … 8851 7749 return rcStrict; 8852 7750 } 8853 8854 /*8855 * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.8856 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".8857 */8858 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;8859 7751 } 8860 7752 … … 8865 7757 8866 7758 /* Inject. 
*/ 8867 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);7759 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo); 8868 7760 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo)) 8869 7761 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode); 8870 7762 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr); 7763 AssertRCReturn(rc, rc); 8871 7764 8872 7765 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT … … 8874 7767 pMixedCtx->cr2 = GCPtrFaultAddress; 8875 7768 8876 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu, 8877 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2)); 8878 8879 AssertRCReturn(rc, rc); 7769 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, 7770 pMixedCtx->cr2)); 7771 8880 7772 return VINF_SUCCESS; 8881 7773 } … … 8895 7787 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu) 8896 7788 { 8897 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));8898 8899 7789 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT) 7790 { 8900 7791 hmR0VmxClearIntWindowExitVmcs(pVCpu); 7792 Log4Func(("Cleared interrupt widow\n")); 7793 } 8901 7794 8902 7795 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT) 7796 { 8903 7797 hmR0VmxClearNmiWindowExitVmcs(pVCpu); 7798 Log4Func(("Cleared interrupt widow\n")); 7799 } 8904 7800 } 8905 7801 … … 8922 7818 8923 7819 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); 8924 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 7820 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 7821 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); 8925 7822 8926 7823 #ifdef VBOX_STRICT … … 8970 7867 VMCPU_ASSERT_EMT(pVCpu); 8971 7868 8972 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);8973 8974 7869 /* No longjmps (logger flushes, locks) in this fragile context. */ 8975 7870 VMMRZCallRing3Disable(pVCpu); … … 8981 7876 if (!pVCpu->hm.s.fLeaveDone) 8982 7877 { 8983 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are 8984 holding the PGM lock while saving the guest state (see hmR0VmxSaveGuestControlRegs()). */ 8985 hmR0VmxLeave(pVCpu, pMixedCtx, false /* fSaveGuestState */); 7878 /* 7879 * Do -not- import the guest-state here as we might already be in the middle of importing 7880 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState(). 7881 */ 7882 hmR0VmxLeave(pVCpu, false /* fImportState */); 8986 7883 pVCpu->hm.s.fLeaveDone = true; 8987 7884 } … … 9011 7908 int rc = HMR0EnterCpu(pVCpu); 9012 7909 AssertRC(rc); 9013 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 7910 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 7911 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); 9014 7912 9015 7913 /* Load the active VMCS as the current one. */ … … 9035 7933 9036 7934 /** 7935 * Exports the host state into the VMCS host-state area. 7936 * Sets up the VM-exit MSR-load area. 7937 * 7938 * The CPU state will be loaded from these fields on every successful VM-exit. 7939 * 7940 * @returns VBox status code. 7941 * @param pVCpu The cross context virtual CPU structure. 
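The actual injection ends up as three VMWRITEs, as shown above: the interruption-information word, the error code when the error-code-valid bit is set, and the instruction length (relevant for software interrupts and software exceptions). A sketch using the architectural VMCS field encodings; VmcsWrite32 is a stand-in wrapper:

    #include <stdint.h>

    #define VMCS_CTRL_ENTRY_INTERRUPTION_INFO   0x4016u
    #define VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE   0x4018u
    #define VMCS_CTRL_ENTRY_INSTR_LENGTH        0x401Au

    #define ENTRY_INT_INFO_VALID                UINT32_C(0x80000000)
    #define ENTRY_INT_INFO_ERRCODE_VALID        UINT32_C(0x00000800)

    extern int VmcsWrite32(uint32_t idField, uint32_t uValue);  /* Stand-in for a VMWRITE wrapper. */

    /* Queue one event for injection on the next VM-entry. */
    static int QueueEventForEntry(uint32_t uIntInfo, uint32_t uErrCode, uint32_t cbInstr)
    {
        int rc = VmcsWrite32(VMCS_CTRL_ENTRY_INTERRUPTION_INFO, uIntInfo | ENTRY_INT_INFO_VALID);
        if (uIntInfo & ENTRY_INT_INFO_ERRCODE_VALID)
            rc |= VmcsWrite32(VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE, uErrCode);
        rc |= VmcsWrite32(VMCS_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
        return rc;
    }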
7942 * 7943 * @remarks No-long-jump zone!!! 7944 */ 7945 static int hmR0VmxExportHostState(PVMCPU pVCpu) 7946 { 7947 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 7948 7949 int rc = VINF_SUCCESS; 7950 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT) 7951 { 7952 rc = hmR0VmxExportHostControlRegs(); 7953 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 7954 7955 rc = hmR0VmxExportHostSegmentRegs(pVCpu); 7956 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 7957 7958 rc = hmR0VmxExportHostMsrs(pVCpu); 7959 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 7960 7961 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT; 7962 } 7963 return rc; 7964 } 7965 7966 7967 /** 9037 7968 * Saves the host state in the VMCS host-state. 9038 * Sets up the VM-exit MSR-load area.9039 *9040 * The CPU state will be loaded from these fields on every successful VM-exit.9041 7969 * 9042 7970 * @returns VBox status code. … … 9046 7974 * @remarks No-long-jump zone!!! 9047 7975 */ 9048 static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu) 9049 { 7976 VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu) 7977 { 7978 AssertPtr(pVCpu); 9050 7979 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 9051 7980 9052 int rc = VINF_SUCCESS; 9053 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT)) 9054 { 9055 rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu); 9056 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9057 9058 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu); 9059 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9060 9061 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu); 9062 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9063 9064 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT); 9065 } 9066 return rc; 9067 } 9068 9069 9070 /** 9071 * Saves the host state in the VMCS host-state. 9072 * 9073 * @returns VBox status code. 9074 * @param pVM The cross context VM structure. 9075 * @param pVCpu The cross context virtual CPU structure. 9076 * 9077 * @remarks No-long-jump zone!!! 9078 */ 9079 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu) 9080 { 9081 AssertPtr(pVM); 9082 AssertPtr(pVCpu); 9083 9084 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); 9085 9086 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted 9087 and have to resave the host state but most of the time we won't be, so do it here before we disable interrupts. */ 9088 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 9089 return hmR0VmxSaveHostState(pVM, pVCpu); 9090 } 9091 9092 9093 /** 9094 * Loads the guest state into the VMCS guest-state area. 7981 /* 7982 * Export the host state here while entering HM context. 7983 * When thread-context hooks are used, we might get preempted and have to re-save the host 7984 * state but most of the time we won't be, so do it here before we disable interrupts. 7985 */ 7986 return hmR0VmxExportHostState(pVCpu); 7987 } 7988 7989 7990 /** 7991 * Exports the guest state into the VMCS guest-state area. 9095 7992 * 9096 7993 * The will typically be done before VM-entry when the guest-CPU state and the … … 9115 8012 * @remarks No-long-jump zone!!! 
9116 8013 */ 9117 static VBOXSTRICTRC hmR0Vmx LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)8014 static VBOXSTRICTRC hmR0VmxExportGuestState(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 9118 8015 { 9119 8016 AssertPtr(pVM); … … 9124 8021 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); 9125 8022 9126 STAM_PROFILE_ADV_START(&pVCpu->hm.s.Stat LoadGuestState, x);8023 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x); 9127 8024 9128 8025 /* Determine real-on-v86 mode. */ … … 9135 8032 9136 8033 /* 9137 * Load the guest-state into the VMCS.9138 8034 * Any ordering dependency among the sub-functions below must be explicitly stated using comments. 9139 8035 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it. 9140 8036 */ 9141 int rc = hmR0VmxSe tupVMRunHandler(pVCpu, pMixedCtx);9142 AssertLogRelMsgRCReturn(rc, (" hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);9143 9144 /* This needs to be done after hmR0VmxSe tupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */9145 rc = hmR0Vmx LoadGuestEntryCtls(pVCpu, pMixedCtx);9146 AssertLogRelMsgRCReturn(rc, (" hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);9147 9148 /* This needs to be done after hmR0VmxSe tupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */9149 rc = hmR0Vmx LoadGuestExitCtls(pVCpu, pMixedCtx);9150 AssertLogRelMsgRCReturn(rc, (" hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);9151 9152 rc = hmR0Vmx LoadGuestActivityState(pVCpu, pMixedCtx);9153 AssertLogRelMsgRCReturn(rc, (" hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);9154 9155 VBOXSTRICTRC rcStrict = hmR0Vmx LoadGuestCR3AndCR4(pVCpu, pMixedCtx);8037 int rc = hmR0VmxSelectVMRunHandler(pVCpu, pMixedCtx); 8038 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8039 8040 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */ 8041 rc = hmR0VmxExportGuestEntryCtls(pVCpu, pMixedCtx); 8042 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8043 8044 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */ 8045 rc = hmR0VmxExportGuestExitCtls(pVCpu, pMixedCtx); 8046 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8047 8048 rc = hmR0VmxExportGuestCR0(pVCpu, pMixedCtx); 8049 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8050 8051 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pMixedCtx); 9156 8052 if (rcStrict == VINF_SUCCESS) 9157 8053 { /* likely */ } … … 9162 8058 } 9163 8059 9164 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */ 9165 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx); 9166 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9167 9168 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we 9169 determine we don't have to swap EFER after all. */ 9170 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx); 9171 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9172 9173 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx); 9174 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! 
rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9175 9176 rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx); 9177 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9178 9179 /* 9180 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here). 9181 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState(). 9182 */ 9183 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx); 9184 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9185 9186 /* Clear any unused and reserved bits. */ 9187 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2 9188 | HM_CHANGED_GUEST_HWVIRT); 9189 9190 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x); 8060 rc = hmR0VmxExportGuestSegmentRegs(pVCpu, pMixedCtx); 8061 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8062 8063 /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it 8064 may alter controls if we determine we don't have to swap EFER after all. */ 8065 rc = hmR0VmxExportGuestMsrs(pVCpu, pMixedCtx); 8066 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8067 8068 rc = hmR0VmxExportGuestApicTpr(pVCpu); 8069 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8070 8071 /* This needs to be done after hmR0VmxExportGuestCR0() as it may alter intercepted exceptions. */ 8072 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu); 8073 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8074 8075 /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is 8076 not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). */ 8077 rc = hmR0VmxExportGuestRip(pVCpu, pMixedCtx); 8078 rc |= hmR0VmxExportGuestRsp(pVCpu, pMixedCtx); 8079 rc |= hmR0VmxExportGuestRflags(pVCpu, pMixedCtx); 8080 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8081 8082 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */ 8083 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP) 8084 | HM_CHANGED_GUEST_CR2 8085 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7) 8086 | HM_CHANGED_GUEST_X87 8087 | HM_CHANGED_GUEST_SSE_AVX 8088 | HM_CHANGED_GUEST_OTHER_XSAVE 8089 | HM_CHANGED_GUEST_XCRx 8090 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */ 8091 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */ 8092 | HM_CHANGED_GUEST_TSC_AUX 8093 | HM_CHANGED_GUEST_OTHER_MSRS 8094 | HM_CHANGED_GUEST_HWVIRT 8095 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK))); 8096 8097 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x); 9191 8098 return rc; 9192 8099 } … … 9194 8101 9195 8102 /** 9196 * Loads the state shared between the host and guest into the VMCS.8103 * Exports the state shared between the host and guest into the VMCS. 9197 8104 * 9198 8105 * @param pVM The cross context VM structure. … … 9202 8109 * @remarks No-long-jump zone!!! 
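The export path above is driven by a dirty mask: each sub-function writes its piece of guest state to the VMCS, and the handled change bits are cleared once everything succeeded. A stripped-down model of that flow with made-up flag names and field indices (HwWriteGuestField stands in for VMWRITE):

    #include <stdint.h>

    #define DIRTY_RIP      UINT64_C(0x1)
    #define DIRTY_RSP      UINT64_C(0x2)
    #define DIRTY_RFLAGS   UINT64_C(0x4)
    #define DIRTY_ALL      (DIRTY_RIP | DIRTY_RSP | DIRTY_RFLAGS)

    typedef struct MYSTATE
    {
        uint64_t fDirty;               /* Which guest fields changed since the last export. */
        uint64_t rip, rsp, rflags;
    } MYSTATE;

    extern int HwWriteGuestField(unsigned idField, uint64_t uValue);  /* Stand-in for VMWRITE. */

    static int ExportGuestState(MYSTATE *pState)
    {
        int rc = 0;
        if (pState->fDirty & DIRTY_RIP)
            rc |= HwWriteGuestField(0, pState->rip);
        if (pState->fDirty & DIRTY_RSP)
            rc |= HwWriteGuestField(1, pState->rsp);
        if (pState->fDirty & DIRTY_RFLAGS)
            rc |= HwWriteGuestField(2, pState->rflags);
        if (!rc)
            pState->fDirty &= ~DIRTY_ALL;   /* Everything handled: clear the change flags. */
        return rc;
    }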
9203 8110 */ 9204 static void hmR0Vmx LoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)8111 static void hmR0VmxExportSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 9205 8112 { 9206 8113 NOREF(pVM); … … 9209 8116 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 9210 8117 9211 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))9212 { 9213 int rc = hmR0Vmx LoadSharedCR0(pVCpu, pCtx);8118 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK) 8119 { 8120 int rc = hmR0VmxExportSharedDebugState(pVCpu, pCtx); 9214 8121 AssertRC(rc); 9215 } 9216 9217 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG)) 9218 { 9219 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx); 9220 AssertRC(rc); 8122 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK; 9221 8123 9222 8124 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */ 9223 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))9224 { 9225 rc = hmR0Vmx LoadGuestRflags(pVCpu, pCtx);8125 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS) 8126 { 8127 rc = hmR0VmxExportGuestRflags(pVCpu, pCtx); 9226 8128 AssertRC(rc); 9227 8129 } 9228 8130 } 9229 8131 9230 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS))8132 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS) 9231 8133 { 9232 8134 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx); 9233 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS); 9234 } 9235 9236 /* Loading CR0, debug state might have changed intercepts, update VMCS. */ 9237 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS)) 9238 { 9239 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC)); 9240 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB)); 9241 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap); 9242 AssertRC(rc); 9243 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS); 9244 } 9245 9246 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE), 9247 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu))); 8135 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS; 8136 } 8137 8138 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE), 8139 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 9248 8140 } 9249 8141 … … 9265 8157 * @remarks No-long-jump zone!!! 9266 8158 */ 9267 static VBOXSTRICTRC hmR0Vmx LoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)8159 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 9268 8160 { 9269 8161 HMVMX_ASSERT_PREEMPT_SAFE(); … … 9271 8163 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 9272 8164 9273 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));9274 8165 #ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE 9275 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);8166 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_ALL_GUEST; 9276 8167 #endif 9277 8168 9278 8169 /* 9279 * RIP is what changes the most often and hence if it's the only bit needing to be9280 * updated, we shall handle it early for performance reasons.8170 * For many exits it's only RIP that changes and hence try to export it first 8171 * without going through a lot of change flag checks. 
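
As a compact illustration of the fast path that follows (export only RIP when it is the only dirty guest-state bit, otherwise fall back to the full export), here is a minimal, self-contained C sketch. It is not VirtualBox code: the DEMO_* flags, the DEMOVCPU structure and the helper names are invented for this example, and the real fCtxChanged bookkeeping tracks far more state groups.

#include <stdint.h>
#include <stdio.h>

#define DEMO_CHANGED_RIP      UINT64_C(0x0001)
#define DEMO_CHANGED_RSP      UINT64_C(0x0002)
#define DEMO_CHANGED_CR_MASK  UINT64_C(0x00f0)
#define DEMO_CHANGED_SHARED   UINT64_C(0x8000)   /* host/guest shared bits, exported later with preemption disabled */
#define DEMO_CHANGED_ALL      UINT64_C(0xffff)

typedef struct DEMOVCPU
{
    uint64_t fCtxChanged;   /* dirty mask of guest-state groups */
    uint64_t uRip;
} DEMOVCPU;

static void demoExportRip(DEMOVCPU *pVCpu)
{
    printf("VMWRITE guest RIP = %#llx\n", (unsigned long long)pVCpu->uRip);
    pVCpu->fCtxChanged &= ~DEMO_CHANGED_RIP;
}

static void demoExportFullState(DEMOVCPU *pVCpu)
{
    printf("full guest-state export\n");
    pVCpu->fCtxChanged &= DEMO_CHANGED_SHARED;   /* only the shared bits remain pending */
}

static void demoExportGuestStateOptimal(DEMOVCPU *pVCpu)
{
    uint64_t const fDirty = pVCpu->fCtxChanged & (DEMO_CHANGED_ALL & ~DEMO_CHANGED_SHARED);
    if (fDirty == DEMO_CHANGED_RIP)
        demoExportRip(pVCpu);          /* common case: only RIP advanced since the last VM-entry */
    else if (fDirty)
        demoExportFullState(pVCpu);    /* anything else: take the complete (slower) path */
}

int main(void)
{
    DEMOVCPU VCpu = { DEMO_CHANGED_RIP, 0x1000 };
    demoExportGuestStateOptimal(&VCpu);                    /* fast path */
    VCpu.fCtxChanged = DEMO_CHANGED_RIP | DEMO_CHANGED_RSP;
    demoExportGuestStateOptimal(&VCpu);                    /* full path */
    return 0;
}
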
9281 8172 */ 9282 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 9283 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP)) 9284 { 9285 rcStrict = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx); 8173 VBOXSTRICTRC rcStrict; 8174 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 8175 if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP) 8176 { 8177 rcStrict = hmR0VmxExportGuestRip(pVCpu, pMixedCtx); 9286 8178 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 9287 8179 { /* likely */} 9288 8180 else 9289 { 9290 AssertMsgFailedReturn(("hmR0VmxLoadGuestStateOptimal: hmR0VmxLoadGuestRip failed! rc=%Rrc\n", 9291 VBOXSTRICTRC_VAL(rcStrict)), rcStrict); 9292 } 9293 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal); 9294 } 9295 else if (HMCPU_CF_VALUE(pVCpu)) 9296 { 9297 rcStrict = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx); 8181 AssertMsgFailedReturn(("hmR0VmxExportGuestRip failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict); 8182 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal); 8183 } 8184 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 8185 { 8186 rcStrict = hmR0VmxExportGuestState(pVM, pVCpu, pMixedCtx); 9298 8187 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 9299 8188 { /* likely */} 9300 8189 else 9301 8190 { 9302 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, 9303 ("hmR0VmxLoadGuestStateOptimal: hmR0VmxLoadGuestState failed! rc=%Rrc\n",VBOXSTRICTRC_VAL(rcStrict)));8191 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("hmR0VmxExportGuestState failed! rc=%Rrc\n", 8192 VBOXSTRICTRC_VAL(rcStrict))); 9304 8193 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 9305 8194 return rcStrict; 9306 8195 } 9307 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull); 9308 } 9309 8196 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull); 8197 } 8198 else 8199 rcStrict = VINF_SUCCESS; 8200 8201 #ifdef VBOX_STRICT 9310 8202 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */ 9311 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST) 9312 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE), 9313 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu))); 8203 fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 8204 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)), 8205 ("fCtxChanged=%#RX64\n", fCtxChanged)); 8206 #endif 9314 8207 return rcStrict; 9315 8208 } … … 9370 8263 * in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}. 9371 8264 * 9372 * This is the reason we do it here and not in hmR0Vmx LoadGuestState().8265 * This is the reason we do it here and not in hmR0VmxExportGuestState(). 9373 8266 */ 9374 8267 if ( !pVCpu->hm.s.vmx.u64MsrApicBase … … 9387 8280 9388 8281 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. 
*/ 9389 Log4 (("hmR0VmxPreRunGuest: VCPU%u: Mapped HC APIC-access page at %#RGp\n", pVCpu->idCpu, GCPhysApicBase));8282 Log4Func(("Mapped HC APIC-access page at %#RGp\n", GCPhysApicBase)); 9390 8283 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P); 9391 8284 AssertRCReturn(rc, rc); … … 9397 8290 if (TRPMHasTrap(pVCpu)) 9398 8291 hmR0VmxTrpmTrapToPendingEvent(pVCpu); 9399 uint32_t uIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);8292 uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx); 9400 8293 9401 8294 /* … … 9403 8296 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM. 9404 8297 */ 9405 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, uIntrState, fStepping);8298 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, fStepping); 9406 8299 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 9407 8300 { /* likely */ } … … 9421 8314 9422 8315 /* 9423 * Loadthe guest state bits.8316 * Export the guest state bits. 9424 8317 * 9425 8318 * We cannot perform longjmps while loading the guest state because we do not preserve the … … 9431 8324 * Hence, loading of the guest state needs to be done -after- injection of events. 9432 8325 */ 9433 rcStrict = hmR0Vmx LoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);8326 rcStrict = hmR0VmxExportGuestStateOptimal(pVM, pVCpu, pMixedCtx); 9434 8327 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 9435 8328 { /* likely */ } … … 9520 8413 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x); 9521 8414 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED) 9522 HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);8415 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT; 9523 8416 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x); 9524 8417 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu); 9525 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));9526 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);9527 8418 } 9528 8419 … … 9532 8423 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs 9533 8424 && pVCpu->hm.s.vmx.cMsrs > 0) 9534 {9535 8425 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu); 9536 }9537 8426 9538 8427 /* 9539 * Loadthe host state bits as we may've been preempted (only happens when8428 * Re-save the host state bits as we may've been preempted (only happens when 9540 8429 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM). 9541 8430 * Note that the 64-on-32 switcher saves the (64-bit) host state into the VMCS and … … 9543 8432 * See @bugref{8432}. 9544 8433 */ 9545 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))9546 { 9547 int rc = hmR0Vmx SaveHostState(pVM,pVCpu);8434 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT) 8435 { 8436 int rc = hmR0VmxExportHostState(pVCpu); 9548 8437 AssertRC(rc); 9549 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt SaveHostState);9550 } 9551 Assert(! HMCPU_CF_IS_PENDING(pVCpu,HM_CHANGED_HOST_CONTEXT));8438 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptExportHostState); 8439 } 8440 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)); 9552 8441 9553 8442 /* 9554 * Loadthe state shared between host and guest (FPU, debug, lazy MSRs).8443 * Export the state shared between host and guest (FPU, debug, lazy MSRs). 9555 8444 */ 9556 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))9557 hmR0Vmx LoadSharedState(pVM, pVCpu, pMixedCtx);9558 AssertMsg(! 
HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));8445 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE) 8446 hmR0VmxExportSharedState(pVM, pVCpu, pMixedCtx); 8447 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 9559 8448 9560 8449 /* Store status of the shared guest-host state at the time of VM-entry. */ … … 9605 8494 { 9606 8495 bool fMsrUpdated; 9607 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 9608 AssertRC(rc2); 9609 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)); 9610 9611 rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */, 8496 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_TSC_AUX); 8497 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */, 9612 8498 &fMsrUpdated); 9613 8499 AssertRC(rc2); 9614 8500 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs); 9615 9616 8501 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */ 9617 8502 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true; … … 9627 8512 { 9628 8513 bool fMsrUpdated; 9629 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 9630 AssertRC(rc2); 9631 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)); 9632 9633 rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */, 9634 &fMsrUpdated); 8514 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS); 8515 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */, 8516 &fMsrUpdated); 9635 8517 AssertRC(rc2); 9636 8518 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs); … … 9658 8540 * @param pVM The cross context VM structure. 9659 8541 * @param pVCpu The cross context virtual CPU structure. 9660 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe9661 * out-of-sync. Make sure to update the required fields9662 * before using them.9663 8542 * @param pVmxTransient Pointer to the VMX transient structure. 9664 8543 * @param rcVMRun Return code of VMLAUNCH/VMRESUME. … … 9669 8548 * unconditionally when it is safe to do so. 9670 8549 */ 9671 static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun) 9672 { 9673 NOREF(pVM); 9674 uint64_t uHostTsc = ASMReadTSC(); 9675 8550 static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun) 8551 { 8552 uint64_t const uHostTsc = ASMReadTSC(); 9676 8553 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 9677 8554 9678 8555 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */ 9679 8556 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */ 9680 HMVMXCPU_GST_RESET_TO(pVCpu, 0);/* Exits/longjmps to ring-3 requires saving the guest state. */8557 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */ 9681 8558 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */ 9682 8559 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. 
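
The fVmcsFieldsRead reset just above hints at how VM-exit handling avoids redundant VMREADs: fields such as the exit qualification are fetched lazily and cached in the transient structure for the duration of a single exit. A rough, hypothetical sketch of that pattern follows; the DEMO_* names and the fake demoVmRead() merely stand in for the real VMCS accessors.

#include <stdint.h>
#include <stdio.h>

#define DEMO_READ_EXIT_QUALIFICATION  UINT32_C(0x01)

typedef struct DEMOTRANSIENT
{
    uint32_t fFieldsRead;         /* which cached fields are valid for the current exit */
    uint64_t uExitQualification;  /* cached copy of the exit qualification */
} DEMOTRANSIENT;

static uint64_t demoVmRead(uint32_t idField)   /* stand-in for an (expensive) VMREAD */
{
    (void)idField;
    return 0x42;
}

static uint64_t demoGetExitQual(DEMOTRANSIENT *pTransient)
{
    if (!(pTransient->fFieldsRead & DEMO_READ_EXIT_QUALIFICATION))
    {
        pTransient->uExitQualification = demoVmRead(1 /* arbitrary field id */);
        pTransient->fFieldsRead       |= DEMO_READ_EXIT_QUALIFICATION;
    }
    return pTransient->uExitQualification;
}

int main(void)
{
    DEMOTRANSIENT Transient = { 0, 0 };
    demoGetExitQual(&Transient);      /* first access: performs the "VMREAD" */
    demoGetExitQual(&Transient);      /* second access: served from the cache */
    Transient.fFieldsRead = 0;        /* after the next VM-entry the cache is stale again */
    printf("qual=%#llx\n", (unsigned long long)demoGetExitQual(&Transient));
    return 0;
}
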
*/ … … 9735 8612 if (!pVmxTransient->fVMEntryFailed) 9736 8613 { 9737 /** @todo We can optimize this by only syncing with our force-flags when 9738 * really needed and keeping the VMCS state as it is for most 9739 * VM-exits. */ 9740 /* Update the guest interruptibility-state from the VMCS. */ 9741 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx); 8614 VMMRZCallRing3Enable(pVCpu); 9742 8615 9743 8616 /* 9744 * Allow longjmps to ring-3 -after- saving the guest-interruptibility state 9745 * as it's not part of hmR0VmxSaveGuestState() and thus would trigger an assertion 9746 * on the longjmp path to ring-3 while saving the (rest of) the guest state, 9747 * see @bugref{6208#c63}. 8617 * Import the guest-interruptibility state always as we need it while evaluating 8618 * injecting events on re-entry. 8619 * 8620 * We don't import CR0 (when Unrestricted guest execution is unavailable) despite 8621 * checking for real-mode while exporting the state because all bits that cause 8622 * mode changes wrt CR0 are intercepted. 9748 8623 */ 9749 VMMRZCallRing3Enable(pVCpu); 8624 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_HM_VMX_INT_STATE); 8625 AssertRC(rc); 9750 8626 9751 8627 #if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE) 9752 rc = hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);8628 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 9753 8629 AssertRC(rc); 9754 8630 #elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS) 9755 rc = hmR0Vmx SaveGuestRflags(pVCpu, pMixedCtx);8631 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_RFLAGS); 9756 8632 AssertRC(rc); 9757 8633 #endif … … 9765 8641 rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]); 9766 8642 AssertRC(rc); 9767 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);8643 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 9768 8644 } 9769 8645 … … 9773 8649 else 9774 8650 { 9775 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun, 9776 pVmxTransient->fVMEntryFailed)); 8651 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed)); 9777 8652 } 9778 8653 … … 9816 8691 /* Restore any residual host-state and save any bits shared between host 9817 8692 and guest into the guest-CPU state. Re-enables interrupts! */ 9818 hmR0VmxPostRunGuest(pV M, pVCpu, pCtx, &VmxTransient, rcRun);8693 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun); 9819 8694 9820 8695 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */ … … 9931 8806 * @param pDbgState The structure to initialize. 9932 8807 */ 9933 DECLINLINE(void)hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)8808 static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState) 9934 8809 { 9935 8810 pDbgState->uRipStart = pCtx->rip; … … 9962 8837 * @param pDbgState The debug state. 9963 8838 */ 9964 DECLINLINE(void)hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)8839 static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState) 9965 8840 { 9966 8841 /* … … 10013 8888 10014 8889 10015 DECLINLINE(VBOXSTRICTRC)hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)8890 static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict) 10016 8891 { 10017 8892 /* … … 10055 8930 * the necessary VM-exits demanded by DBGF and DTrace. 
10056 8931 * 10057 * @param pVM The cross context VM structure.10058 8932 * @param pVCpu The cross context virtual CPU structure. 10059 * @param pCtx Pointer to the guest-CPU context.10060 8933 * @param pDbgState The debug state. 10061 8934 * @param pVmxTransient Pointer to the VMX transient structure. May update 10062 8935 * fUpdateTscOffsettingAndPreemptTimer. 10063 8936 */ 10064 static void hmR0VmxPreRunGuestDebugStateUpdate(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, 10065 PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient) 8937 static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient) 10066 8938 { 10067 8939 /* … … 10085 8957 * Software interrupts (INT XXh) - no idea how to trigger these... 10086 8958 */ 8959 PVM pVM = pVCpu->CTX_SUFF(pVM); 10087 8960 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE) 10088 8961 || VBOXVMM_INT_SOFTWARE_ENABLED()) … … 10129 9002 * Process events and probes for VM-exits, making sure we get the wanted VM-exits. 10130 9003 * 10131 * Note! This is the reverse of w aft hmR0VmxHandleExitDtraceEvents does.9004 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does. 10132 9005 * So, when adding/changing/removing please don't forget to update it. 10133 9006 * … … 10211 9084 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE)) 10212 9085 { 10213 int rc 2 = hmR0VmxSaveGuestCR0(pVCpu, pCtx);10214 rc2 |= hmR0VmxSaveGuestCR4(pVCpu, pCtx);10215 rc2 |= hmR0VmxSaveGuestApicState(pVCpu, pCtx);10216 AssertRC(rc 2);9086 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 9087 | CPUMCTX_EXTRN_CR4 9088 | CPUMCTX_EXTRN_APIC_TPR); 9089 AssertRC(rc); 10217 9090 10218 9091 #if 0 /** @todo fix me */ … … 10234 9107 { 10235 9108 pDbgState->fClearCr0Mask = false; 10236 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);9109 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 10237 9110 } 10238 9111 if (pDbgState->fClearCr4Mask) 10239 9112 { 10240 9113 pDbgState->fClearCr4Mask = false; 10241 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);9114 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4); 10242 9115 } 10243 9116 } … … 10479 9352 case VMX_EXIT_MOV_CRX: 10480 9353 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 10481 /** @todo r=bird: I feel these macros aren't very descriptive and needs to be at least 30 chars longer! ;-) 10482 * Sensible abbreviations strongly recommended here because even with 130 columns this stuff get too wide! 
*/ 10483 if ( VMX_EXIT_QUALIFICATION_CRX_ACCESS(pVmxTransient->uExitQualification) 10484 == VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ) 9354 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ) 10485 9355 SET_BOTH(CRX_READ); 10486 9356 else 10487 9357 SET_BOTH(CRX_WRITE); 10488 uEventArg = VMX_EXIT_QUAL IFICATION_CRX_REGISTER(pVmxTransient->uExitQualification);9358 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification); 10489 9359 break; 10490 9360 case VMX_EXIT_MOV_DRX: 10491 9361 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 10492 if ( VMX_EXIT_QUAL IFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification)10493 == VMX_EXIT_QUAL IFICATION_DRX_DIRECTION_READ)9362 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) 9363 == VMX_EXIT_QUAL_DRX_DIRECTION_READ) 10494 9364 SET_BOTH(DRX_READ); 10495 9365 else 10496 9366 SET_BOTH(DRX_WRITE); 10497 uEventArg = VMX_EXIT_QUAL IFICATION_DRX_REGISTER(pVmxTransient->uExitQualification);9367 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification); 10498 9368 break; 10499 9369 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break; … … 10571 9441 { 10572 9442 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 10573 hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);9443 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 10574 9444 switch (enmEvent1) 10575 9445 { … … 10759 9629 { 10760 9630 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 10761 hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 9631 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 9632 AssertRC(rc); 10762 9633 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification); 10763 9634 } … … 10841 9712 case VMX_EXIT_XRSTORS: 10842 9713 { 10843 int rc 2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);10844 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);10845 AssertRCReturn(rc 2, rc2);9714 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 9715 | CPUMCTX_EXTRN_CS); 9716 AssertRCReturn(rc, rc); 10846 9717 if ( pMixedCtx->rip != pDbgState->uRipStart 10847 9718 || pMixedCtx->cs.Sel != pDbgState->uCsStart) … … 10905 9776 10906 9777 /* Set HMCPU indicators. */ 10907 bool const fSavedSingleInstruction 10908 pVCpu->hm.s.fSingleInstruction 10909 pVCpu->hm.s.fDebugWantRdTscExit 10910 pVCpu->hm.s.fUsingDebugLoop 9778 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction; 9779 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu); 9780 pVCpu->hm.s.fDebugWantRdTscExit = false; 9781 pVCpu->hm.s.fUsingDebugLoop = true; 10911 9782 10912 9783 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */ 10913 9784 VMXRUNDBGSTATE DbgState; 10914 9785 hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState); 10915 hmR0VmxPreRunGuestDebugStateUpdate(pV M, pVCpu, pCtx, &DbgState, &VmxTransient);9786 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient); 10916 9787 10917 9788 /* … … 10949 9820 * and guest into the guest-CPU state. Re-enables interrupts! 10950 9821 */ 10951 hmR0VmxPostRunGuest(pV M, pVCpu, pCtx, &VmxTransient, rcRun);9822 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun); 10952 9823 10953 9824 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). 
*/ … … 10990 9861 if (fStepping) 10991 9862 { 10992 int rc 2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);10993 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);10994 AssertRC Return(rc2, rc2);9863 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 9864 | CPUMCTX_EXTRN_CS); 9865 AssertRC(rc); 10995 9866 if ( pCtx->rip != DbgState.uRipStart 10996 9867 || pCtx->cs.Sel != DbgState.uCsStart) … … 10999 9870 break; 11000 9871 } 11001 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);9872 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7); 11002 9873 } 11003 9874 … … 11006 9877 */ 11007 9878 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo) 11008 hmR0VmxPreRunGuestDebugStateUpdate(pV M, pVCpu, pCtx, &DbgState, &VmxTransient);9879 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient); 11009 9880 } 11010 9881 … … 11014 9885 if (pVCpu->hm.s.fClearTrapFlag) 11015 9886 { 11016 int rc 2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);11017 AssertRC Return(rc2, rc2);9887 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 9888 AssertRC(rc); 11018 9889 pVCpu->hm.s.fClearTrapFlag = false; 11019 9890 pCtx->eflags.Bits.u1TF = 0; … … 11190 10061 { 11191 10062 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 11192 Assert( HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);10063 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn)); 11193 10064 HMVMX_ASSERT_PREEMPT_SAFE(); 11194 10065 … … 11223 10094 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason) 11224 10095 { 11225 # 11226 # define VMEXIT_CALL_RET(a_CallExpr) \10096 #ifdef DEBUG_ramshankar 10097 #define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \ 11227 10098 do { \ 11228 int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); \ 10099 if (a_fSave != 0) \ 10100 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); \ 11229 10101 VBOXSTRICTRC rcStrict = a_CallExpr; \ 11230 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); \ 10102 if (a_fSave != 0) \ 10103 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \ 11231 10104 return rcStrict; \ 11232 10105 } while (0) 11233 # 11234 # define VMEXIT_CALL_RET(a_CallExpr) return a_CallExpr11235 # 10106 #else 10107 # define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr 10108 #endif 11236 10109 switch (rcReason) 11237 10110 { 11238 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET( hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));11239 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET( hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));11240 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET( hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));11241 case VMX_EXIT_CPUID: VMEXIT_CALL_RET( hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));11242 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET( hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));11243 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET( hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));11244 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET( hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));11245 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET( hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));11246 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET( hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));11247 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET( hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));11248 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET( hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));11249 case VMX_EXIT_ MWAIT: VMEXIT_CALL_RET(hmR0VmxExitMwait(pVCpu, pMixedCtx, 
pVmxTransient));11250 case VMX_EXIT_M ONITOR: VMEXIT_CALL_RET(hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));11251 case VMX_EXIT_ TASK_SWITCH: VMEXIT_CALL_RET(hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));11252 case VMX_EXIT_ PREEMPT_TIMER: VMEXIT_CALL_RET(hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));11253 case VMX_EXIT_ RDMSR: VMEXIT_CALL_RET(hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));11254 case VMX_EXIT_ WRMSR: VMEXIT_CALL_RET(hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));11255 case VMX_EXIT_ MOV_DRX: VMEXIT_CALL_RET(hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));11256 case VMX_EXIT_ TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));11257 case VMX_EXIT_ HLT: VMEXIT_CALL_RET(hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));11258 case VMX_EXIT_ INVD: VMEXIT_CALL_RET(hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));11259 case VMX_EXIT_INV LPG: VMEXIT_CALL_RET(hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));11260 case VMX_EXIT_ RSM: VMEXIT_CALL_RET(hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));11261 case VMX_EXIT_ MTF: VMEXIT_CALL_RET(hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));11262 case VMX_EXIT_ PAUSE: VMEXIT_CALL_RET(hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));11263 case VMX_EXIT_ XDTR_ACCESS: VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));11264 case VMX_EXIT_ TR_ACCESS: VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));11265 case VMX_EXIT_ WBINVD: VMEXIT_CALL_RET(hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));11266 case VMX_EXIT_ XSETBV: VMEXIT_CALL_RET(hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));11267 case VMX_EXIT_ RDRAND: VMEXIT_CALL_RET(hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));11268 case VMX_EXIT_ INVPCID: VMEXIT_CALL_RET(hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));11269 case VMX_EXIT_ GETSEC: VMEXIT_CALL_RET(hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));11270 case VMX_EXIT_ RDPMC: VMEXIT_CALL_RET(hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));11271 case VMX_EXIT_ VMCALL: VMEXIT_CALL_RET(hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));10111 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient)); 10112 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient)); 10113 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient)); 10114 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient)); 10115 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient)); 10116 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient)); 10117 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient)); 10118 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient)); 10119 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient)); 10120 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient)); 10121 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient)); 10122 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient)); 10123 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pMixedCtx, 
pVmxTransient)); 10124 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient)); 10125 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient)); 10126 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient)); 10127 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient)); 10128 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient)); 10129 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient)); 10130 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient)); 10131 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient)); 10132 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient)); 10133 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient)); 10134 case VMX_EXIT_RSM: VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient)); 10135 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient)); 10136 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient)); 10137 case VMX_EXIT_XDTR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient)); 10138 case VMX_EXIT_TR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient)); 10139 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient)); 10140 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient)); 10141 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient)); 10142 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient)); 10143 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient)); 10144 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient)); 11272 10145 11273 10146 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); … … 11296 10169 case VMX_EXIT_XRSTORS: 11297 10170 return hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient); 10171 11298 10172 case VMX_EXIT_ENCLS: 11299 10173 case VMX_EXIT_RDSEED: /* only spurious VM-exits, so undefined */ … … 11363 10237 /* Advance the RIP. */ 11364 10238 pMixedCtx->rip += cbInstr; 11365 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);10239 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP); 11366 10240 11367 10241 /* Update interrupt inhibition. */ … … 11387 10261 { 11388 10262 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11389 rc |= hmR0Vmx SaveGuestRip(pVCpu, pMixedCtx);11390 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);10263 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 10264 | CPUMCTX_EXTRN_RFLAGS); 11391 10265 AssertRCReturn(rc, rc); 11392 10266 … … 11401 10275 if ( !pVCpu->hm.s.fSingleInstruction 11402 10276 && pMixedCtx->eflags.Bits.u1TF) 11403 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 10277 { 10278 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 10279 AssertRCReturn(rc, rc); 10280 } 11404 10281 11405 10282 return VINF_SUCCESS; … … 11440 10317 * CR0. 
11441 10318 */ 11442 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);11443 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);10319 uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 10320 uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 11444 10321 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). 11445 10322 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */ 11446 10323 if (fUnrestrictedGuest) 11447 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);11448 11449 uint32_t u 32GuestCR0;11450 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u 32GuestCR0);10324 fSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG); 10325 10326 uint32_t uGuestCR0; 10327 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uGuestCR0); 11451 10328 AssertRCBreak(rc); 11452 HMVMX_CHECK_BREAK((u 32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);11453 HMVMX_CHECK_BREAK(!(u 32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);10329 HMVMX_CHECK_BREAK((uGuestCR0 & fSetCR0) == fSetCR0, VMX_IGS_CR0_FIXED1); 10330 HMVMX_CHECK_BREAK(!(uGuestCR0 & ~fZapCR0), VMX_IGS_CR0_FIXED0); 11454 10331 if ( !fUnrestrictedGuest 11455 && (u32GuestCR0 & X86_CR0_PG)11456 && !(u 32GuestCR0 & X86_CR0_PE))10332 && (uGuestCR0 & X86_CR0_PG) 10333 && !(uGuestCR0 & X86_CR0_PE)) 11457 10334 { 11458 10335 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO); … … 11462 10339 * CR4. 11463 10340 */ 11464 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);11465 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);11466 11467 uint32_t u 32GuestCR4;11468 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u 32GuestCR4);10341 uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 10342 uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 10343 10344 uint32_t uGuestCR4; 10345 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uGuestCR4); 11469 10346 AssertRCBreak(rc); 11470 HMVMX_CHECK_BREAK((u 32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);11471 HMVMX_CHECK_BREAK(!(u 32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);10347 HMVMX_CHECK_BREAK((uGuestCR4 & fSetCR4) == fSetCR4, VMX_IGS_CR4_FIXED1); 10348 HMVMX_CHECK_BREAK(!(uGuestCR4 & ~fZapCR4), VMX_IGS_CR4_FIXED0); 11472 10349 11473 10350 /* … … 11525 10402 if ( fLongModeGuest 11526 10403 || ( fUnrestrictedGuest 11527 && !(u 32GuestCR0 & X86_CR0_PE)))10404 && !(uGuestCR0 & X86_CR0_PE))) 11528 10405 { 11529 10406 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID); … … 11545 10422 if (fLongModeGuest) 11546 10423 { 11547 HMVMX_CHECK_BREAK(u 32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);11548 HMVMX_CHECK_BREAK(u 32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);10424 HMVMX_CHECK_BREAK(uGuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE); 10425 HMVMX_CHECK_BREAK(uGuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE); 11549 10426 } 11550 10427 11551 10428 if ( !fLongModeGuest 11552 && (u 32GuestCR4 & X86_CR4_PCIDE))10429 && (uGuestCR4 & X86_CR4_PCIDE)) 11553 10430 { 11554 10431 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE); … … 11622 10499 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH); 11623 10500 HMVMX_CHECK_BREAK( fUnrestrictedGuest 11624 || !(u 32GuestCR0 & X86_CR0_PG)10501 || !(uGuestCR0 & X86_CR0_PG) 11625 10502 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME), 11626 10503 
VMX_IGS_EFER_LMA_LME_MISMATCH); … … 12048 10925 { 12049 10926 /* 12050 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves and12051 * anything we inject is not going to cause a VM-exit directly for the event being injected.12052 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".10927 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we 10928 * injected it ourselves and anything we inject is not going to cause a VM-exit directly 10929 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2]. 12053 10930 * 12054 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State". 10931 * [1] -- See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery". 10932 * [2] -- See Intel spec. 27.5.5 "Updating Non-Register State". 12055 10933 */ 12056 10934 VMXDispatchHostNmi(); … … 12123 11001 default: 12124 11002 { 12125 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);12126 AssertRCReturn(rc, rc);12127 12128 11003 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk); 12129 11004 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) … … 12133 11008 Assert(CPUMIsGuestInRealModeEx(pMixedCtx)); 12134 11009 12135 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11010 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 11011 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12136 11012 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 12137 11013 AssertRCReturn(rc, rc); … … 12139 11015 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 12140 11016 0 /* GCPtrFaultAddress */); 12141 AssertRCReturn(rc, rc);12142 11017 } 12143 11018 else … … 12200 11075 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}. 
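
The check that follows can be pictured in isolation: read the guest interruptibility state and, if interrupts were blocked only because of a just-executed STI while an inhibit-interrupts flag is pending, drop that inhibition before handling the NMI. The sketch below is illustrative only; the bit positions follow the Intel SDM layout of the interruptibility-state field (bit 0 is blocking by STI, bit 1 is blocking by MOV SS) and every DEMO_* name is invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_INT_STATE_BLOCK_STI    UINT32_C(0x1)   /* interruptibility state, bit 0 */
#define DEMO_INT_STATE_BLOCK_MOVSS  UINT32_C(0x2)   /* interruptibility state, bit 1 */

/* Returns true when a pending "inhibit interrupts for one instruction" flag
   should be cleared: the exit interrupted the STI shadow, so the inhibition
   no longer matches the guest RIP once the NMI is handled by the VMM. */
static bool demoShouldClearIntInhibition(uint32_t fIntrState, bool fInhibitPending)
{
    bool const fBlockSti = (fIntrState & DEMO_INT_STATE_BLOCK_STI) != 0;
    return fBlockSti && fInhibitPending;
}

int main(void)
{
    printf("%d\n", demoShouldClearIntInhibition(DEMO_INT_STATE_BLOCK_STI, true));   /* 1 */
    printf("%d\n", demoShouldClearIntInhibition(DEMO_INT_STATE_BLOCK_MOVSS, true)); /* 0 */
    return 0;
}
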
12201 11076 */ 12202 uint32_t uIntrState = 0;12203 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, & uIntrState);11077 uint32_t fIntrState = 0; 11078 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState); 12204 11079 AssertRCReturn(rc, rc); 12205 11080 12206 bool const fBlockSti = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);11081 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 12207 11082 if ( fBlockSti 12208 11083 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) … … 12225 11100 { 12226 11101 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12227 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);12228 11102 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient); 12229 11103 } … … 12236 11110 { 12237 11111 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12238 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);12239 11112 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient); 12240 11113 } … … 12247 11120 { 12248 11121 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12249 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);12250 11122 Assert(pMixedCtx == &pVCpu->cpum.GstCtx); 12251 11123 … … 12254 11126 */ 12255 11127 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12256 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 12257 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 12258 rc |= hmR0VmxSaveGuestCs(pVCpu); 11128 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 11129 | CPUMCTX_EXTRN_CS); 12259 11130 AssertRCReturn(rc, rc); 12260 11131 … … 12280 11151 rcStrict = VERR_EM_INTERPRETER; 12281 11152 } 12282 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);12283 11153 } 12284 11154 else … … 12288 11158 */ 12289 11159 Assert(pMixedCtx == &pVCpu->cpum.GstCtx); 12290 int rc2 = hmR0Vmx SaveGuestRegsForIemInterpreting(pVCpu);11160 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 12291 11161 AssertRCReturn(rc2, rc2); 12292 11162 … … 12295 11165 12296 11166 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 12297 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);11167 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 12298 11168 12299 11169 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 12311 11181 { 12312 11182 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12313 int rc = hmR0Vmx SaveGuestCR4(pVCpu, pMixedCtx);11183 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4); 12314 11184 AssertRCReturn(rc, rc); 12315 11185 … … 12328 11198 { 12329 11199 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12330 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /* Needed for CPL < 0 only, really. */ 12331 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/); 11200 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 12332 11201 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12333 11202 AssertRCReturn(rc, rc); 11203 12334 11204 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr); 12335 11205 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 12336 11206 { 12337 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */ 11207 /* If we get a spurious VM-exit when offsetting is enabled, 11208 we must reset offsetting on VM-reentry. See @bugref{6634}. 
*/ 12338 11209 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING) 12339 11210 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true; 11211 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 11212 | HM_CHANGED_GUEST_RFLAGS); 12340 11213 } 12341 11214 else if (rcStrict == VINF_IEM_RAISED_XCPT) 11215 { 12342 11216 rcStrict = VINF_SUCCESS; 12343 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);12344 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);11217 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK); 11218 } 12345 11219 return rcStrict; 12346 11220 } … … 12353 11227 { 12354 11228 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12355 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /* Needed for CPL < 0 only, really. */ 12356 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/); 12357 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */ 11229 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 11230 | CPUMCTX_EXTRN_TSC_AUX); 12358 11231 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12359 11232 AssertRCReturn(rc, rc); 11233 12360 11234 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr); 12361 11235 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 12362 11236 { 12363 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */ 11237 /* If we get a spurious VM-exit when offsetting is enabled, 11238 we must reset offsetting on VM-reentry. See @bugref{6634}. */ 12364 11239 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING) 12365 11240 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true; 11241 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 11242 | HM_CHANGED_GUEST_RFLAGS); 12366 11243 } 12367 11244 else if (rcStrict == VINF_IEM_RAISED_XCPT) 11245 { 12368 11246 rcStrict = VINF_SUCCESS; 12369 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);12370 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);11247 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK); 11248 } 12371 11249 return rcStrict; 12372 11250 } … … 12379 11257 { 12380 11258 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12381 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); 12382 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 11259 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4 11260 | CPUMCTX_EXTRN_CR0 11261 | CPUMCTX_EXTRN_RFLAGS 11262 | CPUMCTX_EXTRN_SS); 12383 11263 AssertRCReturn(rc, rc); 12384 11264 … … 12395 11275 rc = VERR_EM_INTERPRETER; 12396 11276 } 12397 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);12398 11277 return rc; 12399 11278 } … … 12406 11285 { 12407 11286 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12408 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);12409 11287 12410 11288 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3; 12411 11289 if (EMAreHypercallInstructionsEnabled(pVCpu)) 12412 11290 { 12413 #if 0 12414 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 12415 #else 12416 /* Aggressive state sync. for now. */ 12417 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 12418 rc |= hmR0VmxSaveGuestRflags(pVCpu,pMixedCtx); /* For CPL checks in gimHvHypercall() & gimKvmHypercall() */ 12419 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* For long-mode checks in gimKvmHypercall(). 
*/ 11291 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 11292 | CPUMCTX_EXTRN_RFLAGS 11293 | CPUMCTX_EXTRN_CR0 11294 | CPUMCTX_EXTRN_SS 11295 | CPUMCTX_EXTRN_CS 11296 | CPUMCTX_EXTRN_EFER); 12420 11297 AssertRCReturn(rc, rc); 12421 #endif12422 11298 12423 11299 /* Perform the hypercall. */ … … 12437 11313 } 12438 11314 else 12439 Log4 (("hmR0VmxExitVmcall:Hypercalls not enabled\n"));11315 Log4Func(("Hypercalls not enabled\n")); 12440 11316 12441 11317 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */ … … 12460 11336 12461 11337 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 12462 rc |= hmR0Vmx SaveGuestControlRegs(pVCpu, pMixedCtx);11338 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK); 12463 11339 AssertRCReturn(rc, rc); 12464 11340 … … 12469 11345 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n", 12470 11346 pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict))); 12471 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);12472 11347 return rcStrict; 12473 11348 } … … 12480 11355 { 12481 11356 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12482 int rc = hmR0Vmx SaveGuestCR0(pVCpu, pMixedCtx);12483 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);12484 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);11357 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 11358 | CPUMCTX_EXTRN_RFLAGS 11359 | CPUMCTX_EXTRN_SS); 12485 11360 AssertRCReturn(rc, rc); 12486 11361 … … 12505 11380 { 12506 11381 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12507 int rc = hmR0Vmx SaveGuestCR0(pVCpu, pMixedCtx);12508 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);12509 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);11382 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 11383 | CPUMCTX_EXTRN_RFLAGS 11384 | CPUMCTX_EXTRN_SS); 12510 11385 AssertRCReturn(rc, rc); 12511 11386 … … 12521 11396 if ( rc == VINF_EM_HALT 12522 11397 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx)) 12523 {12524 11398 rc = VINF_SUCCESS; 12525 }12526 11399 } 12527 11400 else … … 12543 11416 { 12544 11417 /* 12545 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never 12546 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by 12547 * executing VMCALL in VMX root operation. If we get here, something funny is going on. 12548 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment". 11418 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root 11419 * mode. In theory, we should never get this VM-exit. This can happen only if dual-monitor 11420 * treatment of SMI and VMX is enabled, which can (only?) be done by executing VMCALL in 11421 * VMX root operation. If we get here, something funny is going on. 11422 * 11423 * See Intel spec. 33.15.5 "Enabling the Dual-Monitor Treatment". 12549 11424 */ 12550 11425 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12560 11435 { 12561 11436 /* 12562 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX 12563 * root operation. Only an STM (SMM transfer monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL 12564 * in VMX root mode or receive an SMI. If we get here, something funny is going on. 12565 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 
25.3 "Other Causes of VM-Exits" 11437 * This can only happen if we support dual-monitor treatment of SMI, which can be activated 11438 * by executing VMCALL in VMX root operation. Only an STM (SMM transfer monitor) would get 11439 * this VM-exit when we (the executive monitor) execute a VMCALL in VMX root mode or receive 11440 * an SMI. If we get here, something funny is going on. 11441 * 11442 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment" 11443 * See Intel spec. 25.3 "Other Causes of VM-Exits" 12566 11444 */ 12567 11445 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12589 11467 { 12590 11468 /* 12591 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently12592 * don't make use of it (see hmR0VmxLoadGuestActivityState())as our guests don't have direct access to the host LAPIC.11469 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. 11470 * We don't make use of it as our guests don't have direct access to the host LAPIC. 12593 11471 * See Intel spec. 25.3 "Other Causes of VM-exits". 12594 11472 */ … … 12689 11567 12690 11568 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12691 rc |= hmR0Vmx SaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);12692 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);11569 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 11570 | CPUMCTX_EXTRN_CR4); 12693 11571 AssertRCReturn(rc, rc); 12694 11572 12695 11573 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr); 12696 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST); 11574 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS 11575 : HM_CHANGED_XCPT_RAISED_MASK); 12697 11576 12698 11577 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0(); … … 12719 11598 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 12720 11599 { 12721 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 12722 AssertRCReturn(rc, rc); 12723 12724 rc = hmR0VmxCheckVmcsCtls(pVCpu); 11600 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 11601 rc |= hmR0VmxCheckVmcsCtls(pVCpu); 12725 11602 AssertRCReturn(rc, rc); 12726 11603 … … 12729 11606 12730 11607 #ifdef VBOX_STRICT 12731 uint32_t uIntrState;11608 uint32_t fIntrState; 12732 11609 RTHCUINTREG uHCReg; 12733 11610 uint64_t u64Val; … … 12737 11614 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient); 12738 11615 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient); 12739 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, & uIntrState);11616 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState); 12740 11617 AssertRCReturn(rc, rc); 12741 11618 … … 12744 11621 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode)); 12745 11622 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr)); 12746 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));11623 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", fIntrState)); 12747 11624 12748 11625 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc); … … 12829 11706 12830 11707 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. 
*/ 12831 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);12832 11708 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT) 12833 11709 return VERR_EM_INTERPRETER; … … 12845 11721 12846 11722 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */ 12847 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);12848 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);12849 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);11723 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 11724 | CPUMCTX_EXTRN_RFLAGS 11725 | CPUMCTX_EXTRN_SS); 12850 11726 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)) 12851 { 12852 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx); 12853 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 12854 } 11727 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS); 12855 11728 AssertRCReturn(rc, rc); 12856 Log4 (("ecx=%#RX32\n", pMixedCtx->ecx));11729 Log4Func(("ecx=%#RX32\n", pMixedCtx->ecx)); 12857 11730 12858 11731 #ifdef VBOX_STRICT … … 12905 11778 12906 11779 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */ 12907 rc = hmR0Vmx SaveGuestCR0(pVCpu, pMixedCtx);12908 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);12909 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);11780 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 11781 | CPUMCTX_EXTRN_RFLAGS 11782 | CPUMCTX_EXTRN_SS); 12910 11783 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)) 12911 { 12912 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx); 12913 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 12914 } 11784 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS); 12915 11785 AssertRCReturn(rc, rc); 12916 Log4 (("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));11786 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax)); 12917 11787 12918 11788 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx)); … … 12934 11804 * EMInterpretWrmsr() changes it. 12935 11805 */ 12936 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);11806 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 12937 11807 } 12938 11808 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */ … … 12945 11815 * the other bits as well, SCE and NXE. See @bugref{7368}. 12946 11816 */ 12947 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS); 11817 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR 11818 | HM_CHANGED_VMX_ENTRY_CTLS 11819 | HM_CHANGED_VMX_EXIT_CTLS); 12948 11820 } 12949 11821 … … 12953 11825 switch (pMixedCtx->ecx) 12954 11826 { 12955 /* 12956 * For SYSENTER CS, EIP, ESP MSRs, we set both the flags here so we don't accidentally 12957 * overwrite the changed guest-CPU context value while going to ring-3, see @bufref{8745}. 
12958 */ 12959 case MSR_IA32_SYSENTER_CS: 12960 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); 12961 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR); 12962 break; 12963 case MSR_IA32_SYSENTER_EIP: 12964 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 12965 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR); 12966 break; 12967 case MSR_IA32_SYSENTER_ESP: 12968 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 12969 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR); 12970 break; 12971 case MSR_K8_FS_BASE: RT_FALL_THRU(); 12972 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break; 12973 case MSR_K6_EFER: /* already handled above */ break; 11827 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break; 11828 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break; 11829 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break; 11830 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break; 11831 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break; 11832 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break; 12974 11833 default: 12975 11834 { 12976 11835 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)) 12977 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);11836 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 12978 11837 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx)) 12979 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);11838 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS); 12980 11839 break; 12981 11840 } … … 13003 11862 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)) 13004 11863 { 13005 /* EFER writes are always intercepted, see hmR0Vmx LoadGuestMsrs(). */11864 /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */ 13006 11865 if (pMixedCtx->ecx != MSR_K6_EFER) 13007 11866 { … … 13040 11899 { 13041 11900 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 13042 13043 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);11901 /** @todo The guest has likely hit a contended spinlock. We might want to 11902 * poke a schedule different guest VCPU. 
*/ 13044 11903 return VINF_EM_RAW_INTERRUPT; 13045 11904 } … … 13078 11937 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 13079 11938 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2); 11939 13080 11940 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13081 11941 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11942 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13082 11943 AssertRCReturn(rc, rc); 13083 11944 11945 VBOXSTRICTRC rcStrict; 11946 PVM pVM = pVCpu->CTX_SUFF(pVM); 13084 11947 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification; 13085 uint32_t const uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification); 13086 PVM pVM = pVCpu->CTX_SUFF(pVM); 13087 VBOXSTRICTRC rcStrict; 13088 rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/); 11948 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification); 13089 11949 switch (uAccessType) 13090 11950 { 13091 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */ 13092 { 13093 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13094 AssertRCReturn(rc, rc); 13095 11951 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: /* MOV to CRx */ 11952 { 13096 11953 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, 13097 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), 13098 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification)); 13099 AssertMsg( rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE 11954 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification), 11955 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification)); 11956 AssertMsg( rcStrict == VINF_SUCCESS 11957 || rcStrict == VINF_IEM_RAISED_XCPT 11958 || rcStrict == VINF_PGM_CHANGE_MODE 13100 11959 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13101 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)) 11960 11961 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)) 13102 11962 { 13103 case 0: /* CR0 */ 13104 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 11963 case 0: 11964 { 11965 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 11966 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write); 13105 11967 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0)); 13106 11968 break; 13107 case 2: /* CR2 */ 11969 } 11970 11971 case 2: 11972 { 11973 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write); 13108 11974 /* Nothing to do here, CR2 it's not part of the VMCS. 
*/ 13109 11975 break; 13110 case 3: /* CR3 */ 11976 } 11977 11978 case 3: 11979 { 13111 11980 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx) || pVCpu->hm.s.fUsingDebugLoop); 13112 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3); 11981 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write); 11982 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3); 13113 11983 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3)); 13114 11984 break; 13115 case 4: /* CR4 */ 13116 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4); 13117 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", 13118 VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0)); 11985 } 11986 11987 case 4: 11988 { 11989 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write); 11990 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4); 11991 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict), 11992 pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0)); 13119 11993 break; 13120 case 8: /* CR8 */ 11994 } 11995 11996 case 8: 11997 { 11998 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write); 13121 11999 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)); 13122 /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */ 13123 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 12000 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 13124 12001 break; 12002 } 13125 12003 default: 13126 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL IFICATION_CRX_REGISTER(uExitQualification)));12004 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))); 13127 12005 break; 13128 12006 } 13129 13130 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);13131 12007 break; 13132 12008 } 13133 12009 13134 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */ 13135 { 13136 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13137 AssertRCReturn(rc, rc); 13138 12010 case VMX_EXIT_QUAL_CRX_ACCESS_READ: /* MOV from CRx */ 12011 { 13139 12012 Assert( !pVM->hm.s.fNestedPaging 13140 12013 || !CPUMIsGuestPagingEnabledEx(pMixedCtx) 13141 12014 || pVCpu->hm.s.fUsingDebugLoop 13142 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3); 13143 12015 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3); 13144 12016 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. 
*/ 13145 Assert( VMX_EXIT_QUAL IFICATION_CRX_REGISTER(uExitQualification) != 812017 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8 13146 12018 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)); 13147 12019 13148 12020 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, 13149 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification), 13150 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)); 13151 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]); 13153 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), 12021 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification), 12022 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)); 12023 AssertMsg( rcStrict == VINF_SUCCESS 12024 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12025 #ifdef VBOX_WITH_STATISTICS 12026 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)) 12027 { 12028 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break; 12029 case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break; 12030 case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break; 12031 case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break; 12032 case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break; 12033 } 12034 #endif 12035 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification), 13154 12036 VBOXSTRICTRC_VAL(rcStrict))); 13155 if (VMX_EXIT_QUAL IFICATION_CRX_GENREG(uExitQualification) == X86_GREG_xSP)13156 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RSP);12037 if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP) 12038 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RSP); 13157 12039 break; 13158 12040 } 13159 12041 13160 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */ 13161 { 13162 AssertRCReturn(rc, rc); 12042 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */ 12043 { 13163 12044 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr); 13164 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13165 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 12045 AssertMsg( rcStrict == VINF_SUCCESS 12046 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12047 12048 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 13166 12049 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts); 13167 12050 Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict))); … … 13169 12052 } 13170 12053 13171 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */ 13172 { 13173 AssertRCReturn(rc, rc); 12054 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */ 12055 { 13174 12056 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, 13175 VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification)); 13176 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE, 12057 VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification)); 12058 AssertMsg( rcStrict == VINF_SUCCESS 12059 || rcStrict == VINF_IEM_RAISED_XCPT 12060 || rcStrict == VINF_PGM_CHANGE_MODE, 13177 12061 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13178 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 
12062 12063 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 13179 12064 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw); 13180 12065 Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict))); … … 13187 12072 } 13188 12073 13189 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST); 12074 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS 12075 : HM_CHANGED_XCPT_RAISED_MASK); 13190 12076 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2); 13191 12077 NOREF(pVM); … … 13206 12092 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13207 12093 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13208 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 13209 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 13210 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13211 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 12094 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 12095 | CPUMCTX_EXTRN_SREG_MASK 12096 | CPUMCTX_EXTRN_EFER); 13212 12097 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */ 13213 12098 AssertRCReturn(rc, rc); 13214 12099 13215 12100 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */ 13216 uint32_t uIOPort = VMX_EXIT_QUAL IFICATION_IO_PORT(pVmxTransient->uExitQualification);13217 uint8_t uIOWidth = VMX_EXIT_QUAL IFICATION_IO_WIDTH(pVmxTransient->uExitQualification);13218 bool fIOWrite = ( VMX_EXIT_QUAL IFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)13219 == VMX_EXIT_QUAL IFICATION_IO_DIRECTION_OUT);13220 bool fIOString = VMX_EXIT_QUAL IFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);12101 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification); 12102 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification); 12103 bool fIOWrite = ( VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification) 12104 == VMX_EXIT_QUAL_IO_DIRECTION_OUT); 12105 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification); 13221 12106 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF); 13222 12107 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction; … … 13242 12127 { 13243 12128 /* I/O operation lookup arrays. */ 13244 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */ 13245 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */ 13246 12129 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */ 12130 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */ 13247 12131 uint32_t const cbValue = s_aIOSizes[uIOWidth]; 13248 12132 uint32_t const cbInstr = pVmxTransient->cbInstr; … … 13263 12147 { 13264 12148 int rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13265 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). 
*/13266 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);13267 12149 AssertRCReturn(rc2, rc2); 13268 12150 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3); 13269 12151 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2); 13270 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;13271 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);12152 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize; 12153 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification); 13272 12154 if (fIOWrite) 13273 12155 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, … … 13285 12167 } 13286 12168 else 13287 {13288 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */13289 int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);13290 AssertRCReturn(rc2, rc2);13291 12169 rcStrict = IEMExecOne(pVCpu); 13292 } 13293 /** @todo IEM needs to be setting these flags somehow. */ 13294 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP); 12170 12171 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP); 13295 12172 fUpdateRipAlready = true; 13296 12173 } … … 13300 12177 * IN/OUT - I/O instruction. 13301 12178 */ 13302 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r')); 12179 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, 12180 fIOWrite ? 'w' : 'r')); 13303 12181 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth]; 13304 Assert(!VMX_EXIT_QUAL IFICATION_IO_IS_REP(pVmxTransient->uExitQualification));12182 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification)); 13305 12183 if (fIOWrite) 13306 12184 { … … 13328 12206 { 13329 12207 hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, cbInstr); 13330 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);12208 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP); 13331 12209 } 13332 12210 13333 12211 /* 13334 * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru while booting Fedora 17 64-bit guest. 12212 * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru 12213 * while booting Fedora 17 64-bit guest. 12214 * 13335 12215 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ. 13336 12216 */ … … 13338 12218 { 13339 12219 /** @todo Single-step for INS/OUTS with REP prefix? */ 13340 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);12220 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS); 13341 12221 } 13342 12222 else if ( !fDbgStepping 13343 12223 && fGstStepping) 13344 12224 { 13345 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 12225 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 12226 AssertRCReturn(rc, rc); 13346 12227 } 13347 12228 … … 13351 12232 * Note that the I/O breakpoint type is undefined if CR4.DE is 0. 
13352 12233 */ 13353 int rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);13354 AssertRCReturn(rc 2, rc2);12234 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7); 12235 AssertRCReturn(rc, rc); 13355 12236 13356 12237 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the … … 13377 12258 ASMSetDR6(pMixedCtx->dr[6]); 13378 12259 if (pMixedCtx->dr[7] != uDr7) 13379 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);12260 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7; 13380 12261 13381 12262 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx); … … 13420 12301 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 13421 12302 */ 13422 int rc2 = hmR0Vmx SaveGuestRegsForIemInterpreting(pVCpu);12303 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 13423 12304 AssertRCReturn(rc2, rc2); 13424 12305 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead … … 13426 12307 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n", 13427 12308 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 13428 VMX_EXIT_QUAL IFICATION_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",12309 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "", 13429 12310 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth)); 13430 12311 13431 12312 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 13432 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);12313 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 13433 12314 13434 12315 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 13451 12332 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13452 12333 AssertRCReturn(rc, rc); 13453 if (VMX_EXIT_QUAL IFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)12334 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT) 13454 12335 { 13455 12336 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); … … 13534 12415 } 13535 12416 13536 #if 0 13537 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now 13538 * just sync the whole thing. */ 13539 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 13540 #else 13541 /* Aggressive state sync. for now. */ 13542 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 13543 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13544 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 13545 #endif 12417 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */ 12418 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13546 12419 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13547 12420 AssertRCReturn(rc, rc); 13548 12421 13549 12422 /* See Intel spec. 
27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Phyiscal Addresses" */ 13550 uint32_t uAccessType = VMX_EXIT_QUAL IFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);12423 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification); 13551 12424 VBOXSTRICTRC rcStrict2; 13552 12425 switch (uAccessType) … … 13556 12429 { 13557 12430 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW) 13558 || VMX_EXIT_QUAL IFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,12431 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR, 13559 12432 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n")); 13560 12433 13561 12434 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase; /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */ 13562 12435 GCPhys &= PAGE_BASE_GC_MASK; 13563 GCPhys += VMX_EXIT_QUAL IFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);12436 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification); 13564 12437 PVM pVM = pVCpu->CTX_SUFF(pVM); 13565 Log4 (("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,13566 VMX_EXIT_QUAL IFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));12438 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys, 12439 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification))); 13567 12440 13568 12441 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu, 13569 12442 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, 13570 12443 CPUMCTX2CORE(pMixedCtx), GCPhys); 13571 Log4 (("ApicAccess rcStrict2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));12444 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2))); 13572 12445 if ( rcStrict2 == VINF_SUCCESS 13573 12446 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT 13574 12447 || rcStrict2 == VERR_PAGE_NOT_PRESENT) 13575 12448 { 13576 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP13577 | HM_CHANGED_GUEST_RSP13578 | HM_CHANGED_GUEST_RFLAGS13579 | HM_CHANGED_GUEST_APIC_STATE);12449 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 12450 | HM_CHANGED_GUEST_RSP 12451 | HM_CHANGED_GUEST_RFLAGS 12452 | HM_CHANGED_GUEST_APIC_TPR); 13580 12453 rcStrict2 = VINF_SUCCESS; 13581 12454 } … … 13584 12457 13585 12458 default: 13586 Log4 (("ApicAccessuAccessType=%#x\n", uAccessType));12459 Log4Func(("uAccessType=%#x\n", uAccessType)); 13587 12460 rcStrict2 = VINF_EM_RAW_EMULATE_INSTR; 13588 12461 break; … … 13635 12508 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13636 12509 AssertRCReturn(rc, rc); 13637 if (VMX_EXIT_QUAL IFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)12510 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE) 13638 12511 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 13639 12512 else … … 13649 12522 */ 13650 12523 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13651 rc |= hmR0Vmx SaveGuestSegmentRegs(pVCpu, pMixedCtx);13652 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);12524 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK 12525 | CPUMCTX_EXTRN_DR7); 13653 12526 AssertRCReturn(rc, rc); 13654 Log4 (("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));12527 Log4Func(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip)); 13655 12528 13656 
12529 PVM pVM = pVCpu->CTX_SUFF(pVM); 13657 if (VMX_EXIT_QUAL IFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)12530 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE) 13658 12531 { 13659 12532 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), 13660 VMX_EXIT_QUAL IFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),13661 VMX_EXIT_QUAL IFICATION_DRX_GENREG(pVmxTransient->uExitQualification));12533 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification), 12534 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification)); 13662 12535 if (RT_SUCCESS(rc)) 13663 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);12536 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7); 13664 12537 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 13665 12538 } … … 13667 12540 { 13668 12541 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), 13669 VMX_EXIT_QUAL IFICATION_DRX_GENREG(pVmxTransient->uExitQualification),13670 VMX_EXIT_QUAL IFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));12542 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification), 12543 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification)); 13671 12544 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 13672 12545 } … … 13714 12587 * Get sufficent state and update the exit history entry. 13715 12588 */ 13716 RTGCPHYS GCPhys = 0;12589 RTGCPHYS GCPhys; 13717 12590 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys); 13718 13719 #if 0 13720 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */ 13721 #else 13722 /* Aggressive state sync. for now. */ 13723 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 13724 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13725 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 13726 #endif 12591 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13727 12592 AssertRCReturn(rc, rc); 13728 12593 … … 13748 12613 { 13749 12614 /* Successfully handled MMIO operation. */ 13750 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP13751 | HM_CHANGED_GUEST_RSP13752 | HM_CHANGED_GUEST_RFLAGS13753 | HM_CHANGED_GUEST_APIC_STATE);12615 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 12616 | HM_CHANGED_GUEST_RSP 12617 | HM_CHANGED_GUEST_RFLAGS 12618 | HM_CHANGED_GUEST_APIC_TPR); 13754 12619 rcStrict = VINF_SUCCESS; 13755 12620 } … … 13761 12626 */ 13762 12627 Assert(pMixedCtx == &pVCpu->cpum.GstCtx); 13763 int rc2 = hmR0Vmx SaveGuestRegsForIemInterpreting(pVCpu);12628 int rc2 = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13764 12629 AssertRCReturn(rc2, rc2); 13765 12630 … … 13768 12633 13769 12634 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 13770 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);12635 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 13771 12636 13772 12637 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 13793 12658 /* In the unlikely case that the EPT violation happened as a result of delivering an event, log it. 
*/ 13794 12659 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending)) 13795 Log4 (("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));12660 Log4Func(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo)); 13796 12661 } 13797 12662 else … … 13802 12667 } 13803 12668 13804 RTGCPHYS GCPhys = 0;12669 RTGCPHYS GCPhys; 13805 12670 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys); 13806 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13807 #if 0 13808 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */ 13809 #else 13810 /* Aggressive state sync. for now. */ 13811 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 13812 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13813 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 13814 #endif 12671 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 12672 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13815 12673 AssertRCReturn(rc, rc); 13816 12674 … … 13819 12677 13820 12678 RTGCUINT uErrorCode = 0; 13821 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL IFICATION_EPT_INSTR_FETCH)12679 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH) 13822 12680 uErrorCode |= X86_TRAP_PF_ID; 13823 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL IFICATION_EPT_DATA_WRITE)12681 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE) 13824 12682 uErrorCode |= X86_TRAP_PF_RW; 13825 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL IFICATION_EPT_ENTRY_PRESENT)12683 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT) 13826 12684 uErrorCode |= X86_TRAP_PF_P; 13827 12685 13828 12686 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode); 13829 12687 13830 Log4 (("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,13831 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));12688 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys, 12689 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip)); 13832 12690 13833 12691 /* Handle the pagefault trap for the nested shadow table. */ … … 13843 12701 /* Successfully synced our nested page tables. */ 13844 12702 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); 13845 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP13846 | HM_CHANGED_GUEST_RSP13847 | HM_CHANGED_GUEST_RFLAGS);12703 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 12704 | HM_CHANGED_GUEST_RSP 12705 | HM_CHANGED_GUEST_RFLAGS); 13848 12706 return VINF_SUCCESS; 13849 12707 } 13850 12708 13851 Log4 (("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));12709 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2))); 13852 12710 return rcStrict2; 13853 12711 } … … 13871 12729 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); 13872 12730 13873 int rc = hmR0Vmx SaveGuestCR0(pVCpu, pMixedCtx);12731 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 13874 12732 AssertRCReturn(rc, rc); 13875 12733 … … 13901 12759 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); 13902 12760 13903 /** @todo Try optimize this by not saving the entire guest state unless 13904 * really needed. 
*/ 13905 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 12761 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 13906 12762 AssertRCReturn(rc, rc); 13907 12763 13908 PVM pVM = pVCpu->CTX_SUFF(pVM); 13909 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx)); 12764 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx)); 13910 12765 if (rc == VINF_EM_RAW_GUEST_TRAP) 13911 12766 { … … 13938 12793 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13939 12794 AssertRCReturn(rc, rc); 13940 Assert( pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);12795 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO); 13941 12796 13942 12797 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), … … 13953 12808 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); 13954 12809 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); 13955 Log6(("XcptDB\n"));13956 12810 13957 12811 /* … … 13960 12814 */ 13961 12815 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13962 AssertRCReturn(rc, rc);13963 12816 13964 12817 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */ … … 13968 12821 13969 12822 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction); 12823 Log6Func(("rc=%Rrc\n", rc)); 13970 12824 if (rc == VINF_EM_RAW_GUEST_TRAP) 13971 12825 { … … 13986 12840 VMMRZCallRing3Enable(pVCpu); 13987 12841 13988 rc = hmR0Vmx SaveGuestDR7(pVCpu, pMixedCtx);12842 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7); 13989 12843 AssertRCReturn(rc, rc); 13990 12844 … … 14002 12856 * Raise #DB in the guest. 14003 12857 * 14004 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use 14005 * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP (INT1) and not the 14006 * regular #DB. Thus it -may- trigger different handling in the CPU (like skipped DPL checks), see @bugref{6398}. 12858 * It is important to reflect exactly what the VM-exit gave us (preserving the 12859 * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've 12860 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may 12861 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}. 14007 12862 * 14008 * Intel re-documented ICEBP/INT1 on May 2018 previously documented as part of Intel 386,14009 * see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".12863 * Intel re-documented ICEBP/INT1 on May 2018 previously documented as part of 12864 * Intel 386, see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". 
14010 12865 */ 14011 12866 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); … … 14051 12906 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 14052 12907 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 14053 rc |= hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);12908 rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 14054 12909 AssertRCReturn(rc, rc); 14055 Log4 (("#GPGst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,14056 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));12910 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip, 12911 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel)); 14057 12912 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 14058 12913 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); … … 14064 12919 14065 12920 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */ 14066 rc = hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);12921 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 14067 12922 AssertRCReturn(rc, rc); 14068 12923 … … 14076 12931 rc = VINF_SUCCESS; 14077 12932 Assert(cbOp == pDis->cbInstr); 14078 Log4 (("#GPDisas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));12933 Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip)); 14079 12934 switch (pDis->pCurInstr->uOpcode) 14080 12935 { … … 14084 12939 pMixedCtx->eflags.Bits.u1RF = 0; 14085 12940 pMixedCtx->rip += pDis->cbInstr; 14086 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);12941 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 14087 12942 if ( !fDbgStepping 14088 12943 && pMixedCtx->eflags.Bits.u1TF) 14089 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 12944 { 12945 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 12946 AssertRCReturn(rc, rc); 12947 } 14090 12948 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli); 14091 12949 break; … … 14103 12961 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 14104 12962 } 14105 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);12963 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 14106 12964 if ( !fDbgStepping 14107 12965 && pMixedCtx->eflags.Bits.u1TF) 14108 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 12966 { 12967 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 12968 AssertRCReturn(rc, rc); 12969 } 14109 12970 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti); 14110 12971 break; … … 14116 12977 pMixedCtx->rip += pDis->cbInstr; 14117 12978 pMixedCtx->eflags.Bits.u1RF = 0; 14118 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);12979 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 14119 12980 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 14120 12981 break; … … 14123 12984 case OP_POPF: 14124 12985 { 14125 Log4 (("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));12986 Log4Func(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip)); 14126 12987 uint32_t cbParm; 14127 12988 uint32_t uMask; … … 14155 13016 break; 14156 13017 } 14157 Log4 (("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, 
pMixedCtx->rip));13018 Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip)); 14158 13019 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF)) 14159 13020 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask); 14160 pMixedCtx->esp 14161 pMixedCtx->esp 14162 pMixedCtx->rip 14163 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP14164 | HM_CHANGED_GUEST_RSP14165 | HM_CHANGED_GUEST_RFLAGS);13021 pMixedCtx->esp += cbParm; 13022 pMixedCtx->esp &= uMask; 13023 pMixedCtx->rip += pDis->cbInstr; 13024 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 13025 | HM_CHANGED_GUEST_RSP 13026 | HM_CHANGED_GUEST_RFLAGS); 14166 13027 /* Generate a pending-debug exception when the guest stepping over POPF regardless of how 14167 13028 POPF restores EFLAGS.TF. */ 14168 13029 if ( !fDbgStepping 14169 13030 && fGstStepping) 14170 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13031 { 13032 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 13033 AssertRCReturn(rc, rc); 13034 } 14171 13035 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf); 14172 13036 break; … … 14209 13073 break; 14210 13074 } 14211 Log4 (("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));14212 pMixedCtx->esp 14213 pMixedCtx->esp 14214 pMixedCtx->rip 14215 pMixedCtx->eflags.Bits.u1RF 14216 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP14217 | HM_CHANGED_GUEST_RSP14218 | HM_CHANGED_GUEST_RFLAGS);13075 Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack)); 13076 pMixedCtx->esp -= cbParm; 13077 pMixedCtx->esp &= uMask; 13078 pMixedCtx->rip += pDis->cbInstr; 13079 pMixedCtx->eflags.Bits.u1RF = 0; 13080 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 13081 | HM_CHANGED_GUEST_RSP 13082 | HM_CHANGED_GUEST_RFLAGS); 14219 13083 if ( !fDbgStepping 14220 13084 && pMixedCtx->eflags.Bits.u1TF) 14221 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13085 { 13086 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 13087 AssertRCReturn(rc, rc); 13088 } 14222 13089 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf); 14223 13090 break; … … 14258 13125 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask); 14259 13126 pMixedCtx->sp += sizeof(aIretFrame); 14260 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP14261 | HM_CHANGED_GUEST_SEGMENT_REGS14262 | HM_CHANGED_GUEST_RSP14263 | HM_CHANGED_GUEST_RFLAGS);13127 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 13128 | HM_CHANGED_GUEST_CS 13129 | HM_CHANGED_GUEST_RSP 13130 | HM_CHANGED_GUEST_RFLAGS); 14264 13131 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. 
*/ 14265 13132 if ( !fDbgStepping 14266 13133 && fGstStepping) 14267 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 14268 Log4(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip)); 13134 { 13135 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 13136 AssertRCReturn(rc, rc); 13137 } 13138 Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip)); 14269 13139 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret); 14270 13140 break; … … 14291 13161 { 14292 13162 pMixedCtx->eflags.Bits.u1RF = 0; 14293 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);13163 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS); 14294 13164 } 14295 13165 break; … … 14302 13172 EMCODETYPE_SUPERVISOR); 14303 13173 rc = VBOXSTRICTRC_VAL(rc2); 14304 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);13174 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 14305 13175 /** @todo We have to set pending-debug exceptions here when the guest is 14306 13176 * single-stepping depending on the instruction that was interpreted. */ 14307 Log4 (("#GP rc=%Rrc\n", rc));13177 Log4Func(("#GP rc=%Rrc\n", rc)); 14308 13178 break; 14309 13179 } … … 14332 13202 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 14333 13203 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active, 14334 ("uVector=%# 04x u32XcptBitmap=%#010RX32\n",13204 ("uVector=%#x u32XcptBitmap=%#X32\n", 14335 13205 VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.u32XcptBitmap)); 14336 13206 #endif … … 14341 13211 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 14342 13212 AssertRCReturn(rc, rc); 14343 Assert( pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);13213 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO); 14344 13214 14345 13215 #ifdef DEBUG_ramshankar 14346 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 13216 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS 13217 | CPUMCTX_EXTRN_RIP); 14347 13218 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo); 14348 13219 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip)); … … 14384 13255 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */ 14385 13256 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx); 14386 Log4 (("Pending #DF due to vectoring #PF. 
NP\n"));13257 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n")); 14387 13258 } 14388 13259 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); … … 14398 13269 } 14399 13270 14400 rc = hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);13271 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 14401 13272 AssertRCReturn(rc, rc); 14402 13273 14403 Log4 (("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,14404 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));13274 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification, 13275 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3)); 14405 13276 14406 13277 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode); … … 14408 13279 (RTGCPTR)pVmxTransient->uExitQualification); 14409 13280 14410 Log4 (("#PF: rc=%Rrc\n", rc));13281 Log4Func(("#PF: rc=%Rrc\n", rc)); 14411 13282 if (rc == VINF_SUCCESS) 14412 13283 { 14413 #if 014414 /* Successfully synced shadow pages tables or emulated an MMIO instruction. */14415 /** @todo this isn't quite right, what if guest does lgdt with some MMIO14416 * memory? We don't update the whole state here... */14417 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP14418 | HM_CHANGED_GUEST_RSP14419 | HM_CHANGED_GUEST_RFLAGS14420 | HM_CHANGED_GUEST_APIC_STATE);14421 #else14422 13284 /* 14423 13285 * This is typically a shadow page table sync or a MMIO instruction. But we may have 14424 13286 * emulated something like LTR or a far jump. Any part of the CPU context may have changed. 14425 13287 */ 14426 /** @todo take advantage of CPUM changed flags instead of brute forcing. */ 14427 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 14428 #endif 13288 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 14429 13289 TRPMResetTrap(pVCpu); 14430 13290 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); … … 14449 13309 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */ 14450 13310 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx); 14451 Log4 (("#PF: Pending #DF due to vectoring #PF\n"));13311 Log4Func(("#PF: Pending #DF due to vectoring #PF\n")); 14452 13312 } 14453 13313