- Timestamp: Apr 19, 2018 8:54:23 AM (7 years ago)
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
r70948 → r71910

@@ -543 +543 @@
 {
     pVCpu->hm.s.fGIMTrapXcptUD = true;
-    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
 }

@@ -555 +555 @@
 {
     pVCpu->hm.s.fGIMTrapXcptUD = false;
-    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
 }
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r71859 → r71910

@@ -103 +103 @@
             int rc2 = APICSetTpr(pVCpu, u8Tpr);
             AssertRC(rc2);
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);

             pCtx->rip += pPatch->cbOp;
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r71909 → r71910

@@ -1704 +1704 @@
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pVmcb   Pointer to the VM control block.
- * @param   pCtx    Pointer to the guest-CPU context.
+ * @param   pCtx    Pointer to the guest-CPU or nested-guest-CPU context.
  *
  * @remarks No-long-jump zone!!!

@@ -1818 +1818 @@
      * While guests can modify and see the modified values throug the shadow values,
      * we shall not honor any guest modifications of this MSR to ensure caching is always
-     * enabled similar to how we always run with CR0.CD and NW bits cleared.
+     * enabled similar to how we always run with CR0.CD and NW bits cleared,
+     *
+     * For nested-guests this needs to always be set as well, see @bugref{7243#c109}.
      */
     pVmcb->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;

@@ -1991 +1993 @@
 static void hmR0SvmLoadGuestApicStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst)
 {
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
-    {
-        /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
-        pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking = 1;
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE))
+    {
+        Assert(pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking == 1);
         pVCpu->hm.s.svm.fSyncVTpr = false;
         pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR;
-
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
     }
 }
+
+
+/**
+ * Loads the nested-guest hardware virtualization state.
+ *
+ * @param   pVCpu         The cross context virtual CPU structure.
+ * @param   pVmcbNstGst   Pointer to the nested-guest VM control block.
+ * @param   pCtx          Pointer to the guest-CPU or nested-guest-CPU context.
+ */
+static void hmR0SvmLoadGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
+{
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_HWVIRT))
+    {
+        /*
+         * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
+         * since SVM doesn't have a preemption timer.
+         *
+         * We do this here rather than in hmR0SvmVmRunSetupVmcb() as we may have been executing the
+         * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters
+         * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
+         */
+        PVM             pVM                        = pVCpu->CTX_SUFF(pVM);
+        PSVMVMCBCTRL    pVmcbNstGstCtrl            = &pVmcbNstGst->ctrl;
+        uint16_t const  uGuestPauseFilterCount     = pVM->hm.s.svm.cPauseFilter;
+        uint16_t const  uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
+        if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
+        {
+            pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
+            pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
+            pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        }
+        else
+        {
+            pVmcbNstGstCtrl->u16PauseFilterCount     = uGuestPauseFilterCount;
+            pVmcbNstGstCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
+        }
+
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_HWVIRT);
+    }
+}

@@ -2013 +2052 @@
 static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
-    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
+    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE))
         return VINF_SUCCESS;

@@ -2065 +2104 @@
     }

-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
     return rc;
 }

@@ -2082 +2121 @@
     /* If we modify intercepts from here, please check & adjust hmR0SvmLoadGuestXcptInterceptsNested()
        if required. */
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS))
     {
         /* Trap #UD for GIM provider (e.g. for hypercalls). */

@@ -2097 +2136 @@

         /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmLoadSharedCR0(). */
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
     }
 }

@@ -2104 +2143 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
- * Loads the intercepts required for nested-guest execution in the VMCB.
+ * Merges guest and nested-guest intercepts for executing the nested-guest using
+ * hardware-assisted SVM.
  *
  * This merges the guest and nested-guest intercepts in a way that if the outer
- * guest intercepts an exception we need to intercept it in the nested-guest as
- * well and handle it accordingly.
+ * guest intercept is set we need to intercept it in the nested-guest as
+ * well.
  *
  * @param   pVCpu         The cross context virtual CPU structure.
  * @param   pVmcbNstGst   Pointer to the nested-guest VM control block.
  * @param   pCtx          Pointer to the guest-CPU context.
  */
-static void hmR0SvmLoadGuestInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
-{
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
-    {
+static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
+{
     PVM          pVM             = pVCpu->CTX_SUFF(pVM);
     PCSVMVMCB    pVmcb           = pVCpu->hm.s.svm.pVmcb;
     PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;

     /* Merge the guest's CR intercepts into the nested-guest VMCB. */
     pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
     pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;

     /* Always intercept CR4 writes for tracking PGM mode changes. */
     pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);

     /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
     if (!pVM->hm.s.fNestedPaging)
     {
         pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
         pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
     }

     /** @todo Figure out debugging with nested-guests, till then just intercept
      *        all DR[0-15] accesses. */
     pVmcbNstGstCtrl->u16InterceptRdDRx |= 0xffff;
     pVmcbNstGstCtrl->u16InterceptWrDRx |= 0xffff;

     /*
      * Merge the guest's exception intercepts into the nested-guest VMCB.
      *
      * - \#UD: Exclude these as the outer guest's GIM hypercalls are not applicable
      *   while executing the nested-guest.
      *
      * - \#BP: Exclude breakpoints set by the VM debugger for the outer guest. This can
      *   be tweaked later depending on how we wish to implement breakpoints.
      *
      * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP
      * for VM debugger breakpoints, see hmR0SvmLoadGuestXcptIntercepts.
      */
 #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     pVmcbNstGstCtrl->u32InterceptXcpt  |= (pVmcb->ctrl.u32InterceptXcpt & ~(  RT_BIT(X86_XCPT_UD)
                                                                             | RT_BIT(X86_XCPT_BP)));
 #else
     pVmcbNstGstCtrl->u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
 #endif

     /*
      * Adjust intercepts while executing the nested-guest that differ from the
      * outer guest intercepts.
      *
      * - VINTR: Exclude the outer guest intercept as we don't need to cause VINTR #VMEXITs
      *   that belong to the nested-guest to the outer guest.
      *
      * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
      *   the nested-guest, the physical CPU raises a \#UD exception as expected.
      */
     pVmcbNstGstCtrl->u64InterceptCtrl  |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR
                                                                             | SVM_CTRL_INTERCEPT_VMMCALL))
                                         | HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;

     Assert(   (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
            == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);

-    /*
-     * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
-     * since SVM doesn't have a preemption timer.
-     *
-     * We do this here rather than in hmR0SvmVmRunSetupVmcb() as we may have been executing the
-     * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters.
-     */
-    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
-    {
-        pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, pVmcb->ctrl.u16PauseFilterCount);
-        pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold,
-                                                          pVmcb->ctrl.u16PauseFilterThreshold);
-    }
-    else
-    {
-        pVmcbNstGstCtrl->u16PauseFilterCount     = pVmcb->ctrl.u16PauseFilterCount;
-        pVmcbNstGstCtrl->u16PauseFilterThreshold = pVmcb->ctrl.u16PauseFilterThreshold;
-    }
-
-    /*
-     * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
-     * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
-     */
-    if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
-    {
-        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
-                                          |  SVM_CTRL_INTERCEPT_VMLOAD;
-    }
-
-    /*
-     * If we don't expose Virtual GIF feature to the outer guest, we need to intercept
-     * CLGI/STGI instructions executed by the nested-guest.
-     */
-    if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
-    {
-        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
-                                          |  SVM_CTRL_INTERCEPT_STGI;
-    }
-
     /* Finally, update the VMCB clean bits. */
     pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
-    }
 }
 #endif

@@ -2426 +2423 @@
                               | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
                               | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
-                              | HM_CHANGED_GUEST_LAZY_MSRS            /* Unused. */
+                              | HM_CHANGED_GUEST_HWVIRT               /* Unused. */
+                              | HM_CHANGED_VMM_GUEST_LAZY_MSRS
                               | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
                               | HM_CHANGED_SVM_RESERVED2

@@ -2528 +2526 @@
  * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
  *
+ * This is done the first time we enter nested-guest execution using SVM R0
+ * until the nested-guest \#VMEXIT (not to be confused with physical CPU
+ * \#VMEXITs which may or may not cause the nested-guest \#VMEXIT).
+ *
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCtx    Pointer to the guest-CPU context.

@@ -2556 +2558 @@
         pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;

-        /* Override nested-guest PAT MSR, see @bugref{7243#c109}. */
-        PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
-        pVmcbNstGstState->u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
+        /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
+        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = 1;

 #ifdef DEBUG_ramshankar
         /* For debugging purposes - copy the LBR info. from outer guest VMCB. */
         pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
-        pVmcbNstGstState->u64DBGCTL = pVmcb->guest.u64DBGCTL;
 #endif
+
+        /*
+         * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
+         * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
+         */
+        if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
+            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
+                                              |  SVM_CTRL_INTERCEPT_VMLOAD;
+
+        /*
+         * If we don't expose Virtual GIF feature to the outer guest, we need to intercept
+         * CLGI/STGI instructions executed by the nested-guest.
+         */
+        if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
+            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
+                                              |  SVM_CTRL_INTERCEPT_STGI;
+
+        /* Merge the guest and nested-guest intercepts. */
+        hmR0SvmMergeVmcbCtrlsNested(pVCpu, pVmcbNstGst, pCtx);
+
+        /* Update the VMCB clean bits. */
+        pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
     else

@@ -2587 +2609 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);

-    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    Assert(pVmcbNstGst);
-
-    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
+    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcbNstGst);

     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcbNstGst, pCtx);

@@ -2604 +2623 @@
     hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
     hmR0SvmLoadGuestApicStateNested(pVCpu, pVmcbNstGst);
+    hmR0SvmLoadGuestHwvirtStateNested(pVCpu, pVmcbNstGst, pCtx);

     pVmcbNstGst->guest.u64RIP = pCtx->rip;

@@ -2613 +2633 @@
     Assert(pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable == 0);    /* Nested VGIF not supported yet. */
 #endif
-
-    hmR0SvmLoadGuestInterceptsNested(pVCpu, pVmcbNstGst, pCtx);

     rc = hmR0SvmSetupVMRunHandler(pVCpu);

@@ -2626 +2644 @@
                               | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
                               | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
-                              | HM_CHANGED_GUEST_LAZY_MSRS              /* Unused. */
+                              | HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS    /* Unused. */
+                              | HM_CHANGED_VMM_GUEST_LAZY_MSRS
                               | HM_CHANGED_SVM_RESERVED1                /* Reserved. */
                               | HM_CHANGED_SVM_RESERVED2

@@ -2682 +2701 @@
     }

-    /* Unused on AMD-V. */
-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
+    /* Unused on AMD-V (no lazy MSRs). */
+    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);

     AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),

@@ -4132 +4151 @@

     /*
+     * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
+     */
+    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
+
+    /*
      * Load the nested-guest state.
      */

@@ -4657 +4681 @@
             int rc = APICSetTpr(pVCpu, pMixedCtx->msrLSTAR & 0xff);
             AssertRC(rc);
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
         }
         else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)

@@ -4663 +4687 @@
             int rc = APICSetTpr(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
             AssertRC(rc);
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
         }
     }

@@ -6602 +6626 @@

                 case 8:     /* CR8 (TPR). */
-                    HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                     break;

@@ -6643 +6667 @@
             int rc2 = APICSetTpr(pVCpu, pCtx->eax & 0xff);
             AssertRC(rc2);
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
         }
         rc = VINF_SUCCESS;

@@ -6685 +6709 @@
              * EMInterpretWrmsr() changes it.
              */
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
         }
         else

@@ -7145 +7169 @@
     {
         /* Successfully handled MMIO operation. */
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
        rc = VINF_SUCCESS;
     }
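Two SVM-specific rules recur in the hunks above: the outer guest's intercepts are OR-merged into the nested-guest VMCB (minus intercepts that only make sense for the outer guest), and the nested-guest pause-filter counters are clamped so they never exceed the outer guest's limits. The standalone C sketch below only illustrates those two rules; the struct, field names and helper are simplified stand-ins invented for this example, not the real SVMVMCBCTRL layout or any VirtualBox API.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-in for the handful of VMCB control fields involved. */
    typedef struct VMCBCTRLSKETCH
    {
        uint32_t u32InterceptXcpt;          /* exception intercept bitmap */
        uint16_t u16PauseFilterCount;       /* PAUSE filter count */
        uint16_t u16PauseFilterThreshold;   /* PAUSE filter threshold */
    } VMCBCTRLSKETCH;

    static uint16_t u16Min(uint16_t a, uint16_t b) { return a < b ? a : b; }

    /* Merge outer-guest controls into the nested-guest controls. */
    static void mergeNestedCtrls(VMCBCTRLSKETCH *pNstGst, const VMCBCTRLSKETCH *pOuter, uint32_t fXcptExclude)
    {
        /* Rule 1: whatever the outer guest intercepts is also intercepted while the
           nested-guest runs, except for bits that only apply to the outer guest. */
        pNstGst->u32InterceptXcpt |= pOuter->u32InterceptXcpt & ~fXcptExclude;

        /* Rule 2: the nested hypervisor's pause-filter values must not exceed the
           limits configured for the outer guest (SVM has no preemption timer). */
        pNstGst->u16PauseFilterCount     = u16Min(pNstGst->u16PauseFilterCount,     pOuter->u16PauseFilterCount);
        pNstGst->u16PauseFilterThreshold = u16Min(pNstGst->u16PauseFilterThreshold, pOuter->u16PauseFilterThreshold);
    }

    int main(void)
    {
        VMCBCTRLSKETCH outer  = { (1u << 6) | (1u << 3), 3000, 512 };  /* outer guest intercepts #UD (vector 6) and #BP (vector 3) */
        VMCBCTRLSKETCH nested = { (1u << 14),            9000, 4096 }; /* nested hypervisor intercepts #PF (vector 14) */

        mergeNestedCtrls(&nested, &outer, 1u << 6 /* drop the outer #UD intercept, as the diff above does */);
        printf("xcpt=%#x pause=%u/%u\n", nested.u32InterceptXcpt,
               nested.u16PauseFilterCount, nested.u16PauseFilterThreshold);
        return 0;
    }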
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r71415 → r71910

@@ -3467 +3467 @@

     int rc = VINF_SUCCESS;
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE))
     {
         if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))

@@ -3505 +3505 @@
         }
     }
-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE);

@@ -3600 +3600 @@
     NOREF(pMixedCtx);
     int rc = VINF_SUCCESS;
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS))
     {
         /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */

@@ -3616 +3616 @@
         AssertRCReturn(rc, rc);

-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
         Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
               pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));

@@ -3847 +3847 @@
         pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
     }
-    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);

     if (fInterceptNM)

@@ -6595 +6595 @@
     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
     {
-        Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS));
+        Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS));
         hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);

@@ -7141 +7141 @@
     /* Note! Since this is only applicable to VT-x, the implementation is placed
              in the VT-x part of the sources instead of the generic stuff. */
+    int rc;
     if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
-    {
-        int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
-        /*
-         * For now, imply that the caller might change everything too. Do this after
-         * saving the guest state so as to not trigger assertions.
-         */
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
-        return rc;
-    }
-    return VINF_SUCCESS;
+        rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
+    else
+        rc = VINF_SUCCESS;
+
+    /*
+     * For now, imply that the caller might change everything too. Do this after
+     * saving the guest state so as to not trigger assertions.
+     *
+     * This is required for AMD-V too as it too only selectively re-loads changed
+     * guest state back in to the VMCB.
+     */
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+    return rc;
 }

@@ -8739 +8743 @@

     /* Clear any unused and reserved bits. */
-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
+    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2
+                        | HM_CHANGED_GUEST_HWVIRT);

     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);

@@ -8781 +8786 @@
     }

-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS))
     {
         hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);
     }

     /* Loading CR0, debug state might have changed intercepts, update VMCS. */
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS))
     {
         Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));

@@ -8794 +8799 @@
         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
         AssertRC(rc);
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
     }

@@ -9334 +9339 @@
             rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]);
             AssertRC(rc);
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
         }

@@ -12466 +12471 @@
                  * EMInterpretWrmsr() changes it.
                  */
-                HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
             }
             else if (pMixedCtx->ecx == MSR_IA32_TSC)    /* Windows 7 does this during bootup. See @bugref{6398}. */

@@ -12509 +12514 @@
                     HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
                 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
-                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
+                    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);
                 break;
             }

@@ -12653 +12658 @@
                     Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
                     /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */
-                    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                     break;
                 default:

@@ -13099 +13104 @@
                             | HM_CHANGED_GUEST_RSP
                             | HM_CHANGED_GUEST_RFLAGS
-                            | HM_CHANGED_VMX_GUEST_APIC_STATE);
+                            | HM_CHANGED_GUEST_APIC_STATE);
         rcStrict2 = VINF_SUCCESS;
     }

@@ -13264 +13269 @@
                             | HM_CHANGED_GUEST_RSP
                             | HM_CHANGED_GUEST_RFLAGS
-                            | HM_CHANGED_VMX_GUEST_APIC_STATE);
+                            | HM_CHANGED_GUEST_APIC_STATE);
         return VINF_SUCCESS;
     }

@@ -13970 +13975 @@
                             | HM_CHANGED_GUEST_RSP
                             | HM_CHANGED_GUEST_RFLAGS
-                            | HM_CHANGED_VMX_GUEST_APIC_STATE);
+                            | HM_CHANGED_GUEST_APIC_STATE);
 #else
     /*
trunk/src/VBox/VMM/include/HMInternal.h
r71529 → r71910

@@ -173 +173 @@
 #define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
 #define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
-#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17)   /* Shared */ /** @todo Move this to VT-x specific? */
-#define HM_CHANGED_GUEST_XCPT_INTERCEPTS         RT_BIT(18)
+#define HM_CHANGED_GUEST_APIC_STATE              RT_BIT(17)
+#define HM_CHANGED_GUEST_HWVIRT                  RT_BIT(18)
+/* Logically common VMM state. */
+#define HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS     RT_BIT(19)
+#define HM_CHANGED_VMM_GUEST_LAZY_MSRS           RT_BIT(20)
 /* VT-x specific state. */
-#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(19)
-#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(20)
-#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(21)
-#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(22)
-#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(23)
+#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(21)
+#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(22)
+#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(23)
+#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(24)
 /* AMD-V specific state. */
-#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(19)
-#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(20)
-#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
-#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
-#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)
+#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(21)
+#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(22)
+#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(23)
+#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(24)

 #define HM_CHANGED_ALL_GUEST                 (  HM_CHANGED_GUEST_CR0                  \
                                               | HM_CHANGED_GUEST_CR3                  \
                                               | HM_CHANGED_GUEST_CR4                  \
                                               | HM_CHANGED_GUEST_GDTR                 \
                                               | HM_CHANGED_GUEST_IDTR                 \
                                               | HM_CHANGED_GUEST_LDTR                 \
                                               | HM_CHANGED_GUEST_TR                   \
                                               | HM_CHANGED_GUEST_SEGMENT_REGS         \
                                               | HM_CHANGED_GUEST_DEBUG                \
                                               | HM_CHANGED_GUEST_RIP                  \
                                               | HM_CHANGED_GUEST_RSP                  \
                                               | HM_CHANGED_GUEST_RFLAGS               \
                                               | HM_CHANGED_GUEST_CR2                  \
                                               | HM_CHANGED_GUEST_SYSENTER_CS_MSR      \
                                               | HM_CHANGED_GUEST_SYSENTER_EIP_MSR     \
                                               | HM_CHANGED_GUEST_SYSENTER_ESP_MSR     \
                                               | HM_CHANGED_GUEST_EFER_MSR             \
-                                              | HM_CHANGED_GUEST_LAZY_MSRS            \
-                                              | HM_CHANGED_GUEST_XCPT_INTERCEPTS      \
+                                              | HM_CHANGED_GUEST_APIC_STATE           \
+                                              | HM_CHANGED_GUEST_HWVIRT               \
+                                              | HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS  \
+                                              | HM_CHANGED_VMM_GUEST_LAZY_MSRS        \
                                               | HM_CHANGED_VMX_GUEST_AUTO_MSRS        \
                                               | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE   \
-                                              | HM_CHANGED_VMX_GUEST_APIC_STATE       \
                                               | HM_CHANGED_VMX_ENTRY_CTLS             \
                                               | HM_CHANGED_VMX_EXIT_CTLS)

-#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(24)
+#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(25)

 /* Bits shared between host and guest. */
 #define HM_CHANGED_HOST_GUEST_SHARED_STATE   (  HM_CHANGED_GUEST_CR0                  \
                                               | HM_CHANGED_GUEST_DEBUG                \
-                                              | HM_CHANGED_GUEST_LAZY_MSRS)
+                                              | HM_CHANGED_VMM_GUEST_LAZY_MSRS)
 /** @} */
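The HM_CHANGED_* bits above drive a dirty-flag scheme: code that modifies a piece of guest state sets the corresponding bit, and the state-loading path before VM-entry checks which bits are pending, syncs only that state into the VMCB/VMCS, then clears them. The standalone sketch below shows that pattern only; the SKETCH_* names and helpers are simplified, hypothetical stand-ins for the HMCPU_CF_SET / HMCPU_CF_IS_PENDING / HMCPU_CF_CLEAR macros, not their actual definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag bits, mirroring the bit positions assigned above (not the real header). */
    #define SKETCH_CHANGED_APIC_STATE   (UINT32_C(1) << 17)   /* cf. HM_CHANGED_GUEST_APIC_STATE */
    #define SKETCH_CHANGED_HWVIRT       (UINT32_C(1) << 18)   /* cf. HM_CHANGED_GUEST_HWVIRT */

    typedef struct SKETCHCPU { uint32_t fContextUseFlags; } SKETCHCPU;

    static void sketchSetFlags(SKETCHCPU *p, uint32_t f)        { p->fContextUseFlags |= f;  }
    static int  sketchIsPending(const SKETCHCPU *p, uint32_t f) { return (p->fContextUseFlags & f) != 0; }
    static void sketchClearFlags(SKETCHCPU *p, uint32_t f)      { p->fContextUseFlags &= ~f; }

    int main(void)
    {
        SKETCHCPU cpu = { 0 };

        /* Somewhere a TPR write happens: mark the APIC state dirty. */
        sketchSetFlags(&cpu, SKETCH_CHANGED_APIC_STATE);

        /* On the next VM-entry path, only dirty state is pushed to the VMCB/VMCS. */
        if (sketchIsPending(&cpu, SKETCH_CHANGED_APIC_STATE))
        {
            /* ... write the virtual TPR into the control block here ... */
            sketchClearFlags(&cpu, SKETCH_CHANGED_APIC_STATE);
        }

        printf("remaining dirty flags: %#x\n", cpu.fContextUseFlags);
        return 0;
    }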