VirtualBox

Changeset 71910 in vbox for trunk


Timestamp: Apr 19, 2018 8:54:23 AM
Author:    vboxsync
Message:   VMM/HM: Clean up, and fix HMR0EnsureCompleteBasicContext for AMD-V, which also selectively re-loads guest state back into the VMCB.

Location:  trunk/src/VBox/VMM
Files:     5 edited

Legend: lines prefixed with '-' were removed in r71910, lines prefixed with '+' were added, and unprefixed lines are unchanged context.
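
Most of the edits below rename bits in the HM_CHANGED_* "context-use" dirty-flag scheme, so it helps to see the pattern in isolation first. The following is a minimal, self-contained C sketch of the set/test/clear pattern, using simplified stand-in types and a flattened struct; the real macros live in HMInternal.h and operate on pVCpu->hm.s.fContextUseFlags:

/*
 * Minimal sketch of the HM_CHANGED_* dirty-flag pattern this changeset
 * reworks.  Types and struct layout are simplified stand-ins, not the
 * real VirtualBox headers; only the macro names mirror the source.
 */
#include <stdint.h>
#include <stdio.h>

#define RT_BIT(bit)                          ((uint32_t)1 << (bit))
#define HM_CHANGED_GUEST_APIC_STATE          RT_BIT(17)
#define HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS RT_BIT(19)

typedef struct { uint32_t fContextUseFlags; } HMCPU;
typedef struct { HMCPU hm; } VMCPU;

/* Mark state dirty, test it, and clear it once re-loaded into the VMCB/VMCS. */
#define HMCPU_CF_SET(pVCpu, f)        ((pVCpu)->hm.fContextUseFlags |= (f))
#define HMCPU_CF_IS_PENDING(pVCpu, f) (((pVCpu)->hm.fContextUseFlags & (f)) != 0)
#define HMCPU_CF_CLEAR(pVCpu, f)      ((pVCpu)->hm.fContextUseFlags &= ~(f))

int main(void)
{
    VMCPU VCpu = { { 0 } };
    HMCPU_CF_SET(&VCpu, HM_CHANGED_GUEST_APIC_STATE);
    if (HMCPU_CF_IS_PENDING(&VCpu, HM_CHANGED_GUEST_APIC_STATE))
    {
        /* ... re-load the APIC state into the hardware control block here ... */
        HMCPU_CF_CLEAR(&VCpu, HM_CHANGED_GUEST_APIC_STATE);
    }
    printf("pending flags: %#x\n", (unsigned)VCpu.hm.fContextUseFlags); /* 0 */
    return 0;
}

The renames split this flag space into guest state proper (HM_CHANGED_GUEST_*), state logically common to both backends (HM_CHANGED_VMM_GUEST_*), and vendor-specific state (HM_CHANGED_VMX_* / HM_CHANGED_SVM_*).
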
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    --- VMMAll/HMAll.cpp (r70948)
    +++ VMMAll/HMAll.cpp (r71910)
    @@ -543,5 +543,5 @@
     {
         pVCpu->hm.s.fGIMTrapXcptUD = true;
    -    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
     }
    
     
    @@ -555,5 +555,5 @@
     {
         pVCpu->hm.s.fGIMTrapXcptUD = false;
    -    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
     }
    
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    --- VMMAll/HMSVMAll.cpp (r71859)
    +++ VMMAll/HMSVMAll.cpp (r71910)
    @@ -103,5 +103,5 @@
                     int rc2 = APICSetTpr(pVCpu, u8Tpr);
                     AssertRC(rc2);
    -                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
    
                     pCtx->rip += pPatch->cbOp;
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    --- VMMR0/HMSVMR0.cpp (r71909)
    +++ VMMR0/HMSVMR0.cpp (r71910)
    @@ -1704,5 +1704,5 @@
      * @param   pVCpu       The cross context virtual CPU structure.
      * @param   pVmcb       Pointer to the VM control block.
    - * @param   pCtx        Pointer to the guest-CPU context.
    + * @param   pCtx        Pointer to the guest-CPU or nested-guest-CPU context.
      *
      * @remarks No-long-jump zone!!!
     
    @@ -1818,5 +1818,7 @@
         * While guests can modify and see the modified values throug the shadow values,
         * we shall not honor any guest modifications of this MSR to ensure caching is always
    -    * enabled similar to how we always run with CR0.CD and NW bits cleared.
    +    * enabled similar to how we always run with CR0.CD and NW bits cleared,
    +    *
    +    * For nested-guests this needs to always be set as well, see @bugref{7243#c109}.
         */
        pVmcb->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
     
    @@ -1991,12 +1993,49 @@
     static void hmR0SvmLoadGuestApicStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst)
     {
    -    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
    -    {
    -        /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
    -        pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking = 1;
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE))
    +    {
    +        Assert(pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking == 1);
             pVCpu->hm.s.svm.fSyncVTpr = false;
    -        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR;
    -
    -        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
    +    }
    +}
    +
    +
    +/**
    + * Loads the nested-guest hardware virtualization state.
    + *
    + * @param   pVCpu         The cross context virtual CPU structure.
    + * @param   pVmcbNstGst   Pointer to the nested-guest VM control block.
    + * @param   pCtx          Pointer to the guest-CPU or nested-guest-CPU context.
    + */
    +static void hmR0SvmLoadGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
    +{
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_HWVIRT))
    +    {
    +        /*
    +         * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
    +         * since SVM doesn't have a preemption timer.
    +         *
    +         * We do this here rather than in hmR0SvmVmRunSetupVmcb() as we may have been executing the
    +         * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters
    +         * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
    +         */
    +        PVM            pVM = pVCpu->CTX_SUFF(pVM);
    +        PSVMVMCBCTRL   pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    +        uint16_t const uGuestPauseFilterCount     = pVM->hm.s.svm.cPauseFilter;
    +        uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
    +        if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
    +        {
    +            pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
    +            pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
    +            pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +        }
    +        else
    +        {
    +            pVmcbNstGstCtrl->u16PauseFilterCount     = uGuestPauseFilterCount;
    +            pVmcbNstGstCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
    +        }
    +
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_HWVIRT);
         }
     }
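
The new hmR0SvmLoadGuestHwvirtStateNested() above clamps the nested hypervisor's pause-filter settings to the limits configured for the outer guest, since SVM has no preemption timer to bound a PAUSE-spinning nested-guest. A small stand-alone sketch of just that clamping, with made-up numbers and plain C types rather than the VirtualBox ones:

#include <stdint.h>
#include <stdio.h>

#define RT_MIN(a, b) ((a) <= (b) ? (a) : (b))

int main(void)
{
    /* Limits configured for the outer guest (hypothetical values). */
    uint16_t const cGuestPauseFilter          = 3000;
    uint16_t const cGuestPauseFilterThreshold = 1000;

    /* What the nested hypervisor programmed into its VMCB (hypothetical). */
    uint16_t const cNstGstPauseFilter          = 8000;
    uint16_t const cNstGstPauseFilterThreshold = 400;

    /* Effective values for the nested-guest VMCB: never exceed the outer limits. */
    uint16_t const cCount     = RT_MIN(cNstGstPauseFilter, cGuestPauseFilter);
    uint16_t const cThreshold = RT_MIN(cNstGstPauseFilterThreshold, cGuestPauseFilterThreshold);

    printf("pause-filter count=%u threshold=%u\n", cCount, cThreshold); /* 3000, 400 */
    return 0;
}
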
     
    @@ -2013,5 +2052,5 @@
     static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     {
    -    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
    +    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE))
         return VINF_SUCCESS;
    
     
    @@ -2065,5 +2104,5 @@
         }
    
    -    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
         return rc;
     }
     
    @@ -2082,5 +2121,5 @@
         /* If we modify intercepts from here, please check & adjust hmR0SvmLoadGuestXcptInterceptsNested()
            if required. */
    -    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS))
         {
             /* Trap #UD for GIM provider (e.g. for hypercalls). */
     
    @@ -2097,5 +2136,5 @@
    
             /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmLoadSharedCR0(). */
    -        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
         }
     }
     
    @@ -2104,9 +2143,10 @@
     #ifdef VBOX_WITH_NESTED_HWVIRT
     /**
    - * Loads the intercepts required for nested-guest execution in the VMCB.
    + * Merges guest and nested-guest intercepts for executing the nested-guest using
    + * hardware-assisted SVM.
      *
      * This merges the guest and nested-guest intercepts in a way that if the outer
    - * guest intercepts an exception we need to intercept it in the nested-guest as
    - * well and handle it accordingly.
    + * guest intercept is set we need to intercept it in the nested-guest as
    + * well.
      *
      * @param   pVCpu           The cross context virtual CPU structure.
     
    @@ -2114,110 +2154,67 @@
      * @param   pCtx            Pointer to the guest-CPU context.
      */
    -static void hmR0SvmLoadGuestInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
    -{
    -    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    -    {
    -        PVM          pVM             = pVCpu->CTX_SUFF(pVM);
    -        PCSVMVMCB    pVmcb           = pVCpu->hm.s.svm.pVmcb;
    -        PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    -
    -        /* Merge the guest's CR intercepts into the nested-guest VMCB. */
    -        pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
    -        pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
    -
    -        /* Always intercept CR4 writes for tracking PGM mode changes. */
    -        pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);
    -
    -        /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
    -        if (!pVM->hm.s.fNestedPaging)
    -        {
    -            pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
    -            pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
    -        }
    -
    -        /** @todo Figure out debugging with nested-guests, till then just intercept
    -         *        all DR[0-15] accesses. */
    -        pVmcbNstGstCtrl->u16InterceptRdDRx |= 0xffff;
    -        pVmcbNstGstCtrl->u16InterceptWrDRx |= 0xffff;
    -
    -        /*
    -         * Merge the guest's exception intercepts into the nested-guest VMCB.
    -         *
    -         * - \#UD: Exclude these as the outer guest's GIM hypercalls are not applicable
    -         * while executing the nested-guest.
    -         *
    -         * - \#BP: Exclude breakpoints set by the VM debugger for the outer guest. This can
    -         * be tweaked later depending on how we wish to implement breakpoints.
    -         *
    -         * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP
    -         * for VM debugger breakpoints, see hmR0SvmLoadGuestXcptIntercepts.
    -         */
    +static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
    +{
    +    PVM          pVM             = pVCpu->CTX_SUFF(pVM);
    +    PCSVMVMCB    pVmcb           = pVCpu->hm.s.svm.pVmcb;
    +    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    +
    +    /* Merge the guest's CR intercepts into the nested-guest VMCB. */
    +    pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
    +    pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
    +
    +    /* Always intercept CR4 writes for tracking PGM mode changes. */
    +    pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);
    +
    +    /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
    +    if (!pVM->hm.s.fNestedPaging)
    +    {
    +        pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
    +        pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
    +    }
    +
    +    /** @todo Figure out debugging with nested-guests, till then just intercept
    +     *        all DR[0-15] accesses. */
    +    pVmcbNstGstCtrl->u16InterceptRdDRx |= 0xffff;
    +    pVmcbNstGstCtrl->u16InterceptWrDRx |= 0xffff;
    +
    +    /*
    +     * Merge the guest's exception intercepts into the nested-guest VMCB.
    +     *
    +     * - \#UD: Exclude these as the outer guest's GIM hypercalls are not applicable
    +     * while executing the nested-guest.
    +     *
    +     * - \#BP: Exclude breakpoints set by the VM debugger for the outer guest. This can
    +     * be tweaked later depending on how we wish to implement breakpoints.
    +     *
    +     * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP
    +     * for VM debugger breakpoints, see hmR0SvmLoadGuestXcptIntercepts.
    +     */
     #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    -        pVmcbNstGstCtrl->u32InterceptXcpt  |= (pVmcb->ctrl.u32InterceptXcpt & ~(  RT_BIT(X86_XCPT_UD)
    -                                                                                 | RT_BIT(X86_XCPT_BP)));
    +    pVmcbNstGstCtrl->u32InterceptXcpt  |= (pVmcb->ctrl.u32InterceptXcpt & ~(  RT_BIT(X86_XCPT_UD)
    +                                                                            | RT_BIT(X86_XCPT_BP)));
     #else
    -        pVmcbNstGstCtrl->u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
    -#endif
    -
    -        /*
    -         * Adjust intercepts while executing the nested-guest that differ from the
    -         * outer guest intercepts.
    -         *
    -         * - VINTR: Exclude the outer guest intercept as we don't need to cause VINTR #VMEXITs
    -         *   that belong to the nested-guest to the outer guest.
    -         *
    -         * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
    -         *   the nested-guest, the physical CPU raises a \#UD exception as expected.
    -         */
    -        pVmcbNstGstCtrl->u64InterceptCtrl  |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR
    -                                                                                | SVM_CTRL_INTERCEPT_VMMCALL))
    -                                           |  HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
    -
    -        Assert(   (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
    -               == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
    -
    -        /*
    -         * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
    -         * since SVM doesn't have a preemption timer.
    -         *
    -         * We do this here rather than in hmR0SvmVmRunSetupVmcb() as we may have been executing the
    -         * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters.
    -         */
    -        if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
    -        {
    -            pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, pVmcb->ctrl.u16PauseFilterCount);
    -            pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold,
    -                                                              pVmcb->ctrl.u16PauseFilterThreshold);
    -        }
    -        else
    -        {
    -            pVmcbNstGstCtrl->u16PauseFilterCount     = pVmcb->ctrl.u16PauseFilterCount;
    -            pVmcbNstGstCtrl->u16PauseFilterThreshold = pVmcb->ctrl.u16PauseFilterThreshold;
    -        }
    -
    -        /*
    -         * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
    -         * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
    -         */
    -        if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
    -        {
    -            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
    -                                              |  SVM_CTRL_INTERCEPT_VMLOAD;
    -        }
    -
    -        /*
    -         * If we don't expose Virtual GIF feature to the outer guest, we need to intercept
    -         * CLGI/STGI instructions executed by the nested-guest.
    -         */
    -        if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
    -        {
    -            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
    -                                              |  SVM_CTRL_INTERCEPT_STGI;
    -        }
    -
    -        /* Finally, update the VMCB clean bits. */
    -        pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    -        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    -    }
    +    pVmcbNstGstCtrl->u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
    +#endif
    +
    +    /*
    +     * Adjust intercepts while executing the nested-guest that differ from the
    +     * outer guest intercepts.
    +     *
    +     * - VINTR: Exclude the outer guest intercept as we don't need to cause VINTR #VMEXITs
    +     *   that belong to the nested-guest to the outer guest.
    +     *
    +     * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
    +     *   the nested-guest, the physical CPU raises a \#UD exception as expected.
    +     */
    +    pVmcbNstGstCtrl->u64InterceptCtrl  |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR
    +                                                                            | SVM_CTRL_INTERCEPT_VMMCALL))
    +                                       |  HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
    +
    +    Assert(   (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
    +           == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
    +
    +    /* Finally, update the VMCB clean bits. */
    +    pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
     #endif
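
The new hmR0SvmMergeVmcbCtrlsNested() above is, at its core, a bitwise OR of the outer guest's intercept masks into the nested-guest VMCB, with a small exclusion set (#UD, #BP, VINTR, VMMCALL) carved out. A compilable sketch of that shape, using simplified stand-in types and field names rather than the real SVM VMCB layout:

#include <stdint.h>
#include <stdio.h>

#define RT_BIT(b)   ((uint32_t)1 << (b))
#define X86_XCPT_BP 3   /* breakpoint exception vector */
#define X86_XCPT_UD 6   /* invalid-opcode exception vector */

typedef struct
{
    uint16_t u16InterceptRdCRx;
    uint16_t u16InterceptWrCRx;
    uint32_t u32InterceptXcpt;
} VMCBCTRL;

/* OR the outer guest's intercepts into the nested-guest's, excluding the
   outer guest's #UD (GIM hypercall) and #BP (VM debugger) intercepts. */
static void MergeIntercepts(VMCBCTRL *pNstGst, VMCBCTRL const *pGst)
{
    pNstGst->u16InterceptRdCRx |= pGst->u16InterceptRdCRx;
    pNstGst->u16InterceptWrCRx |= pGst->u16InterceptWrCRx;
    pNstGst->u16InterceptWrCRx |= RT_BIT(4); /* always track CR4 writes */
    pNstGst->u32InterceptXcpt  |= pGst->u32InterceptXcpt
                                & ~(RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_BP));
}

int main(void)
{
    VMCBCTRL Gst    = { 0x0001, 0x0001, RT_BIT(X86_XCPT_UD) | RT_BIT(1) };
    VMCBCTRL NstGst = { 0x0000, 0x0000, RT_BIT(X86_XCPT_BP) };
    MergeIntercepts(&NstGst, &Gst);
    printf("xcpt intercepts: %#x\n", (unsigned)NstGst.u32InterceptXcpt); /* #BP + #DB, no #UD */
    return 0;
}
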
     
    @@ -2426,5 +2423,6 @@
                               | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
                               | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
    -                          | HM_CHANGED_GUEST_LAZY_MSRS            /* Unused. */
    +                          | HM_CHANGED_GUEST_HWVIRT               /* Unused. */
    +                          | HM_CHANGED_VMM_GUEST_LAZY_MSRS
                               | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
                               | HM_CHANGED_SVM_RESERVED2
     
    @@ -2528,4 +2526,8 @@
      * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
      *
    + * This is done the first time we enter nested-guest execution using SVM R0
    + * until the nested-guest \#VMEXIT (not to be confused with physical CPU
    + * \#VMEXITs which may or may not cause the nested-guest \#VMEXIT).
    + *
      * @param   pVCpu           The cross context virtual CPU structure.
      * @param   pCtx            Pointer to the guest-CPU context.
     
    @@ -2556,13 +2558,33 @@
             pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
    
    -        /* Override nested-guest PAT MSR, see @bugref{7243#c109}. */
    -        PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
    -        pVmcbNstGstState->u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
    +        /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
    +        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = 1;
    
     #ifdef DEBUG_ramshankar
             /* For debugging purposes - copy the LBR info. from outer guest VMCB. */
             pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
    -        pVmcbNstGstState->u64DBGCTL = pVmcb->guest.u64DBGCTL;
    -#endif
    +#endif
    +
    +        /*
    +         * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
    +         * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
    +         */
    +        if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
    +            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
    +                                              |  SVM_CTRL_INTERCEPT_VMLOAD;
    +
    +        /*
    +         * If we don't expose Virtual GIF feature to the outer guest, we need to intercept
    +         * CLGI/STGI instructions executed by the nested-guest.
    +         */
    +        if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
    +            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
    +                                              |  SVM_CTRL_INTERCEPT_STGI;
    +
    +        /* Merge the guest and nested-guest intercepts. */
    +        hmR0SvmMergeVmcbCtrlsNested(pVCpu, pVmcbNstGst, pCtx);
    +
    +        /* Update the VMCB clean bits. */
    +        pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         }
         else
     
    @@ -2587,8 +2609,5 @@
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
    
    -    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    -    Assert(pVmcbNstGst);
    -
    -    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
    +    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcbNstGst);
    
         int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcbNstGst, pCtx);
     
    @@ -2604,4 +2623,5 @@
         hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
         hmR0SvmLoadGuestApicStateNested(pVCpu, pVmcbNstGst);
    +    hmR0SvmLoadGuestHwvirtStateNested(pVCpu, pVmcbNstGst, pCtx);
    
         pVmcbNstGst->guest.u64RIP    = pCtx->rip;
     
    @@ -2613,6 +2633,4 @@
         Assert(pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable == 0);        /* Nested VGIF not supported yet. */
     #endif
    -
    -    hmR0SvmLoadGuestInterceptsNested(pVCpu, pVmcbNstGst, pCtx);
    
         rc = hmR0SvmSetupVMRunHandler(pVCpu);
     
    @@ -2626,5 +2644,6 @@
                               | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
                               | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
    -                          | HM_CHANGED_GUEST_LAZY_MSRS            /* Unused. */
    +                          | HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS  /* Unused. */
    +                          | HM_CHANGED_VMM_GUEST_LAZY_MSRS
                               | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
                               | HM_CHANGED_SVM_RESERVED2
     
    @@ -2682,6 +2701,6 @@
         }
    
    -    /* Unused on AMD-V. */
    -    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
    +    /* Unused on AMD-V (no lazy MSRs). */
    +    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);
    
         AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
     
    @@ -4132,4 +4151,9 @@
    
         /*
    +     * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
    +     */
    +    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
    +
    +    /*
          * Load the nested-guest state.
          */
     
    @@ -4657,5 +4681,5 @@
                     int rc = APICSetTpr(pVCpu, pMixedCtx->msrLSTAR & 0xff);
                     AssertRC(rc);
    -                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                 }
                 else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
     
    @@ -4663,5 +4687,5 @@
                     int rc = APICSetTpr(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
                     AssertRC(rc);
    -                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                 }
             }
     
    @@ -6602,5 +6626,5 @@
    
                 case 8:     /* CR8 (TPR). */
    -                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                     break;
    
     
    @@ -6643,5 +6667,5 @@
                     int rc2 = APICSetTpr(pVCpu, pCtx->eax & 0xff);
                     AssertRC(rc2);
    -                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                 }
                 rc = VINF_SUCCESS;
     
    @@ -6685,5 +6709,5 @@
                      * EMInterpretWrmsr() changes it.
                      */
    -                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                 }
                 else
     
    @@ -7145,5 +7169,5 @@
             {
                 /* Successfully handled MMIO operation. */
    -            HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    +            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                 rc = VINF_SUCCESS;
             }
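
A recurring shape in the #VMEXIT handlers above: whenever the guest's TPR changes behind the VMCB's back, APICSetTpr() is called and the APIC state is flagged dirty so the VTPR is written back before the next VM-entry. A minimal sketch of that pattern, with APICSetTpr() stubbed out here rather than being the real PDM call:

#include <stdint.h>
#include <stdio.h>

#define RT_BIT(b)                   ((uint32_t)1 << (b))
#define HM_CHANGED_GUEST_APIC_STATE RT_BIT(17)

typedef struct { uint32_t fContextUseFlags; uint8_t u8Tpr; } VMCPU;

static int APICSetTpr(VMCPU *pVCpu, uint8_t u8Tpr) /* stub, not the real PDM API */
{
    pVCpu->u8Tpr = u8Tpr;
    return 0; /* VINF_SUCCESS */
}

int main(void)
{
    VMCPU VCpu = { 0, 0 };

    /* A #VMEXIT handler learned the guest wrote CR8/TPR: */
    int rc = APICSetTpr(&VCpu, 0x40);
    if (rc == 0)
        VCpu.fContextUseFlags |= HM_CHANGED_GUEST_APIC_STATE; /* re-sync VTPR on next entry */

    printf("TPR=%#x dirty=%#x\n", VCpu.u8Tpr, (unsigned)VCpu.fContextUseFlags);
    return 0;
}
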
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    --- VMMR0/HMVMXR0.cpp (r71415)
    +++ VMMR0/HMVMXR0.cpp (r71910)
    @@ -3467,5 +3467,5 @@
    
         int rc = VINF_SUCCESS;
    -    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE))
         {
             if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
     
    @@ -3505,5 +3505,5 @@
                 }
             }
    -        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
         }
    
     
    @@ -3600,5 +3600,5 @@
         NOREF(pMixedCtx);
         int rc = VINF_SUCCESS;
    -    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS))
         {
             /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */
     
    @@ -3616,5 +3616,5 @@
             AssertRCReturn(rc, rc);
    
    -        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
             Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
                   pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
     
    @@ -3847,5 +3847,5 @@
                 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
             }
    -        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +        HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
    
             if (fInterceptNM)
     
    @@ -6595,5 +6595,5 @@
         if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
         {
    -        Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS));
    +        Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS));
             hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
             HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
     
    @@ -7141,15 +7141,19 @@
         /* Note! Since this is only applicable to VT-x, the implementation is placed
                  in the VT-x part of the sources instead of the generic stuff. */
    +    int rc;
         if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
    -    {
    -        int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    -        /*
    -         * For now, imply that the caller might change everything too. Do this after
    -         * saving the guest state so as to not trigger assertions.
    -         */
    -        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    -        return rc;
    -    }
    -    return VINF_SUCCESS;
    +        rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    +    else
    +        rc = VINF_SUCCESS;
    +
    +    /*
    +     * For now, imply that the caller might change everything too. Do this after
    +     * saving the guest state so as to not trigger assertions.
    +     *
    +     * This is required for AMD-V too as it too only selectively re-loads changed
    +     * guest state back in to the VMCB.
    +     */
    +    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    +    return rc;
     }
    
     
    @@ -8739,5 +8743,6 @@
    
         /* Clear any unused and reserved bits. */
    -    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
    +    HMCPU_CF_CLEAR(pVCpu,   HM_CHANGED_GUEST_CR2
    +                          | HM_CHANGED_GUEST_HWVIRT);
    
         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     
    @@ -8781,12 +8786,12 @@
         }
    
    -    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS))
         {
             hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
    -        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);
         }
    
         /* Loading CR0, debug state might have changed intercepts, update VMCS. */
    -    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS))
         {
             Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
     
    @@ -8794,5 +8799,5 @@
             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
             AssertRC(rc);
    -        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
         }
    
     
    @@ -9334,5 +9339,5 @@
                     rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]);
                     AssertRC(rc);
    -                HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    +                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                 }
    
     
    @@ -12466,5 +12471,5 @@
                  * EMInterpretWrmsr() changes it.
                  */
    -            HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    +            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
             }
             else if (pMixedCtx->ecx == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
     
    @@ -12509,5 +12514,5 @@
                             HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
                         else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
    -                        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
    +                        HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);
                         break;
                     }
     
    @@ -12653,5 +12658,5 @@
                         Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
                         /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */
    -                    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    +                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
                         break;
                     default:
     
    @@ -13099,5 +13104,5 @@
                                         | HM_CHANGED_GUEST_RSP
                                         | HM_CHANGED_GUEST_RFLAGS
    -                                    | HM_CHANGED_VMX_GUEST_APIC_STATE);
    +                                    | HM_CHANGED_GUEST_APIC_STATE);
                     rcStrict2 = VINF_SUCCESS;
                 }
     
    @@ -13264,5 +13269,5 @@
                                 | HM_CHANGED_GUEST_RSP
                                 | HM_CHANGED_GUEST_RFLAGS
    -                            | HM_CHANGED_VMX_GUEST_APIC_STATE);
    +                            | HM_CHANGED_GUEST_APIC_STATE);
             return VINF_SUCCESS;
         }
     
    @@ -13970,5 +13975,5 @@
                                 | HM_CHANGED_GUEST_RSP
                                 | HM_CHANGED_GUEST_RFLAGS
    -                            | HM_CHANGED_VMX_GUEST_APIC_STATE);
    +                            | HM_CHANGED_GUEST_APIC_STATE);
     #else
             /*
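
The HMR0EnsureCompleteBasicContext() hunk above carries the actual fix named in the commit message: HM_CHANGED_ALL_GUEST must now be set on the AMD-V path too, because AMD-V, like VT-x, only re-loads state that is flagged dirty. A condensed sketch of the new control flow, using stand-in types and a stubbed save function rather than the real ones:

#include <stdint.h>

#define VINF_SUCCESS         0
#define HM_CHANGED_ALL_GUEST UINT32_C(0x01ffffff) /* illustrative mask, not the real value */

typedef struct { uint32_t fContextUseFlags; int fVmxSupported; } VMCPU;

static int hmR0VmxSaveGuestState(VMCPU *pVCpu) { (void)pVCpu; return VINF_SUCCESS; } /* stub */

/* After r71910: the flag is set on BOTH paths, so AMD-V also re-loads everything. */
static int EnsureCompleteBasicContext(VMCPU *pVCpu)
{
    int rc;
    if (pVCpu->fVmxSupported)
        rc = hmR0VmxSaveGuestState(pVCpu);   /* VT-x: state must be pulled from the VMCS */
    else
        rc = VINF_SUCCESS;                   /* AMD-V: the context is already current */

    /* Previously only the VT-x branch set this; AMD-V selectively re-loads
       guest state into the VMCB as well, so it needs the dirty flags too. */
    pVCpu->fContextUseFlags |= HM_CHANGED_ALL_GUEST;
    return rc;
}

int main(void)
{
    VMCPU VCpu = { 0, 0 /* AMD-V host */ };
    return EnsureCompleteBasicContext(&VCpu);
}
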
  • trunk/src/VBox/VMM/include/HMInternal.h

    --- include/HMInternal.h (r71529)
    +++ include/HMInternal.h (r71910)
    @@ -173,49 +173,51 @@
     #define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
     #define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
    -#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17)     /* Shared */ /** @todo Move this to VT-x specific? */
    -#define HM_CHANGED_GUEST_XCPT_INTERCEPTS         RT_BIT(18)
    +#define HM_CHANGED_GUEST_APIC_STATE              RT_BIT(17)
    +#define HM_CHANGED_GUEST_HWVIRT                  RT_BIT(18)
    +/* Logically common VMM state. */
    +#define HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS     RT_BIT(19)
    +#define HM_CHANGED_VMM_GUEST_LAZY_MSRS           RT_BIT(20)
     /* VT-x specific state. */
    -#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(19)
    -#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(20)
    -#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(21)
    -#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(22)
    -#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(23)
    +#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(21)
    +#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(22)
    +#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(23)
    +#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(24)
     /* AMD-V specific state. */
    -#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(19)
    -#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(20)
    -#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
    -#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
    -#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)
    -
    -#define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                \
    -                                                  | HM_CHANGED_GUEST_CR3                \
    -                                                  | HM_CHANGED_GUEST_CR4                \
    -                                                  | HM_CHANGED_GUEST_GDTR               \
    -                                                  | HM_CHANGED_GUEST_IDTR               \
    -                                                  | HM_CHANGED_GUEST_LDTR               \
    -                                                  | HM_CHANGED_GUEST_TR                 \
    -                                                  | HM_CHANGED_GUEST_SEGMENT_REGS       \
    -                                                  | HM_CHANGED_GUEST_DEBUG              \
    -                                                  | HM_CHANGED_GUEST_RIP                \
    -                                                  | HM_CHANGED_GUEST_RSP                \
    -                                                  | HM_CHANGED_GUEST_RFLAGS             \
    -                                                  | HM_CHANGED_GUEST_CR2                \
    -                                                  | HM_CHANGED_GUEST_SYSENTER_CS_MSR    \
    -                                                  | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
    -                                                  | HM_CHANGED_GUEST_SYSENTER_ESP_MSR   \
    -                                                  | HM_CHANGED_GUEST_EFER_MSR           \
    -                                                  | HM_CHANGED_GUEST_LAZY_MSRS          \
    -                                                  | HM_CHANGED_GUEST_XCPT_INTERCEPTS    \
    -                                                  | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
    -                                                  | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
    -                                                  | HM_CHANGED_VMX_GUEST_APIC_STATE     \
    -                                                  | HM_CHANGED_VMX_ENTRY_CTLS           \
    +#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(21)
    +#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(22)
    +#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(23)
    +#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(24)
    +
    +#define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                 \
    +                                                  | HM_CHANGED_GUEST_CR3                 \
    +                                                  | HM_CHANGED_GUEST_CR4                 \
    +                                                  | HM_CHANGED_GUEST_GDTR                \
    +                                                  | HM_CHANGED_GUEST_IDTR                \
    +                                                  | HM_CHANGED_GUEST_LDTR                \
    +                                                  | HM_CHANGED_GUEST_TR                  \
    +                                                  | HM_CHANGED_GUEST_SEGMENT_REGS        \
    +                                                  | HM_CHANGED_GUEST_DEBUG               \
    +                                                  | HM_CHANGED_GUEST_RIP                 \
    +                                                  | HM_CHANGED_GUEST_RSP                 \
    +                                                  | HM_CHANGED_GUEST_RFLAGS              \
    +                                                  | HM_CHANGED_GUEST_CR2                 \
    +                                                  | HM_CHANGED_GUEST_SYSENTER_CS_MSR     \
    +                                                  | HM_CHANGED_GUEST_SYSENTER_EIP_MSR    \
    +                                                  | HM_CHANGED_GUEST_SYSENTER_ESP_MSR    \
    +                                                  | HM_CHANGED_GUEST_EFER_MSR            \
    +                                                  | HM_CHANGED_GUEST_APIC_STATE          \
    +                                                  | HM_CHANGED_GUEST_HWVIRT              \
    +                                                  | HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS \
    +                                                  | HM_CHANGED_VMM_GUEST_LAZY_MSRS       \
    +                                                  | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
    +                                                  | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE  \
    +                                                  | HM_CHANGED_VMX_ENTRY_CTLS            \
                                                       | HM_CHANGED_VMX_EXIT_CTLS)
    
    -#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(24)
    +#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(25)
    
     /* Bits shared between host and guest. */
    -#define HM_CHANGED_HOST_GUEST_SHARED_STATE       (  HM_CHANGED_GUEST_CR0                \
    -                                                  | HM_CHANGED_GUEST_DEBUG              \
    -                                                  | HM_CHANGED_GUEST_LAZY_MSRS)
    +#define HM_CHANGED_HOST_GUEST_SHARED_STATE       (  HM_CHANGED_GUEST_CR0           \
    +                                                  | HM_CHANGED_GUEST_DEBUG         \
    +                                                  | HM_CHANGED_VMM_GUEST_LAZY_MSRS)
     /** @} */
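
The renumbering above moves the vendor-specific bits up to 21-24 to make room for the two common VMM bits, and it is intentional that the VT-x and AMD-V definitions alias each other, since only one backend is active on a given host. A compile-time sanity check over the new layout, re-declaring the values locally (mirroring the diff, not including the real HMInternal.h):

#include <stdint.h>

#define RT_BIT(b)                             ((uint32_t)1 << (b))

#define HM_CHANGED_GUEST_APIC_STATE           RT_BIT(17)
#define HM_CHANGED_GUEST_HWVIRT               RT_BIT(18)
#define HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS  RT_BIT(19)
#define HM_CHANGED_VMM_GUEST_LAZY_MSRS        RT_BIT(20)
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS        RT_BIT(21)
#define HM_CHANGED_VMX_EXIT_CTLS              RT_BIT(24)
#define HM_CHANGED_SVM_RESERVED1              RT_BIT(21)
#define HM_CHANGED_SVM_RESERVED4              RT_BIT(24)
#define HM_CHANGED_HOST_CONTEXT               RT_BIT(25)

/* Common VMM bits must sit below the vendor-specific range (bits 21-24)... */
_Static_assert((HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMM_GUEST_LAZY_MSRS)
               < HM_CHANGED_VMX_GUEST_AUTO_MSRS, "common bits below vendor bits");

/* ...while VT-x and AMD-V bits deliberately alias: only one backend is in use. */
_Static_assert(HM_CHANGED_VMX_GUEST_AUTO_MSRS == HM_CHANGED_SVM_RESERVED1, "aliasing by design");
_Static_assert(HM_CHANGED_VMX_EXIT_CTLS == HM_CHANGED_SVM_RESERVED4, "aliasing by design");

/* The host-context bit sits above everything guest-related. */
_Static_assert(HM_CHANGED_HOST_CONTEXT > HM_CHANGED_VMX_EXIT_CTLS, "host bit is highest");

int main(void) { return 0; }
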
    222224