VirtualBox

Changeset 68434 in vbox


Ignore:
Timestamp:
Aug 17, 2017 8:28:18 AM (8 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
117577
Message:

VMM: Nested Hw.virt: SVM bits.

Location:
trunk/src/VBox/VMM
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r68433 r68434  
    350350    {
    351351        PSVMVMCBCTRL      pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
    352         PSVMVMCBSTATESAVE pVmcbNstGstState =&pVmcbNstGst->guest;
     352        PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
    353353        pVmcbNstGstCtrl->u16InterceptRdCRx        = pNstGstVmcbCache->u16InterceptRdCRx;
    354354        pVmcbNstGstCtrl->u16InterceptWrCRx        = pNstGstVmcbCache->u16InterceptWrCRx;
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r68433 r68434  
    13741374        AssertRC(rc);
    13751375        pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
     1376        Log4(("hmR0SvmLoadGuestControlRegsNested: CR3=%#RX64 to HC phys CR3=%#RHp\n", pCtx->cr3, pVmcbNstGst->guest.u64CR3));
    13761377        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
    13771378    }
     
    17951796 * well and handle it accordingly.
    17961797 *
     1798 * @param   pVCpu       The cross context virtual CPU structure.
    17971799 * @param   pVmcb           Pointer to the VM control block.
    17981800 * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
    17991801 */
    1800 static void hmR0SvmMergeIntercepts(PCSVMVMCB pVmcb, PSVMVMCB pVmcbNstGst)
    1801 {
    1802     pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
    1803     pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
    1804 
    1805     /** @todo Figure out debugging with nested-guests, till then just intercept
    1806      *        all DR[0-15] accesses. */
    1807     pVmcbNstGst->ctrl.u16InterceptRdDRx |= 0xffff;
    1808     pVmcbNstGst->ctrl.u16InterceptWrDRx |= 0xffff;
    1809 
    1810     pVmcbNstGst->ctrl.u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
    1811     pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl
    1812                                         |  HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS;
    1813 
    1814     Assert((pVmcbNstGst->ctrl.u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS) == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
     1802static void hmR0SvmLoadGuestXcptInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMVMCB pVmcbNstGst)
     1803{
     1804    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
     1805    {
     1806        hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);
     1807
     1808        pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
     1809        pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
     1810
     1811        /** @todo Figure out debugging with nested-guests, till then just intercept
     1812         *        all DR[0-15] accesses. */
     1813        pVmcbNstGst->ctrl.u16InterceptRdDRx |= 0xffff;
     1814        pVmcbNstGst->ctrl.u16InterceptWrDRx |= 0xffff;
     1815
     1816        pVmcbNstGst->ctrl.u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
     1817        pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl
     1818                                            |  HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS;
     1819
     1820        Assert(   (pVmcbNstGst->ctrl.u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
     1821               == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
     1822
     1823        Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS));
     1824    }
    18151825}
    18161826#endif
     
    20292039#ifdef VBOX_WITH_NESTED_HWVIRT
    20302040/**
    2031  * Caches the nested-guest VMCB fields before we modify them for executing the
    2032  * nested-guest under SVM R0.
     2041 * Caches the nested-guest VMCB fields before we modify them for execution using
     2042 * hardware-assisted SVM.
    20332043 *
    20342044 * @param   pCtx            Pointer to the guest-CPU context.
     
    20392049{
    20402050    PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    2041     PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
    2042     PSVMVMCBSTATESAVE   pVmcbNstGstState = &pVmcbNstGst->guest;
     2051    PCSVMVMCBCTRL       pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
     2052    PCSVMVMCBSTATESAVE  pVmcbNstGstState = &pVmcbNstGst->guest;
    20432053    PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    20442054
     
    20602070
    20612071/**
    2062  * Sets up the nested-guest for hardware-assisted SVM execution.
     2072 * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
    20632073 *
    20642074 * @param   pVCpu           The cross context virtual CPU structure.
    20652075 * @param   pCtx            Pointer to the guest-CPU context.
    20662076 */
     2077static void hmR0SvmVmRunSetupVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
     2078{
     2079    RT_NOREF(pVCpu);
     2080    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     2081    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
     2082
     2083    /*
     2084     * First cache the nested-guest VMCB fields we may potentially modify.
     2085     */
     2086    hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
     2087
     2088    /*
     2089     * The IOPM of the nested-guest can be ignored because the guest always
     2090     * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
     2091     * than into the nested-guest one and swap it back on the #VMEXIT.
     2092     */
     2093    pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
     2094
     2095    /*
     2096     * Load the host-physical address into the MSRPM rather than the nested-guest
     2097     * physical address (currently we trap all MSRs in the nested-guest).
     2098     */
     2099    pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
     2100}
     2101
     2102
     2103/**
     2104 * Sets up the nested-guest for hardware-assisted SVM execution.
     2105 *
     2106 * @param   pVCpu           The cross context virtual CPU structure.
     2107 * @param   pCtx            Pointer to the guest-CPU context.
     2108 *
     2109 * @remarks This must be called only after the guest exceptions are up to date as
     2110 *          otherwise we risk overwriting the guest exceptions with the nested-guest
     2111 *          exceptions.
     2112 */
    20672113static void hmR0SvmLoadGuestVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
    20682114{
    20692115    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_NESTED_GUEST))
    20702116    {
    2071         /*
    2072          * Cache the nested-guest VMCB fields before we start modifying them below.
    2073          */
    2074         hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
    2075 
    2076         PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    2077         PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    2078 
    2079         /*
    2080          * The IOPM of the nested-guest can be ignored because the guest always
    2081          * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
    2082          * than into the nested-guest one and swap it back on the #VMEXIT.
    2083          */
    2084         pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
    2085 
    2086         /*
    2087          * Load the host-physical address into the MSRPM rather than the nested-guest
    2088          * physical address.
    2089          */
    2090         pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
    2091 
    2092         /*
    2093          * Merge the guest exception intercepts in to the nested-guest ones.
    2094          */
    2095         PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    2096         hmR0SvmMergeIntercepts(pVmcb, pVmcbNstGst);
    2097 
     2117        hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
    20982118        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_NESTED_GUEST);
    20992119    }
     
    21142134    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
    21152135
    2116     /*
    2117      * Load guest intercepts first into the guest VMCB as later we may merge
    2118      * them into the nested-guest VMCB further below.
    2119      */
    2120     {
    2121         PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    2122         hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);
    2123     }
    2124 
    21252136    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    21262137    Assert(pVmcbNstGst);
     
    21312142    if (!pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0)
    21322143    {
    2133         /* hmR0SvmLoadGuestVmcbNested needs to be called first which caches the VMCB fields and adjusts others. */
     2144        /* First, we need to set up the nested-guest VMCB for hardware-assisted SVM execution. */
    21342145        hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
    21352146
    2136         hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
    21372147        hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
    21382148        hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
     
    21442154    }
    21452155
     2156    hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
    21462157    hmR0SvmLoadGuestApicStateNested(pVCpu, pVmcbNstGst);
     2158
     2159    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     2160    hmR0SvmLoadGuestXcptInterceptsNested(pVCpu, pVmcb, pVmcbNstGst);
    21472161
    21482162    int rc = hmR0SvmSetupVMRunHandler(pVCpu);
     
    58045818/**
    58055819 * Performs a \#VMEXIT when the VMRUN was emulated using hmR0SvmExecVmrun and
    5806  * optionally then through SVM R0 execution.
     5820 * optionally went ahead with hardware-assisted SVM execution.
    58075821 *
    58085822 * @returns VBox status code.
     
    58145828    /*
    58155829     * Restore the modifications we did to the nested-guest VMCB in order
    5816      * to execute the nested-guest in SVM R0.
     5830     * to execute the nested-guest using hardware-assisted SVM.
    58175831     */
    58185832    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     
    58785892
    58795893/**
    5880  * Setup execution of the nested-guest in SVM R0.
     5894 * Set up the nested-guest for hardware-assisted SVM execution.
    58815895 *
    58825896 * @returns VBox status code.
     
    60016015        if (fLongModeWithPaging)
    60026016            uValidEfer |= MSR_K6_EFER_LMA;
    6003 
    6004         /*
    6005          * Set up the nested-guest for executing it using hardware-assisted SVM.
    6006          */
    6007         hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
    60086017
    60096018        /*
     
    60746083
    60756084        /*
     6085         * Set up the nested-guest for executing it using hardware-assisted SVM.
     6086         */
     6087        hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
     6088
     6089        /*
    60766090         * VMRUN loads a subset of the guest-CPU state (see above) and nothing else. Ensure
    60776091         * hmR0SvmLoadGuestStateNested doesn't need to load anything back to the VMCB cache
     
    60866100        PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    60876101        pNstGstVmcbCache->fVmrunEmulatedInR0 = true;
     6102
     6103        /*
     6104         * We flag a CR3 change to ensure loading the host-physical address of CR3 into
     6105         * the nested-guest VMCB in hmR0SvmLoadGuestControlRegsNested.
     6106         */
    60886107        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_ALL_GUEST);
    6089         HMCPU_CF_SET(pVCpu,   HM_CHANGED_HOST_GUEST_SHARED_STATE);
     6108        HMCPU_CF_SET(pVCpu,   HM_CHANGED_HOST_GUEST_SHARED_STATE | HM_CHANGED_GUEST_CR3);
    60906109
    60916110        /*
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette