VirtualBox

Changeset 69413 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: Oct 27, 2017 9:32:26 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 118742
Message: VMM/HMSVMR0: Nested Hw.virt: Fixes.

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r69221)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r69413)
@@ -288,5 +288,4 @@
 static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
 static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
-static FNSVMEXITHANDLER hmR0SvmExitSetPendingXcptUD;
 static FNSVMEXITHANDLER hmR0SvmExitMsr;
 static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
     
@@ -316,4 +315,6 @@
 static FNSVMEXITHANDLER hmR0SvmExitVmrun;
 static FNSVMEXITHANDLER hmR0SvmNestedExitIret;
+static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
+static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;
 #endif
 /** @} */
     
@@ -1756,7 +1757,4 @@
         hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);
 
-        /** @todo Optimization: we don't need to intercept VMMCALL when the
-         *        nested-guest isn't intercepting them, and possibly others. */
-
         /* Next, merge the intercepts into the nested-guest VMCB. */
         pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
     
@@ -1782,7 +1780,18 @@
         pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl
                                             |  HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS;
-
+        /*
+         * Remove control intercepts that we don't need while executing the nested-guest.
+         *
+         * VMMCALL when not intercepted raises a \#UD exception in the guest. However,
+         * other SVM instructions like VMSAVE, when not intercepted, can cause havoc on the
+         * host as they can write to any location in physical memory, hence they always
+         * need to be intercepted (they are included in HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS).
+         */
         Assert(   (pVmcbNstGst->ctrl.u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
                == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
+        pVmcbNstGst->ctrl.u64InterceptCtrl  &= ~SVM_CTRL_INTERCEPT_VMMCALL;
+
+        /* Remove exception intercepts that we don't need while executing the nested-guest. */
+        pVmcbNstGst->ctrl.u32InterceptXcpt  &= ~RT_BIT(X86_XCPT_UD);
 
         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS));
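
Note: the hunk above merges the outer hypervisor's intercepts into the nested-guest VMCB and then strips the ones the nested guest can safely handle on its own (the VMMCALL control intercept and the #UD exception intercept), while asserting that the mandatory set survives the merge. The following is a minimal, self-contained sketch of that pattern; every type, constant and function name in it (VMCBCTRL, MANDATORY_CTRL_INTERCEPTS, mergeNestedIntercepts, the bit assignments) is a simplified stand-in for illustration, not VirtualBox code.

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical intercept bit layout, for illustration only. */
    #define CTRL_INTERCEPT_VMRUN       (UINT64_C(1) << 0)
    #define CTRL_INTERCEPT_VMSAVE      (UINT64_C(1) << 1)
    #define CTRL_INTERCEPT_VMMCALL     (UINT64_C(1) << 2)
    #define XCPT_UD                    6

    /* Intercepts the host must always keep while running any guest. */
    #define MANDATORY_CTRL_INTERCEPTS  (CTRL_INTERCEPT_VMRUN | CTRL_INTERCEPT_VMSAVE)

    typedef struct VMCBCTRL
    {
        uint64_t u64InterceptCtrl;   /* control-instruction intercept bits */
        uint32_t u32InterceptXcpt;   /* exception intercept bits (1 << vector) */
    } VMCBCTRL;

    /* Merge the outer hypervisor's intercepts into the nested-guest VMCB, then drop the
       ones not needed while the nested guest runs: VMMCALL only raises #UD when it is
       not intercepted, so the nested hypervisor can deal with it itself, whereas the
       mandatory instruction intercepts (e.g. VMSAVE) must always stay set. */
    static void mergeNestedIntercepts(VMCBCTRL *pNstGst, const VMCBCTRL *pOuter)
    {
        pNstGst->u64InterceptCtrl |= pOuter->u64InterceptCtrl | MANDATORY_CTRL_INTERCEPTS;
        pNstGst->u32InterceptXcpt |= pOuter->u32InterceptXcpt;

        assert((pNstGst->u64InterceptCtrl & MANDATORY_CTRL_INTERCEPTS) == MANDATORY_CTRL_INTERCEPTS);
        pNstGst->u64InterceptCtrl &= ~CTRL_INTERCEPT_VMMCALL;     /* not needed for the nested guest */
        pNstGst->u32InterceptXcpt &= ~((uint32_t)1 << XCPT_UD);   /* let the nested guest see #UD    */
    }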
     
@@ -2658,4 +2667,20 @@
     Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
           pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
+}
+
+
+/**
+ * Sets an exception as pending-for-injection into the VM.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+DECLINLINE(void) hmR0SvmSetPendingXcpt(PVMCPU pVCpu, uint8_t uXcpt)
+{
+    SVMEVENT Event;
+    Event.u          = 0;
+    Event.n.u1Valid  = 1;
+    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
+    Event.n.u8Vector = uXcpt;
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
 
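
Note: the new DECLINLINE helper added above packages "queue exception X for injection into the guest on the next VM entry" into a single call, which the reworked nested #VMEXIT cases further down reuse. A rough, self-contained sketch of that idea follows; the types and names (PENDINGEVENT, VCPU, setPendingXcpt) are invented for illustration, not the VirtualBox API.

    #include <stdint.h>

    typedef struct PENDINGEVENT
    {
        uint8_t fValid;     /* event is queued for injection on the next guest entry */
        uint8_t uType;      /* hypothetical type code; 3 = hardware exception         */
        uint8_t uVector;    /* exception vector (e.g. 1 = #DB, 3 = #BP, 6 = #UD)       */
    } PENDINGEVENT;

    typedef struct VCPU
    {
        PENDINGEVENT PendingEvent;
    } VCPU;

    /* Queue an exception so it is injected into the guest on the next VM entry. */
    static void setPendingXcpt(VCPU *pVCpu, uint8_t uVector)
    {
        pVCpu->PendingEvent.fValid  = 1;
        pVCpu->PendingEvent.uType   = 3;        /* hardware exception */
        pVCpu->PendingEvent.uVector = uVector;
    }

    /* Usage: setPendingXcpt(pVCpu, 6); queues a #UD for the guest. */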
     
@@ -4582,13 +4607,4 @@
     } while (0) \
 
-#define HM_SVM_HANDLE_XCPT_EXIT_NESTED(a_uXcpt, a_XcptExitFn) \
-    do \
-    { \
-        if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(a_uXcpt)) \
-            HM_SVM_RET_VMEXIT_NESTED(pVCpu, pVmcbNstGst->ctrl.u64ExitCode, pVmcbNstGst->ctrl.u64ExitInfo1, \
-                                     pVmcbNstGst->ctrl.u64ExitInfo2); \
-        return a_XcptExitFn(pVCpu, pCtx, pSvmTransient); \
-    } while (0) \
-
     /*
      * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected
     
@@ -4602,5 +4618,6 @@
     uint64_t const      uExitInfo2       = pVmcbNstGstCtrl->u64ExitInfo2;
 
-    switch (pSvmTransient->u64ExitCode)
+    Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
+    switch (uExitCode)
     {
         case SVM_EXIT_CPUID:
     
@@ -4707,5 +4724,5 @@
                 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
                 if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_PF))
-                    HM_SVM_RET_VMEXIT_NESTED(pVCpu, SVM_EXIT_EXCEPTION_14, u32ErrCode, uFaultAddress);
+                    HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
 
                 /* If the nested-guest is not intercepting #PFs, forward the #PF to the nested-guest. */
     
@@ -4717,20 +4734,47 @@
 
         case SVM_EXIT_EXCEPTION_7:   /* X86_XCPT_NM */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_NM, hmR0SvmExitXcptNM);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_NM))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            hmR0SvmSetPendingXcptNM(pVCpu);
+            return VINF_SUCCESS;
+        }
 
         case SVM_EXIT_EXCEPTION_6:   /* X86_XCPT_UD */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_UD, hmR0SvmExitXcptUD);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_UD))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            hmR0SvmSetPendingXcptUD(pVCpu);
+            return VINF_SUCCESS;
+        }
 
         case SVM_EXIT_EXCEPTION_16:  /* X86_XCPT_MF */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_MF, hmR0SvmExitXcptMF);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_MF))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            hmR0SvmSetPendingXcptMF(pVCpu);
+            return VINF_SUCCESS;
+        }
 
         case SVM_EXIT_EXCEPTION_1:   /* X86_XCPT_DB */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_DB, hmR0SvmExitXcptDB);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_DB))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            return hmR0SvmNestedExitXcptDB(pVCpu, pCtx, pSvmTransient);
+        }
 
         case SVM_EXIT_EXCEPTION_17:  /* X86_XCPT_AC */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_AC, hmR0SvmExitXcptAC);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_AC))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
+        }
 
         case SVM_EXIT_EXCEPTION_3:   /* X86_XCPT_BP */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_BP, hmR0SvmExitXcptBP);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_BP))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            return hmR0SvmNestedExitXcptBP(pVCpu, pCtx, pSvmTransient);
+        }
 
         case SVM_EXIT_READ_CR0:
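
Note: each exception case above now spells out the reflect-or-inject decision that previously hid behind the HM_SVM_HANDLE_XCPT_EXIT_NESTED macro: if the nested hypervisor intercepts the vector, the #VMEXIT is reflected to it with the original exit code and exit-info values; otherwise the exception is handled on behalf of the nested guest (usually by queueing it for injection). A compact sketch of that decision under invented names (NESTEDVMCBCACHE, handleNestedXcpt and the return codes are illustrative only, not VirtualBox code):

    #include <stdint.h>

    /* Cached copy of the nested-guest VMCB's exception intercept bitmap. */
    typedef struct NESTEDVMCBCACHE { uint32_t u32InterceptXcpt; } NESTEDVMCBCACHE;

    enum { VMEXIT_TO_NESTED_HYPERVISOR = 1, HANDLE_FOR_NESTED_GUEST = 2 };

    /* Reflect the #VMEXIT to the nested hypervisor if it intercepts this vector,
       otherwise let the caller handle (typically inject) the exception for the
       nested guest. */
    static int handleNestedXcpt(const NESTEDVMCBCACHE *pCache, uint8_t uVector)
    {
        if (pCache->u32InterceptXcpt & ((uint32_t)1 << uVector))
            return VMEXIT_TO_NESTED_HYPERVISOR;
        return HANDLE_FOR_NESTED_GUEST;
    }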
     
@@ -4956,5 +5000,6 @@
                     if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RSM)
                         HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
-                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
+                    hmR0SvmSetPendingXcptUD(pVCpu);
+                    return VINF_SUCCESS;
                 }
 
     
@@ -4963,5 +5008,6 @@
                     if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_SKINIT)
                         HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
-                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
+                    hmR0SvmSetPendingXcptUD(pVCpu);
+                    return VINF_SUCCESS;
                 }
 
     
@@ -4984,5 +5030,4 @@
     /* not reached */
 
-#undef HM_SVM_HANDLE_XCPT_EXIT_NESTED
 #undef HM_SVM_RET_VMEXIT_NESTED
 }
     
@@ -5150,5 +5195,8 @@
                 case SVM_EXIT_RSM:
                 case SVM_EXIT_SKINIT:
-                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
+                {
+                    hmR0SvmSetPendingXcptUD(pVCpu);
+                    return VINF_SUCCESS;
+                }
 
 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     
@@ -6180,16 +6228,4 @@
 
 /**
- * \#VMEXIT handler for instructions that result in a \#UD exception delivered
- * to the guest.
- */
-HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    hmR0SvmSetPendingXcptUD(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
  * \#VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional
  * \#VMEXIT.
     
@@ -7159,4 +7195,6 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
+    /** @todo if triple-fault is returned in nested-guest scenario convert to a
+     *        shutdown VMEXIT. */
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
     
@@ -7190,5 +7228,4 @@
         Event.n.u3Type   = SVM_EVENT_EXCEPTION;
         Event.n.u8Vector = X86_XCPT_BP;
-        Event.n.u1ErrorCodeValid = 0;
         hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     }
     
@@ -7353,4 +7390,5 @@
     /** @todo Stat. */
     /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmrun); */
+#if 0
     VBOXSTRICTRC rcStrict;
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     
@@ -7363,4 +7401,6 @@
     }
     return VBOXSTRICTRC_VAL(rcStrict);
+#endif
+    return VERR_EM_INTERPRETER;
 }
 
     
@@ -7383,4 +7423,50 @@
     return VINF_SUCCESS;
 }
+
+
+/**
+ * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1).
+ * Unconditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    /* If this #DB is the result of delivering an event, go back to the interpreter. */
+    /** @todo if triple-fault is returned in nested-guest scenario convert to a
+     *        shutdown VMEXIT. */
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
+    {
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
+        return VINF_EM_RAW_INJECT_TRPM_EVENT;
+    }
+
+    hmR0SvmSetPendingXcpt(pVCpu, X86_XCPT_DB);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Nested-guest \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_EXCEPTION_3).
+ * Conditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    /** @todo if triple-fault is returned in nested-guest scenario convert to a
+     *        shutdown VMEXIT. */
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+
+    SVMEVENT Event;
+    Event.u          = 0;
+    Event.n.u1Valid  = 1;
+    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
+    Event.n.u8Vector = X86_XCPT_BP;
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+    return VINF_SUCCESS;
+}
+
 #endif /* VBOX_WITH_NESTED_HWVIRT */
 
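
Note: the two handlers added at the end of the file differ in one respect: the #DB handler first checks whether the exception surfaced while another event was being delivered and, if so, defers to the interpreter, while the #BP handler queues the breakpoint unconditionally. A stripped-down sketch of that shape follows, using invented names (VCPU, setPendingXcpt, the RC_* codes); it is not VirtualBox code.

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct VCPU { bool fEventPending; } VCPU;

    enum { RC_SUCCESS = 0, RC_REINJECT_VIA_INTERPRETER = 1 };

    /* Placeholder for "queue exception for injection into the guest". */
    static void setPendingXcpt(VCPU *pVCpu, uint8_t uVector) { (void)pVCpu; (void)uVector; }

    static int nestedExitXcptDB(VCPU *pVCpu)
    {
        if (pVCpu->fEventPending)               /* #DB hit while delivering another event */
            return RC_REINJECT_VIA_INTERPRETER; /* let the interpreter redo the delivery  */
        setPendingXcpt(pVCpu, 1 /* #DB */);
        return RC_SUCCESS;
    }

    static int nestedExitXcptBP(VCPU *pVCpu)
    {
        setPendingXcpt(pVCpu, 3 /* #BP */);     /* unconditionally queue the breakpoint */
        return RC_SUCCESS;
    }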