VirtualBox

Changeset 69142 in vbox


Timestamp: Oct 20, 2017 9:59:27 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 118451
Message: VMM: Nested Hw.virt: SVM fixes.
Location: trunk/src/VBox/VMM
Files: 3 edited

Legend: unchanged context lines are prefixed with a space, removed lines with '-', and added lines with '+'.
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r69111 → r69142)
@@ -1756 +1756 @@
         hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);

+        /** @todo Optimization: we don't need to intercept VMMCALL when the
+         *        nested-guest isn't intercepting them, and possibly others. */
+
         /* Next, merge the intercepts into the nested-guest VMCB. */
         pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
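The merge step above ORs the intercept bitmaps the host requires into the ones the nested-guest hypervisor programmed, so the resulting VMCB intercepts the union of both sets and neither side loses an intercept it asked for. A minimal standalone sketch of that idea, using simplified hypothetical field names rather than the real SVM VMCB control layout:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical, trimmed-down stand-in for the VMCB intercept controls.
    struct InterceptCtrl
    {
        uint16_t u16InterceptRdCRx;   // CR read intercept bitmap (bit n = CRn)
        uint16_t u16InterceptWrCRx;   // CR write intercept bitmap
        uint64_t u64InterceptCtrl;    // miscellaneous control intercepts
    };

    // Merge the host-required intercepts into the nested-guest's intercepts.
    // OR-ing guarantees no intercept requested by either side is dropped.
    static void MergeIntercepts(InterceptCtrl &nstGst, const InterceptCtrl &host)
    {
        nstGst.u16InterceptRdCRx |= host.u16InterceptRdCRx;
        nstGst.u16InterceptWrCRx |= host.u16InterceptWrCRx;
        nstGst.u64InterceptCtrl  |= host.u64InterceptCtrl;
    }

    int main()
    {
        InterceptCtrl host   = { 0x0009, 0x0001, 0x4 };  // e.g. host wants CR0/CR3 reads, CR0 writes
        InterceptCtrl nstGst = { 0x0001, 0x0100, 0x2 };  // nested hypervisor wants its own subset
        MergeIntercepts(nstGst, host);
        std::printf("RdCRx=%#x WrCRx=%#x Ctrl=%#llx\n",
                    nstGst.u16InterceptRdCRx, nstGst.u16InterceptWrCRx,
                    (unsigned long long)nstGst.u64InterceptCtrl);
        return 0;
    }

The hunk above shows the first of these OR-merges (u16InterceptRdCRx); the surrounding code applies the same treatment to the other intercept fields.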
     
@@ -1982 +1985 @@
                           | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
                           | HM_CHANGED_GUEST_LAZY_MSRS            /* Unused. */
-                          | HM_CHANGED_SVM_NESTED_GUEST
                           | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
                           | HM_CHANGED_SVM_RESERVED2
-                          | HM_CHANGED_SVM_RESERVED3);
+                          | HM_CHANGED_SVM_RESERVED3
+                          | HM_CHANGED_SVM_RESERVED4);

     /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
     
@@ -2015 +2018 @@
     PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;

-    pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
-    pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
-    pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
-    pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
-    pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
-    pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
-    pNstGstVmcbCache->u64CR3            = pVmcbNstGstState->u64CR3;
-    pNstGstVmcbCache->u64CR4            = pVmcbNstGstState->u64CR4;
-    pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
-    pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
-    pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
-    pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
-    pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
-    pNstGstVmcbCache->NestedPagingCtrl  = pVmcbNstGstCtrl->NestedPaging;
-    pNstGstVmcbCache->fValid            = true;
+    /*
+     * Cache the nested-guest programmed VMCB fields if we have not cached it yet.
+     * Otherwise we risk re-caching the values we may have modified, see @bugref{7243#c44}.
+     */
+    if (!pNstGstVmcbCache->fValid)
+    {
+        pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
+        pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
+        pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
+        pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
+        pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
+        pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
+        pNstGstVmcbCache->u64CR3            = pVmcbNstGstState->u64CR3;
+        pNstGstVmcbCache->u64CR4            = pVmcbNstGstState->u64CR4;
+        pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
+        pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
+        pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
+        pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
+        pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
+        pNstGstVmcbCache->NestedPagingCtrl  = pVmcbNstGstCtrl->NestedPaging;
+        pNstGstVmcbCache->fValid            = true;
+        Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
+    }
 }

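The new fValid guard exists because the cache must capture the VMCB fields exactly as the nested hypervisor programmed them, before the host starts merging its own intercepts into that VMCB; caching again on a later call would snapshot already-modified values (the @bugref{7243#c44} problem). A minimal sketch of the cache-once pattern, with hypothetical trimmed-down types standing in for SVMNESTEDVMCBCACHE and the real VMCB control area:

    #include <cassert>
    #include <cstdint>

    // Hypothetical, trimmed-down stand-ins for the VMCB control area and its cache.
    struct VmcbCtrl
    {
        uint16_t u16InterceptWrCRx;
        uint64_t u64InterceptCtrl;
    };

    struct VmcbCache
    {
        uint16_t u16InterceptWrCRx;
        uint64_t u64InterceptCtrl;
        bool     fValid;              // set once the guest-programmed values are captured
    };

    // Snapshot the guest-programmed VMCB fields exactly once; later calls are no-ops,
    // so values the host may have modified in the meantime are never re-cached.
    static void CacheVmcbOnce(VmcbCache &cache, const VmcbCtrl &ctrl)
    {
        if (!cache.fValid)
        {
            cache.u16InterceptWrCRx = ctrl.u16InterceptWrCRx;
            cache.u64InterceptCtrl  = ctrl.u64InterceptCtrl;
            cache.fValid            = true;
        }
    }

    int main()
    {
        VmcbCtrl  ctrl  = { 0x0001, 0x8 };      // as programmed by the nested hypervisor
        VmcbCache cache = { 0, 0, false };

        CacheVmcbOnce(cache, ctrl);             // first call: snapshot taken
        ctrl.u16InterceptWrCRx |= 0x0100;       // host merges in its own intercepts
        CacheVmcbOnce(cache, ctrl);             // second call: must not re-cache

        assert(cache.u16InterceptWrCRx == 0x0001);
        return 0;
    }

The restore half of the pattern lives on the #VMEXIT side (HMSvmNstGstVmExitNotify, called in the last HMSVMR0.cpp hunk), which is expected to hand the cached values back and invalidate the cache so the next VMRUN takes a fresh snapshot.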
     
@@ -2073 +2084 @@

 /**
- * Sets up the nested-guest for hardware-assisted SVM execution.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
- */
-static void hmR0SvmLoadGuestVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_NESTED_GUEST))
-    {
-        hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_NESTED_GUEST);
-    }
-}
-
-
-/**
  * Loads the nested-guest state into the VMCB.
  *
     
@@ -2104 +2099 @@
     Assert(pVmcbNstGst);

-    /* First, we need to setup the nested-guest VMCB for hardware-assisted SVM execution. */
-    hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
+    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);

     hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
     
@@ -2134 +2128 @@
                           | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
                           | HM_CHANGED_SVM_RESERVED2
-                          | HM_CHANGED_SVM_RESERVED3);
+                          | HM_CHANGED_SVM_RESERVED3
+                          | HM_CHANGED_SVM_RESERVED4);

     /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
     
@@ -3041 +3036 @@
     Assert(!pVCpu->hm.s.Event.fPending);

-    bool const fIntrEnabled = pCtx->hwvirt.svm.fGif && CPUMCanSvmNstGstTakePhysIntr(pCtx);
-    if (fIntrEnabled)
+    bool const fGif = pCtx->hwvirt.svm.fGif;
+    if (fGif)
     {
         PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-        SVMEVENT Event;
-        Event.u = 0;

         bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
     
@@ -3070 +3063 @@
                 Log4(("Pending NMI\n"));

+                SVMEVENT Event;
+                Event.u = 0;
                 Event.n.u1Valid  = 1;
                 Event.n.u8Vector = X86_XCPT_NMI;
     
@@ -3082 +3077 @@

         /*
-         * Check if the nested-guest can receive external interrupts (PIC/APIC).
+         * Check if the nested-guest can receive external interrupts (generated by
+         * the guest's PIC/APIC).
          *
-         * Physical (from the nested-guest's point of view) intercepts are -always-
-         * intercepted, see HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS.
+         * External interrupts from the physical CPU are -always- intercepted when
+         * executing using hardware-assisted SVM, see HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS.
          *
-         * Physical interrupts take priority over virtual interrupts,
+         * External interrupts that are generated for the outer guest may be intercepted
+         * depending on how the nested-guest VMCB was programmed by guest software.
+         *
+         * Physical interrupts always take priority over virtual interrupts,
          * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
-         *
-         * We must be careful that the call to CPUMCanSvmNstGstTakePhysIntr below
-         * happens -before- modifying the nested-guests's V_INTR_MASKING bit,
-         * which is currently set later in hmR0SvmLoadGuestApicStateNested.
          */
         if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
     
@@ -3099 +3094 @@
             && CPUMCanSvmNstGstTakePhysIntr(pCtx))
         {
-            return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+            if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_INTR))
+            {
+                Log4(("Intercepting external interrupt -> #VMEXIT\n"));
+                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+            }
+
+            uint8_t u8Interrupt;
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            if (RT_SUCCESS(rc))
+            {
+                Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
+
+                SVMEVENT Event;
+                Event.u = 0;
+                Event.n.u1Valid  = 1;
+                Event.n.u8Vector = u8Interrupt;
+                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
+
+                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+            }
+            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
+            {
+                /*
+                 * AMD-V has no TPR thresholding feature. We just avoid delivering the
+                 * TPR-masked interrupt here; the TPR is always updated via
+                 * hmR0SvmLoadGuestState() -> hmR0SvmLoadGuestApicState().
+                 */
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
+            }
+            else
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
         }

         /*
-         * Check if the nested-guest can receive virtual interrupts.
+         * Check if the nested-guest can receive virtual (injected by VMRUN) interrupts.
+         * We can call CPUMCanSvmNstGstTakeVirtIntr here as we don't cache/modify any
+         * nested-guest VMCB interrupt control fields besides V_INTR_MASKING, see hmR0SvmVmRunCacheVmcb.
          */
         if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
             && CPUMCanSvmNstGstTakeVirtIntr(pCtx))
         {
-            uint8_t const u8Interrupt = CPUMGetSvmNstGstInterrupt(pCtx);
-            Log4(("Injecting virtual interrupt u8Interrupt=%#x\n", u8Interrupt));
-
-            Event.n.u1Valid  = 1;
-            Event.n.u8Vector = u8Interrupt;
-            Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
-            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-            return VINF_SUCCESS;
+            Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
+            return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
         }
     }
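The rewritten evaluation makes the delivery policy explicit: a pending physical (PIC/APIC) interrupt only produces an SVM_EXIT_INTR #VMEXIT if the nested hypervisor actually set the INTR intercept, otherwise the interrupt is fetched from PDM and queued for injection into the nested-guest; a deliverable virtual interrupt always produces SVM_EXIT_VINTR; and physical interrupts are checked first because they take priority (AMD spec. 15.21.4). A compact sketch of that decision logic, with a flattened hypothetical state struct standing in for the CPUM/PDM/VMCB queries made by the real code:

    #include <cassert>

    enum class Action { Nothing, VmexitIntr, VmexitVintr, InjectPhysical };

    // Hypothetical, flattened view of the state the real code pulls from CPUM, PDM and the VMCB.
    struct NestedIntrState
    {
        bool fGif;                 // global interrupt flag
        bool fInterceptIntr;       // nested hypervisor set the INTR control intercept
        bool fCanTakePhysIntr;     // physical interrupt delivery currently allowed
        bool fCanTakeVirtIntr;     // virtual (V_IRQ) interrupt delivery currently allowed
        bool fPhysIntrPending;     // PIC/APIC interrupt pending for the outer guest
        bool fVirtIntrPending;     // nested hypervisor queued a virtual interrupt
    };

    // Physical interrupts are evaluated first: they always take priority over virtual
    // interrupts. A physical interrupt only causes a #VMEXIT if the nested hypervisor
    // asked for the INTR intercept; otherwise it is delivered into the nested-guest.
    // Taking a virtual interrupt always produces the VINTR #VMEXIT here.
    static Action EvaluatePendingInterrupt(const NestedIntrState &s)
    {
        if (!s.fGif)
            return Action::Nothing;

        if (s.fPhysIntrPending && s.fCanTakePhysIntr)
            return s.fInterceptIntr ? Action::VmexitIntr : Action::InjectPhysical;

        if (s.fVirtIntrPending && s.fCanTakeVirtIntr)
            return Action::VmexitVintr;

        return Action::Nothing;
    }

    int main()
    {
        NestedIntrState s = { true, true, true, true, true, true };
        assert(EvaluatePendingInterrupt(s) == Action::VmexitIntr);     // physical wins, intercepted
        s.fInterceptIntr = false;
        assert(EvaluatePendingInterrupt(s) == Action::InjectPhysical); // not intercepted: inject
        s.fPhysIntrPending = false;
        assert(EvaluatePendingInterrupt(s) == Action::VmexitVintr);    // virtual interrupt taken
        return 0;
    }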
     
@@ -3153 +3172 @@
         bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
         PSVMVMCB pVmcb        = pVCpu->hm.s.svm.pVmcb;
-
-        SVMEVENT Event;
-        Event.u = 0;

         Log4Func(("fGif=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fGif, fBlockInt, fIntShadow,
     
@@ -3171 +3187 @@
                 Log4(("Pending NMI\n"));

+                SVMEVENT Event;
+                Event.u = 0;
                 Event.n.u1Valid  = 1;
                 Event.n.u8Vector = X86_XCPT_NMI;
     
@@ -3197 +3215 @@
                     Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));

+                    SVMEVENT Event;
+                    Event.u = 0;
                     Event.n.u1Valid  = 1;
                     Event.n.u8Vector = u8Interrupt;
     
@@ -3532 +3552 @@
     HMSVM_ASSERT_PREEMPT_SAFE();

+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-    {
         Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
         return VINF_EM_RESCHEDULE_REM;
-    }
 #endif
+    }
+    else
+        return VINF_SVM_VMEXIT;

     /* Check force flag actions that might require us to go back to ring-3. */
     
@@ -3573 +3595 @@
     /** @todo Get new STAM counter for this? */
     STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
+
+    PCSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    Assert(pNstGstVmcbCache->fValid);

     /*
     
@@ -4106 +4131 @@
     hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcbNstGst);       /* Save the nested-guest state from the VMCB to the
                                                                    guest-CPU context. */
+
+    /*
+     * Currently, reload the entire nested-guest VMCB due to code that directly inspects
+     * the nested-guest VMCB instead of the cache, e.g. hmR0SvmEvaluatePendingEventNested.
+     */
+    HMSvmNstGstVmExitNotify(pVCpu, pVmcbNstGst);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 }
 #endif
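Setting HM_CHANGED_ALL_GUEST right after HMSvmNstGstVmExitNotify is the brute-force safeguard described in the added comment: any code that still inspects the nested-guest VMCB directly rather than the cache sees fully reloaded state on the next run. A tiny sketch of the dirty-flag idea, with hypothetical flag values in place of the RT_BIT()-based HM_CHANGED_* masks:

    #include <cassert>
    #include <cstdint>

    // Hypothetical changed-state bits, modelled on the RT_BIT()-style HM_CHANGED_* masks.
    constexpr uint32_t CHANGED_GUEST_CR0  = UINT32_C(1) << 0;
    constexpr uint32_t CHANGED_GUEST_SEGS = UINT32_C(1) << 1;
    constexpr uint32_t CHANGED_GUEST_MSRS = UINT32_C(1) << 2;
    constexpr uint32_t CHANGED_ALL_GUEST  = CHANGED_GUEST_CR0 | CHANGED_GUEST_SEGS | CHANGED_GUEST_MSRS;

    struct VCpuLoadState
    {
        uint32_t fChangedFlags;   // pending "must reload into the VMCB" bits
    };

    // After a nested #VMEXIT, mark everything dirty so every load routine runs again
    // on the next VMRUN, regardless of what was individually tracked before.
    inline void OnNestedVmExit(VCpuLoadState &vcpu)
    {
        vcpu.fChangedFlags |= CHANGED_ALL_GUEST;
    }

    int main()
    {
        VCpuLoadState vcpu = { CHANGED_GUEST_CR0 };   // only CR0 was dirty
        OnNestedVmExit(vcpu);
        assert((vcpu.fChangedFlags & CHANGED_ALL_GUEST) == CHANGED_ALL_GUEST);
        return 0;
    }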
     
@@ -4714 +4746 @@
         case SVM_EXIT_WRITE_CR8:   /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set?? */
         {
+            Log4(("hmR0SvmHandleExitNested: Write CRx: u16InterceptWrCRx=%#x u64ExitCode=%#RX64 %#x\n",
+                  pVmcbNstGstCache->u16InterceptWrCRx, pSvmTransient->u64ExitCode,
+                  (1U << (uint16_t)(pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0))));
             if (pVmcbNstGstCache->u16InterceptWrCRx & (1U << (uint16_t)(pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0)))
                 HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
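The intercept test relies on the SVM exit codes for CR writes being consecutive, so the pending exit code maps to a single bit of the cached 16-bit write-intercept bitmap: bit n of u16InterceptWrCRx corresponds to SVM_EXIT_WRITE_CR0 + n. A small sketch of that mapping; the constant value below is illustrative, the real definitions live in the SVM headers:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical exit-code base; the real SVM_EXIT_WRITE_CR0..CR15 codes are consecutive.
    constexpr uint64_t EXIT_WRITE_CR0 = 0x10;

    // Returns true if the CR write named by the exit code is intercepted
    // according to the 16-bit write-intercept bitmap (bit n = CRn).
    static bool IsCrWriteIntercepted(uint16_t u16InterceptWrCRx, uint64_t u64ExitCode)
    {
        uint16_t const uBit = static_cast<uint16_t>(u64ExitCode - EXIT_WRITE_CR0);
        return (u16InterceptWrCRx & (UINT16_C(1) << uBit)) != 0;
    }

    int main()
    {
        uint16_t const fIntercepts = UINT16_C(0x0109);   // CR0, CR3 and CR8 writes intercepted
        std::printf("CR8 write intercepted: %d\n", IsCrWriteIntercepted(fIntercepts, EXIT_WRITE_CR0 + 8));
        std::printf("CR4 write intercepted: %d\n", IsCrWriteIntercepted(fIntercepts, EXIT_WRITE_CR0 + 4));
        return 0;
    }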
     
@@ -6092 +6127 @@
             uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
             uint8_t const iGReg   = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
+            Log4(("hmR0SvmExitWriteCRx: Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));
             rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
             fDecodedInstr = true;
     
@@ -6100 +6136 @@
     if (!fDecodedInstr)
     {
+        Log4(("hmR0SvmExitWriteCRx: iCrReg=%#x\n", iCrReg));
         rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), NULL);
         if (RT_UNLIKELY(   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
  • trunk/src/VBox/VMM/VMMR3/EM.cpp (r69111 → r69142)
@@ -1990 +1990 @@
                         {
                             VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
-                            if (rcStrict == VINF_SVM_VMEXIT)
+                            if (RT_SUCCESS(rcStrict))
                                 rc2 = VINF_EM_RESCHEDULE;
                             else
                             {
+                                AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                                 Log(("EM: SVM Nested-guest INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                                 /** @todo should we call iemInitiateCpuShutdown? Should this
     
@@ -2029 +2030 @@
                             {
                                 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
-                                if (rcStrict == VINF_SVM_VMEXIT)
+                                if (RT_SUCCESS(rcStrict))
                                     rc2 = VINF_EM_RESCHEDULE;
                                 else
                                 {
+                                    AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                                     Log(("EM: SVM Nested-guest VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                                     /** @todo should we call iemInitiateCpuShutdown? Should this
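Both EM.cpp hunks replace the exact comparison against VINF_SVM_VMEXIT with an RT_SUCCESS() check, so any success or informational status from IEMExecSvmVmexit leads to a reschedule and only genuine failures fall into the new assert-and-log path. A minimal sketch of that pattern using plain integer status codes; the real code uses VBOXSTRICTRC and the IPRT RT_SUCCESS()/AssertMsgFailed() macros:

    #include <cassert>
    #include <cstdio>

    // IPRT-style convention: status codes >= 0 are success/informational, < 0 are errors.
    static bool IsSuccess(int rc) { return rc >= 0; }

    // Hypothetical stand-in for triggering the nested #VMEXIT; returns a status code.
    static int ExecVmexit() { return 1 /* some informational success status */; }

    int main()
    {
        int const rc = ExecVmexit();
        if (IsSuccess(rc))
        {
            std::printf("#VMEXIT delivered (rc=%d), reschedule\n", rc);
        }
        else
        {
            // Failure is unexpected here: assert in debug builds, log in release builds.
            assert(!"nested #VMEXIT failed");
            std::printf("#VMEXIT failed, rc=%d\n", rc);
        }
        return 0;
    }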
  • trunk/src/VBox/VMM/include/HMInternal.h (r69111 → r69142)
@@ -186 +186 @@
 #define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
 #define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
-#define HM_CHANGED_SVM_NESTED_GUEST              RT_BIT(23)
+#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)

 #define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                \