Changeset 69142 in vbox
- Timestamp: Oct 20, 2017 9:59:27 AM (7 years ago)
- svn:sync-xref-src-repo-rev: 118451
- Location: trunk/src/VBox/VMM
- Files: 3 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r69111 → r69142

@@ -1756 +1756 @@
     hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);

+    /** @todo Optimization: we don't need to intercept VMMCALL when the
+     *        nested-guest isn't intercepting them, and possibly others. */
+
     /* Next, merge the intercepts into the nested-guest VMCB. */
     pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;

@@ -1982 +1985 @@
                              | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
                              | HM_CHANGED_GUEST_LAZY_MSRS            /* Unused. */
-                             | HM_CHANGED_SVM_NESTED_GUEST
                              | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
                              | HM_CHANGED_SVM_RESERVED2
-                             | HM_CHANGED_SVM_RESERVED3);
+                             | HM_CHANGED_SVM_RESERVED3
+                             | HM_CHANGED_SVM_RESERVED4);

     /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */

@@ -2015 +2018 @@
     PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;

-    pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
-    pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
-    pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
-    pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
-    pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
-    pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
-    pNstGstVmcbCache->u64CR3            = pVmcbNstGstState->u64CR3;
-    pNstGstVmcbCache->u64CR4            = pVmcbNstGstState->u64CR4;
-    pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
-    pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
-    pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
-    pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
-    pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
-    pNstGstVmcbCache->NestedPagingCtrl  = pVmcbNstGstCtrl->NestedPaging;
-    pNstGstVmcbCache->fValid            = true;
+    /*
+     * Cache the nested-guest programmed VMCB fields if we have not cached it yet.
+     * Otherwise we risk re-caching the values we may have modified, see @bugref{7243#c44}.
+     */
+    if (!pNstGstVmcbCache->fValid)
+    {
+        pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
+        pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
+        pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
+        pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
+        pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
+        pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
+        pNstGstVmcbCache->u64CR3            = pVmcbNstGstState->u64CR3;
+        pNstGstVmcbCache->u64CR4            = pVmcbNstGstState->u64CR4;
+        pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
+        pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
+        pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
+        pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
+        pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
+        pNstGstVmcbCache->NestedPagingCtrl  = pVmcbNstGstCtrl->NestedPaging;
+        pNstGstVmcbCache->fValid            = true;
+        Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
+    }
 }

@@ -2073 +2084 @@

 /**
- * Sets up the nested-guest for hardware-assisted SVM execution.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
- */
-static void hmR0SvmLoadGuestVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_NESTED_GUEST))
-    {
-        hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_NESTED_GUEST);
-    }
-}
-
-
-/**
  * Loads the nested-guest state into the VMCB.
  *

@@ -2104 +2099 @@
     Assert(pVmcbNstGst);

-    /* First, we need to setup the nested-guest VMCB for hardware-assisted SVM execution. */
-    hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
+    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);

     hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);

@@ -2134 +2128 @@
                              | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
                              | HM_CHANGED_SVM_RESERVED2
-                             | HM_CHANGED_SVM_RESERVED3);
+                             | HM_CHANGED_SVM_RESERVED3
+                             | HM_CHANGED_SVM_RESERVED4);

     /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */

@@ -3041 +3036 @@
     Assert(!pVCpu->hm.s.Event.fPending);

-    bool const fIntrEnabled = pCtx->hwvirt.svm.fGif && CPUMCanSvmNstGstTakePhysIntr(pCtx);
-    if (fIntrEnabled)
+    bool const fGif = pCtx->hwvirt.svm.fGif;
+    if (fGif)
     {
         PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-        SVMEVENT Event;
-        Event.u = 0;

         bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);

@@ -3070 +3063 @@
             Log4(("Pending NMI\n"));

+            SVMEVENT Event;
+            Event.u = 0;
             Event.n.u1Valid  = 1;
             Event.n.u8Vector = X86_XCPT_NMI;

@@ -3082 +3077 @@

         /*
-         * Check if the nested-guest can receive external interrupts (PIC/APIC).
+         * Check if the nested-guest can receive external interrupts (generated by
+         * the guest's PIC/APIC).
          *
-         * Physical (from the nested-guest's point of view) intercepts are -always-
-         * intercepted, see HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS.
+         * External intercepts from the physical CPU are -always- intercepted when
+         * executing using hardware-assisted SVM, see HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS.
          *
-         * Physical interrupts take priority over virtual interrupts,
+         * External interrupts that are generated for the outer guest may be intercepted
+         * depending on how the nested-guest VMCB was programmed by guest software.
+         *
+         * Physical interrupts always take priority over virtual interrupts,
          * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
-         *
-         * We must be careful that the call to CPUMCanSvmNstGstTakePhysIntr below
-         * happens -before- modifying the nested-guests's V_INTR_MASKING bit,
-         * which is currently set later in hmR0SvmLoadGuestApicStateNested.
          */
         if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)

@@ -3099 +3094 @@
             && CPUMCanSvmNstGstTakePhysIntr(pCtx))
         {
-            return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+            if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_INTR))
+            {
+                Log4(("Intercepting external interrupt -> #VMEXIT\n"));
+                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+            }
+
+            uint8_t u8Interrupt;
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            if (RT_SUCCESS(rc))
+            {
+                Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
+
+                SVMEVENT Event;
+                Event.u = 0;
+                Event.n.u1Valid  = 1;
+                Event.n.u8Vector = u8Interrupt;
+                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
+
+                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+            }
+            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
+            {
+                /*
+                 * AMD-V has no TPR thresholding feature. We just avoid posting the interrupt.
+                 * We just avoid delivering the TPR-masked interrupt here. TPR will be updated
+                 * always via hmR0SvmLoadGuestState() -> hmR0SvmLoadGuestApicState().
+                 */
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
+            }
+            else
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
         }

         /*
-         * Check if the nested-guest can receive virtual interrupts.
+         * Check if the nested-guest can receive virtual (injected by VMRUN) interrupts.
+         * We can call CPUMCanSvmNstGstTakeVirtIntr here as we don't cache/modify any
+         * nested-guest VMCB interrupt control fields besides V_INTR_MASKING, see hmR0SvmVmRunCacheVmcb.
          */
         if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
             && CPUMCanSvmNstGstTakeVirtIntr(pCtx))
         {
-            uint8_t const u8Interrupt = CPUMGetSvmNstGstInterrupt(pCtx);
-            Log4(("Injecting virtual interrupt u8Interrupt=%#x\n", u8Interrupt));
-
-            Event.n.u1Valid  = 1;
-            Event.n.u8Vector = u8Interrupt;
-            Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
-            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-            return VINF_SUCCESS;
+            Log4(("Intercepting external interrupt -> #VMEXIT\n"));
+            return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
         }
     }

@@ -3153 +3172 @@
     bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
     PSVMVMCB pVmcb       = pVCpu->hm.s.svm.pVmcb;
-
-    SVMEVENT Event;
-    Event.u = 0;

     Log4Func(("fGif=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fGif, fBlockInt, fIntShadow,

@@ -3171 +3187 @@
         Log4(("Pending NMI\n"));

+        SVMEVENT Event;
+        Event.u = 0;
         Event.n.u1Valid  = 1;
         Event.n.u8Vector = X86_XCPT_NMI;

@@ -3197 +3215 @@
             Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));

+            SVMEVENT Event;
+            Event.u = 0;
             Event.n.u1Valid  = 1;
             Event.n.u8Vector = u8Interrupt;

@@ -3532 +3552 @@
     HMSVM_ASSERT_PREEMPT_SAFE();

+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-    {
         Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
         return VINF_EM_RESCHEDULE_REM;
-    }
 #endif
+    }
+    else
+        return VINF_SVM_VMEXIT;

     /* Check force flag actions that might require us to go back to ring-3. */

@@ -3573 +3595 @@
     /** @todo Get new STAM counter for this? */
     STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
+
+    PCSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    Assert(pNstGstVmcbCache->fValid);

     /*

@@ -4106 +4131 @@
         hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcbNstGst);   /* Save the nested-guest state from the VMCB to the
                                                                    guest-CPU context. */
+
+        /*
+         * Currently, reload the entire nested-guest VMCB due to code that directly inspects
+         * the nested-guest VMCB instead of the cache, e.g. hmR0SvmEvaluatePendingEventNested.
+         */
+        HMSvmNstGstVmExitNotify(pVCpu, pVmcbNstGst);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     }
 #endif

@@ -4714 +4746 @@
         case SVM_EXIT_WRITE_CR8:   /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set?? */
         {
+            Log4(("hmR0SvmHandleExitNested: Write CRx: u16InterceptWrCRx=%#x u64ExitCode=%#RX64 %#x\n",
+                  pVmcbNstGstCache->u16InterceptWrCRx, pSvmTransient->u64ExitCode,
+                  (1U << (uint16_t)(pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0))));
             if (pVmcbNstGstCache->u16InterceptWrCRx & (1U << (uint16_t)(pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0)))
                 HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);

@@ -6092 +6127 @@
             uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
             uint8_t const iGReg   = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
+            Log4(("hmR0SvmExitWriteCRx: Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));
             rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
             fDecodedInstr = true;

@@ -6100 +6136 @@
     if (!fDecodedInstr)
     {
+        Log4(("hmR0SvmExitWriteCRx: iCrReg=%#x\n", iCrReg));
         rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), NULL);
         if (RT_UNLIKELY(   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
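The central change in the HMSVMR0.cpp hunks is that the guest-programmed nested-guest VMCB fields are now cached only once per VMRUN, guarded by the fValid flag, so that the host's later merging of its own intercepts cannot leak back into the cache. Below is a minimal standalone sketch of that cache-once pattern; the SketchVmcbCtrl/SketchVmcbCache types, field subset, and function name are simplified stand-ins invented for illustration, not the real VBox structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the nested-guest VMCB controls and the HM cache. */
typedef struct SketchVmcbCtrl
{
    uint16_t u16InterceptWrCRx;
    uint64_t u64InterceptCtrl;
} SketchVmcbCtrl;

typedef struct SketchVmcbCache
{
    uint16_t u16InterceptWrCRx;
    uint64_t u64InterceptCtrl;
    bool     fValid;
} SketchVmcbCache;

/* Cache the guest-programmed fields only if they have not been cached yet.
 * A second call must not overwrite the cache, because by then the host may
 * already have merged its own intercepts into the live control area. */
static void sketchCacheVmcb(SketchVmcbCache *pCache, const SketchVmcbCtrl *pCtrl)
{
    if (!pCache->fValid)
    {
        pCache->u16InterceptWrCRx = pCtrl->u16InterceptWrCRx;
        pCache->u64InterceptCtrl  = pCtrl->u64InterceptCtrl;
        pCache->fValid            = true;
    }
}

int main(void)
{
    SketchVmcbCtrl  Ctrl  = { 0x0001, 0x4 };     /* values programmed by the guest */
    SketchVmcbCache Cache = { 0, 0, false };

    sketchCacheVmcb(&Cache, &Ctrl);              /* first call: guest values cached */
    Ctrl.u16InterceptWrCRx |= 0x0100;            /* host merges extra intercepts... */
    sketchCacheVmcb(&Cache, &Ctrl);              /* ...second call leaves cache alone */

    printf("cached WrCRx=%#x, live WrCRx=%#x\n",
           (unsigned)Cache.u16InterceptWrCRx, (unsigned)Ctrl.u16InterceptWrCRx);
    return 0;
}

Restoring from such a cache on #VMEXIT (rather than from the live, host-modified fields) is what lets the guest see its original VMCB programming again, which is why the changeset also asserts fValid before restoring host state.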
trunk/src/VBox/VMM/VMMR3/EM.cpp
r69111 → r69142

@@ -1990 +1990 @@
                {
                    VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
-                   if (rcStrict == VINF_SVM_VMEXIT)
+                   if (RT_SUCCESS(rcStrict))
                        rc2 = VINF_EM_RESCHEDULE;
                    else
                    {
+                       AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                        Log(("EM: SVM Nested-guest INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                        /** @todo should we call iemInitiateCpuShutdown? Should this

@@ -2029 +2030 @@
                {
                    VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
-                   if (rcStrict == VINF_SVM_VMEXIT)
+                   if (RT_SUCCESS(rcStrict))
                        rc2 = VINF_EM_RESCHEDULE;
                    else
                    {
+                       AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                        Log(("EM: SVM Nested-guest VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                        /** @todo should we call iemInitiateCpuShutdown? Should this
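The EM.cpp hunks widen the success test from the single VINF_SVM_VMEXIT status to any successful strict status. A small sketch of that distinction follows; the status values and the MY_RT_SUCCESS macro are made up for illustration and only mirror the VBox convention that informational (VINF_*) codes are non-negative and error (VERR_*) codes are negative.

#include <stdio.h>

/* Hypothetical status values, loosely following the VBox convention. */
#define MY_VINF_SUCCESS          0
#define MY_VINF_SVM_VMEXIT    4100   /* made-up informational value */
#define MY_VERR_SVM_IPE_1    (-4200) /* made-up error value */

/* Mirrors the spirit of RT_SUCCESS(): any non-negative status is a success. */
#define MY_RT_SUCCESS(rc)   ((rc) >= 0)

static const char *sketchClassify(int rcStrict)
{
    /* Before the change only the exact MY_VINF_SVM_VMEXIT value led to a
     * reschedule; checking the whole success range also accepts other
     * informational statuses the #VMEXIT path can legitimately return. */
    return MY_RT_SUCCESS(rcStrict) ? "reschedule" : "assert + log failure";
}

int main(void)
{
    printf("MY_VINF_SVM_VMEXIT -> %s\n", sketchClassify(MY_VINF_SVM_VMEXIT));
    printf("MY_VINF_SUCCESS    -> %s\n", sketchClassify(MY_VINF_SUCCESS));
    printf("MY_VERR_SVM_IPE_1  -> %s\n", sketchClassify(MY_VERR_SVM_IPE_1));
    return 0;
}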
trunk/src/VBox/VMM/include/HMInternal.h
r69111 → r69142

@@ -186 +186 @@
 #define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
 #define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
-#define HM_CHANGED_SVM_NESTED_GUEST              RT_BIT(23)
+#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)

 #define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0 \