Changeset 69764 in vbox for trunk/src/VBox/VMM
Timestamp:  Nov 20, 2017 9:14:10 AM
Location:   trunk/src/VBox/VMM
Files:      5 edited
Legend: lines prefixed with "+" were added, lines prefixed with "-" were removed; all other lines are unchanged context.
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r69408 → r69764)

@@ -2561 +2561 @@
     Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
     Assert(pCtx->hwvirt.svm.fGif);
+    Assert(!pCtx->hwvirt.svm.fHMCachedVmcb);

     PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;

trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
(r69715 → r69764)

@@ -336 +336 @@
  *
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
+ * @param   pCtx            Pointer to the guest-CPU context.
  *
  * @sa      hmR0SvmVmRunCacheVmcb.
  */
-VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst)
+VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /*
@@ -346 +346 @@
      * the nested-guest under SVM R0.
      */
-    PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    if (pNstGstVmcbCache->fValid)
-    {
-        PSVMVMCBCTRL      pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-        PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
+    if (pCtx->hwvirt.svm.fHMCachedVmcb)
+    {
+        PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+        PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
+        PSVMVMCBSTATESAVE   pVmcbNstGstState = &pVmcbNstGst->guest;
+        PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+
         pVmcbNstGstCtrl->u16InterceptRdCRx = pNstGstVmcbCache->u16InterceptRdCRx;
         pVmcbNstGstCtrl->u16InterceptWrCRx = pNstGstVmcbCache->u16InterceptWrCRx;
@@ -366 +368 @@
         pVmcbNstGstCtrl->TLBCtrl           = pNstGstVmcbCache->TLBCtrl;
         pVmcbNstGstCtrl->NestedPaging      = pNstGstVmcbCache->NestedPagingCtrl;
-        pNstGstVmcbCache->fValid = false;
+        pCtx->hwvirt.svm.fHMCachedVmcb = false;
     }
 }

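The hunks above turn the #VMEXIT notification into a guarded save/restore: ring-0 code stashes the nested-guest VMCB fields it is about to rewrite, marks pCtx->hwvirt.svm.fHMCachedVmcb, and this function puts the original values back only if that flag is set. Moving the flag from the ring-0 cache structure into the guest-CPU context is what lets code outside HM (such as the new assertion in CPUMAllRegs.cpp above) check it. Below is a self-contained toy sketch of the same pattern; the structures and field names here are invented for illustration and are not VirtualBox types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for a VMCB control area and its host-side cache. */
typedef struct { uint16_t u16InterceptRdCRx; uint64_t u64IOPMPhysAddr; } TOYVMCBCTRL;
typedef struct { TOYVMCBCTRL Saved; bool fCached; } TOYVMCBCACHE;

/* Before running the nested guest: save the guest's values once, then rewrite
   them with host-chosen ones. */
static void toyVmRunCacheAndSetup(TOYVMCBCTRL *pCtrl, TOYVMCBCACHE *pCache,
                                  uint16_t uHostIntercepts, uint64_t uHostIopm)
{
    if (!pCache->fCached)
    {
        pCache->Saved   = *pCtrl;                    /* Remember what the guest programmed. */
        pCache->fCached = true;
    }
    pCtrl->u16InterceptRdCRx |= uHostIntercepts;     /* Host-side modifications. */
    pCtrl->u64IOPMPhysAddr    = uHostIopm;
}

/* On #VMEXIT (or before other code inspects the VMCB): undo the modifications,
   but only if they were actually made. */
static void toyVmExitNotify(TOYVMCBCTRL *pCtrl, TOYVMCBCACHE *pCache)
{
    if (pCache->fCached)
    {
        *pCtrl = pCache->Saved;                      /* VMCB reflects pure guest state again. */
        pCache->fCached = false;
    }
}

int main(void)
{
    TOYVMCBCTRL  Ctrl  = { .u16InterceptRdCRx = 0x0001, .u64IOPMPhysAddr = 0x1000 };
    TOYVMCBCACHE Cache = { { 0, 0 }, false };

    toyVmRunCacheAndSetup(&Ctrl, &Cache, 0x8000, 0xfee00000);
    toyVmExitNotify(&Ctrl, &Cache);
    printf("restored IOPM=%#llx\n", (unsigned long long)Ctrl.u64IOPMPhysAddr);  /* prints 0x1000 */
    return 0;
}
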
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r69763 → r69764)

@@ -2002 +2002 @@
  * hardware-assisted SVM.
  *
+ * @returns true if the VMCB was previously already cached, false otherwise.
  * @param   pCtx            Pointer to the guest-CPU context.
  *
  * @sa      HMSvmNstGstVmExitNotify.
  */
-static void hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+static bool hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
@@ -2020 +2021 @@
      * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
      */
-    if (!pNstGstVmcbCache->fValid)
+    bool const fWasCached = pCtx->hwvirt.svm.fHMCachedVmcb;
+    if (!fWasCached)
     {
         pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
@@ -2037 +2039 @@
         pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
         pNstGstVmcbCache->NestedPagingCtrl  = pVmcbNstGstCtrl->NestedPaging;
-        pNstGstVmcbCache->fValid = true;
+        pCtx->hwvirt.svm.fHMCachedVmcb = true;
         Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
     }
+
+    return fWasCached;
 }
@@ -2058 +2062 @@
      * First cache the nested-guest VMCB fields we may potentially modify.
      */
-    hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
-
-    /*
-     * The IOPM of the nested-guest can be ignored because the the guest always
-     * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
-     * into the nested-guest one and swap it back on the #VMEXIT.
-     */
-    pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
-
-    /*
-     * Load the host-physical address into the MSRPM rather than the nested-guest
-     * physical address (currently we trap all MSRs in the nested-guest).
-     */
-    pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
-
-    /*
-     * Use the same nested-paging as the "outer" guest. We can't dynamically
-     * switch off nested-paging suddenly while executing a VM (see assertion at the
-     * end of Trap0eHandler in PGMAllBth.h).
-     */
-    pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
+    bool const fVmcbCached = hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
+    if (!fVmcbCached)
+    {
+        /*
+         * The IOPM of the nested-guest can be ignored because the the guest always
+         * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
+         * into the nested-guest one and swap it back on the #VMEXIT.
+         */
+        pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
+
+        /*
+         * Load the host-physical address into the MSRPM rather than the nested-guest
+         * physical address (currently we trap all MSRs in the nested-guest).
+         */
+        pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
+
+        /*
+         * Use the same nested-paging as the "outer" guest. We can't dynamically
+         * switch off nested-paging suddenly while executing a VM (see assertion at the
+         * end of Trap0eHandler in PGMAllBth.h).
+         */
+        pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
+    }
+    else
+    {
+        Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
+        Assert(pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap);
+        Assert(pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
+    }
 }
@@ -2097 +2109 @@
     PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     Assert(pVmcbNstGst);
-
-    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);

     hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
@@ -2541 +2551 @@
     /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
     VMMRZCallRing3Disable(pVCpu);
-    Log4(("hmR0SvmExitToRing3: rcExit=%d\n", rcExit));
+    Log4(("hmR0SvmExitToRing3: VCPU[%u]: rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", pVCpu->idCpu, rcExit,
+          pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions));

     /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
@@ -2570 +2581 @@
     if (rcExit != VINF_EM_RAW_INTERRUPT)
         HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    /*
+     * We may inspect the nested-guest VMCB state in ring-3 (e.g. for injecting interrupts)
+     * and thus we need to restore any modifications we may have made to it here if we're
+     * still executing the nested-guest.
+     */
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        HMSvmNstGstVmExitNotify(pVCpu, pCtx);
+#endif

     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
@@ -3021 +3042 @@

 #ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Checks whether the SVM nested-guest is in a state to receive physical (APIC)
+ * interrupts.
+ *
+ * @returns true if it's ready, false otherwise.
+ * @param   pCtx        The guest-CPU context.
+ *
+ * @remarks This function looks at the VMCB cache rather than directly at the
+ *          nested-guest VMCB which may have been suitably modified for executing
+ *          using hardware-assisted SVM.
+ */
+static bool hmR0SvmCanNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    X86EFLAGS fEFlags;
+    if (pVmcbNstGstCache->fVIntrMasking)
+        fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
+    else
+        fEFlags.u = pCtx->eflags.u;
+
+    return fEFlags.Bits.u1IF;
+}
+
+
 /**
  * Evaluates the event to be delivered to the nested-guest and sets it as the
@@ -3088 +3134 @@
      *   see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
      */
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
     if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
         && !fIntShadow
         && !pVCpu->hm.s.fSingleInstruction
-        && CPUMCanSvmNstGstTakePhysIntr(pCtx))
+        && hmR0SvmCanNstGstTakePhysIntr(pVCpu, pCtx))
     {
-        if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_INTR))
+        if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INTR)
         {
             Log4(("Intercepting external interrupt -> #VMEXIT\n"));
@@ -3128 +3176 @@
     /*
      * Check if the nested-guest can receive virtual (injected by VMRUN) interrupts.
-     * We can call CPUMCanSvmNstGstTakeVirtIntr here as we don't cache/modify any
+     * We can safely call CPUMCanSvmNstGstTakeVirtIntr here as we don't cache/modify any
      * nested-guest VMCB interrupt control fields besides V_INTR_MASKING, see hmR0SvmVmRunCacheVmcb.
      */
-    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
-        && CPUMCanSvmNstGstTakeVirtIntr(pCtx)
-        && CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VINTR))
+    if (   (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
+        && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
+        && CPUMCanSvmNstGstTakeVirtIntr(pCtx))
     {
         Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
@@ -3569 +3617 @@
         return rc;

+    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
+
     if (TRPMHasTrap(pVCpu))
         hmR0SvmTrpmTrapToPendingEvent(pVCpu);
@@ -3598 +3648 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);

-    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fValid);
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);

     /*
@@ -4133 +4183 @@
     hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcbNstGst);     /* Save the nested-guest state from the VMCB to the
                                                                  guest-CPU context. */
-
-    /*
-     * Currently, reload the entire nested-guest VMCB due to code that directly inspects
-     * the nested-guest VMCB instead of the cache, e.g. hmR0SvmEvaluatePendingEventNested.
-     */
-    HMSvmNstGstVmExitNotify(pVCpu, pVmcbNstGst);
-    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 }
 #endif

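Among these HMSVMR0.cpp changes, the new hmR0SvmCanNstGstTakePhysIntr encodes the rule the diff relies on for deciding whether the nested guest can take a physical (APIC) interrupt: when V_INTR_MASKING is set in the nested-guest VMCB, deliverability is gated by the RFLAGS.IF value saved into the host-state area at VMRUN (i.e. the outer guest's IF), otherwise by the nested-guest's own RFLAGS.IF. The self-contained sketch below illustrates only that decision; the types and names are invented for the example, and only the rule itself comes from the change above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_EFL_IF  (1u << 9)         /* Interrupt-enable bit in EFLAGS/RFLAGS. */

/* Toy view of the state the real code pulls from the VMCB cache and CPU context. */
typedef struct
{
    bool     fVIntrMasking;           /* V_INTR_MASKING from the nested-guest VMCB. */
    uint32_t uHostEFlags;             /* RFLAGS saved in the host-state area at VMRUN. */
    uint32_t uGuestEFlags;            /* Nested-guest RFLAGS. */
} TOYNSTGSTINTRSTATE;

/* Physical interrupts are deliverable when IF is set in whichever RFLAGS the
   V_INTR_MASKING setting makes authoritative. */
static bool toyCanNstGstTakePhysIntr(const TOYNSTGSTINTRSTATE *pState)
{
    uint32_t const uEFlags = pState->fVIntrMasking ? pState->uHostEFlags
                                                   : pState->uGuestEFlags;
    return (uEFlags & X86_EFL_IF) != 0;
}

int main(void)
{
    TOYNSTGSTINTRSTATE State = { .fVIntrMasking = true,
                                 .uHostEFlags   = X86_EFL_IF,   /* outer guest IF=1 */
                                 .uGuestEFlags  = 0 };           /* nested guest IF=0 */
    /* With V_INTR_MASKING set the outer guest's IF decides, so this prints 1. */
    printf("%d\n", toyCanNstGstTakePhysIntr(&State));
    return 0;
}
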
trunk/src/VBox/VMM/include/CPUMInternal.mac
(r69221 → r69764)

@@ -244 +244 @@
     .Guest.hwvirt.svm.cPauseFilterThreshold    resw    1
     .Guest.hwvirt.svm.fInterceptEvents         resb    1
-    .Guest.hwvirt.svm.u8Padding1               resb    1
+    .Guest.hwvirt.svm.fHMCachedVmcb            resb    1
     .Guest.hwvirt.svm.pvMsrBitmapR0            RTR0PTR_RES 1
     .Guest.hwvirt.svm.pvMsrBitmapR3            RTR3PTR_RES 1
@@ -530 +530 @@
     .Hyper.hwvirt.svm.cPauseFilterThreshold    resw    1
     .Hyper.hwvirt.svm.fInterceptEvents         resb    1
-    .Hyper.hwvirt.svm.u8Padding1               resb    1
+    .Hyper.hwvirt.svm.fHMCachedVmcb            resb    1
     .Hyper.hwvirt.svm.pvMsrBitmapR0            RTR0PTR_RES 1
     .Hyper.hwvirt.svm.pvMsrBitmapR3            RTR3PTR_RES 1

trunk/src/VBox/VMM/testcase/tstVMStruct.h
(r69474 → r69764)

@@ -141 +141 @@
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilterThreshold);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fInterceptEvents);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fHMCachedVmcb);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR0);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR3);

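tstVMStruct.h gains a GEN_CHECK_OFF entry for the new field so the structure-layout testcases can check its offset alongside the other hwvirt.svm members, matching the assembly reservation added in CPUMInternal.mac above. Conceptually such a check boils down to comparing offsetof() against an expected value; the snippet below is a generic, self-contained illustration of that idea using made-up types, not the actual GEN_CHECK_OFF macro.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Toy structure mimicking a C definition whose layout is mirrored elsewhere
   (e.g. in an assembly include file using resb/resw reservations). */
typedef struct
{
    uint16_t cPauseFilterThreshold;
    uint8_t  fInterceptEvents;
    uint8_t  fHMCachedVmcb;            /* New field that replaced a padding byte. */
    void    *pvMsrBitmapR0;
} TOYSVMSTATE;

/* A layout check in the spirit of GEN_CHECK_OFF: the offset computed by the
   compiler must match the offset the other definition assumes. */
#define TOY_CHECK_OFF(type, member, expected) \
    static_assert(offsetof(type, member) == (expected), "layout mismatch: " #member)

TOY_CHECK_OFF(TOYSVMSTATE, cPauseFilterThreshold, 0);
TOY_CHECK_OFF(TOYSVMSTATE, fInterceptEvents,      2);
TOY_CHECK_OFF(TOYSVMSTATE, fHMCachedVmcb,         3);

int main(void) { return 0; }
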