Changeset 68361 in vbox
- Timestamp: Aug 10, 2017 9:38:55 AM
- Location: trunk
- Files: 3 edited
Legend (for the diffs below):
- Unmodified lines: prefixed with a space
- Added lines: prefixed with +
- Removed lines: prefixed with -
trunk/include/VBox/vmm/hm_svm.h
r68226 -> r68361:

  * operation during execution of the nested-guest, restored on \#VMEXIT.
  */
+#pragma pack(1)
 typedef struct SVMNESTEDVMCBCACHE
 {
…
     /** Cache of exception intercepts. */
     uint32_t u32InterceptXcpt;
+    /** Alignment. */
+    uint32_t u32Padding0;
+
     /** Cache of control intercepts. */
     uint64_t u64InterceptCtrl;
…
     /** Cache of the VMCB clean bits. */
     uint64_t u64VmcbCleanBits;
+    /** Cache of the TLB control. */
+    SVMTLBCTRL TLBCtrl;
     /** Cache of V_INTR_MASKING bit. */
     bool fVIntrMasking;
…
      * processing. */
     bool fExitCodeAndInfoUpdated;
+    /** Alignment. */
+    bool afPadding0[4];
     /** @} */
 } SVMNESTEDVMCBCACHE;
+#pragma pack()
 /** Pointer to the SVMNESTEDVMCBCACHE structure. */
 typedef SVMNESTEDVMCBCACHE *PSVMNESTEDVMCBCACHE;
…
 typedef const SVMNESTEDVMCBCACHE *PCSVMNESTEDVMCBCACHE;
 /** @} */
+AssertCompileMemberAlignment(SVMNESTEDVMCBCACHE, fVIntrMasking, 8);
+AssertCompileSizeAlignment(SVMNESTEDVMCBCACHE, 8);

 #ifdef IN_RING0
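The cache is now declared under #pragma pack(1), so the compiler no longer inserts alignment padding on its own; the explicit u32Padding0/afPadding0 members together with the new AssertCompileMemberAlignment/AssertCompileSizeAlignment checks keep the 64-bit members naturally aligned anyway. A minimal sketch of the same pattern in standard C++ (the type and member names below are illustrative stand-ins, not the real VBox definitions):

#include <cstddef>
#include <cstdint>

#pragma pack(1)
typedef struct EXAMPLECACHE
{
    uint32_t u32InterceptXcpt;
    uint32_t u32Padding0;       /* explicit padding so the next member stays 8-byte aligned */
    uint64_t u64InterceptCtrl;
    uint64_t TLBCtrl;           /* stand-in for SVMTLBCTRL */
    bool     fVIntrMasking;
    bool     afPadding0[7];     /* pads the struct size to a multiple of 8 */
} EXAMPLECACHE;
#pragma pack()

/* What AssertCompileMemberAlignment()/AssertCompileSizeAlignment() verify,
   expressed with static_assert: */
static_assert(offsetof(EXAMPLECACHE, fVIntrMasking) % 8 == 0, "member must sit on an 8-byte boundary");
static_assert(sizeof(EXAMPLECACHE) % 8 == 0, "struct size must be a multiple of 8");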
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r68293 -> r68361:

         pVmcbCtrl->u64MSRPMPhysAddr = pNstGstVmcbCache->u64MSRPMPhysAddr;
         pVmcbCtrl->IntCtrl.n.u1VIntrMasking = pNstGstVmcbCache->fVIntrMasking;
+        pVmcbCtrl->TLBCtrl = pNstGstVmcbCache->TLBCtrl;
         pNstGstVmcbCache->fValid = false;
     }
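This is the restore half of the change: the matching hunk in HMSVMR0.cpp below adds TLBCtrl to what gets saved into the nested-guest VMCB cache before the host reprograms the VMCB for execution. A rough sketch of the save/restore pairing, using simplified hypothetical types rather than the real VBox structures:

#include <cstdint>

/* Hypothetical, trimmed-down stand-ins for the VMCB control area and its cache. */
struct ExampleVmcbCtrl  { uint64_t u64MSRPMPhysAddr; uint64_t TLBCtrl; };
struct ExampleVmcbCache { uint64_t u64MSRPMPhysAddr; uint64_t TLBCtrl; bool fValid; };

/* On (emulated) VMRUN: remember the nested hypervisor's own control values
   before the host overwrites them. */
static void exampleCacheOnVmrun(ExampleVmcbCache *pCache, const ExampleVmcbCtrl *pCtrl)
{
    pCache->u64MSRPMPhysAddr = pCtrl->u64MSRPMPhysAddr;
    pCache->TLBCtrl          = pCtrl->TLBCtrl;
    pCache->fValid           = true;
}

/* On #VMEXIT: write the remembered values back so the nested hypervisor sees
   the VMCB it originally set up. */
static void exampleRestoreOnVmexit(ExampleVmcbCtrl *pCtrl, ExampleVmcbCache *pCache)
{
    pCtrl->u64MSRPMPhysAddr = pCache->u64MSRPMPhysAddr;
    pCtrl->TLBCtrl          = pCache->TLBCtrl;
    pCache->fValid          = false;
}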
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r68346 -> r68361:

     /*
-     * Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}.
+     * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
      */
     ++pCpu->cTlbFlushes;
…
  * Flushes the appropriate tagged-TLB entries.
  *
- * @param pVCpu The cross context virtual CPU structure.
- */
-static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu)
-{
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+ * @param pVCpu The cross context virtual CPU structure.
+ * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context.
+ * @param pVmcb Pointer to the VM control block.
+ */
+static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
…
     }

+#ifdef VBOX_WITH_NESTED_HWVIRT
+    /*
+     * Only if the nested hypervisor says it does not need to flush anything in the TLB,
+     * can we possibly apply it on the host. Otherwise, the nested-guest TLB flush setting
+     * should be used and then the host settings be added on top.
+     */
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
+        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+        if (pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
+            pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
+        else
+            pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush;
+    }
+#else
     pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
+#endif

     if (pVM->hm.s.svm.fAlwaysFlushTLB)
…
         if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
         {
-            pCpu->uCurrentAsid = 1;   /* Wraparound at 1; host uses 0 */
-            pCpu->cTlbFlushes++;      /* All VCPUs that run on this host CPU must use a new VPID. */
-            fHitASIDLimit = true;
+            pCpu->uCurrentAsid = 1;   /* Wraparound at 1; host uses 0 */
+            pCpu->cTlbFlushes++;      /* All VCPUs that run on this host CPU must use a new ASID. */
+            fHitASIDLimit = true;

             if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
…
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
     }
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING);
+#endif

     AssertMsg(pVCpu->hm.s.idLastCpu == pCpu->idCpu,
…
     pNstGstVmcbCache->u64VmcbCleanBits = pVmcbNstGstCtrl->u64VmcbCleanBits;
     pNstGstVmcbCache->fVIntrMasking = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
+    pNstGstVmcbCache->TLBCtrl = pVmcbNstGstCtrl->TLBCtrl;
     pNstGstVmcbCache->fValid = true;
 }
…
     Log4Func(("\n"));

-    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
-    bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
-    bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
 #ifdef VBOX_WITH_NESTED_HWVIRT
-    bool const fGlobalIF = pCtx->hwvirt.svm.fGif;
+    bool const fGif = pCtx->hwvirt.svm.fGif;
 #else
-    bool const fGlobalIF = true;
+    bool const fGif = true;
 #endif
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-
-    SVMEVENT Event;
-    Event.u = 0;
-
-    /*
-     * If the global interrupt flag (GIF) isn't set, even NMIs are blocked.
-     * Only relevant when SVM capability is exposed to the guest.
-     */
-    if (fGlobalIF)
-    {
+    /*
+     * If the global interrupt flag (GIF) isn't set, even NMIs and other events are blocked.
+     * See AMD spec. Table 15-10. "Effect of the GIF on Interrupt Handling".
+     */
+    if (fGif)
+    {
+        bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+        bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
+        bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
+        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+
+        SVMEVENT Event;
+        Event.u = 0;
+
         /** @todo SMI. SMIs take priority over NMIs. */
         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
…
 /**
- * Injects any pending events into the guest if the guest is in a state to
- * receive them.
+ * Injects any pending events into the guest or nested-guest.
  *
  * @param pVCpu The cross context virtual CPU structure.
…
     bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+
+    /*
+     * When executing the nested-guest, we avoid assertions on whether the
+     * event injection is valid purely based on EFLAGS, as V_INTR_MASKING
+     * affects the interpretation of interruptibility (see CPUMCanSvmNstGstTakePhysIntr).
+     */
+#ifndef VBOX_WITH_NESTED_HWVIRT
     bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
+#endif

     if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
…
         SVMEVENT Event;
         Event.u = pVCpu->hm.s.Event.u64IntInfo;
-
         Assert(Event.n.u1Valid);
+
+#ifndef VBOX_WITH_NESTED_HWVIRT
         if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
         {
…
             Assert(!fIntShadow);
             NOREF(fBlockInt);
+#endif

         Log4(("Injecting pending HM event\n"));
…
     }

-    /* Update the guest interrupt shadow in the VMCB. */
+    /*
+     * Update the guest interrupt shadow in the guest or nested-guest VMCB.
+     *
+     * For nested-guests: We need to update it too for the scenario where IEM executes
+     * the nested-guest but execution later continues here with an interrupt shadow active.
+     */
     pVmcb->ctrl.u64IntShadow = !!fIntShadow;
 }
…
     /* The TLB flushing would've already been setup by the nested-hypervisor. */
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
+    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcbNstGst);
     Assert(hmR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
…
     /* Flush the appropriate tagged-TLB entries. */
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
-    hmR0SvmFlushTaggedTlb(pVCpu);
+    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcb);
     Assert(hmR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
…
     HMSvmNstGstVmExitNotify(pVCpu, pVmcbNstGst);

+    Log4(("hmR0SvmExecVmexit: uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pVmcbNstGst->ctrl.u64ExitCode,
+          pVmcbNstGst->ctrl.u64ExitInfo1, pVmcbNstGst->ctrl.u64ExitInfo2));
+
     /*
      * Write the nested-guest VMCB back to nested-guest memory.
…
      */
     pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0 = false;
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);

     if (RT_SUCCESS(rc))
…
         if (rc == VINF_SUCCESS)
             rc = VINF_SVM_VMEXIT;
-    }
-
-    Log(("hmR0SvmExecVmexit: Failed to write guest-VMCB at %#RGp\n", GCPhysVmcb));
+
+        Log4(("hmR0SvmExecVmexit: #VMEXIT success! rc=%d\n", rc));
+    }
+    else
+        Log(("hmR0SvmExecVmexit: Failed to write guest-VMCB at %#RGp, rc=%d\n", GCPhysVmcb, rc));
+
     return rc;
 }
…
     if (RT_FAILURE(rc))
     {
-        Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGstState->u64EFER));
+        Log(("hmR0SvmExecVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGstState->u64EFER));
         pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
         return hmR0SvmExecVmexit(pVCpu, pCtx);
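The main functional change in HMSVMR0.cpp is that hmR0SvmFlushTaggedTlb() now receives the guest context and the VMCB to operate on, and when a nested-guest is active it starts from the TLB-flush value the nested hypervisor requested (taken from the VMCB cache) instead of unconditionally starting from "flush nothing"; the host's own flush requirements are then layered on top as before. A small stand-alone sketch of that initial selection, collapsing the nested and non-nested paths into one simplified helper with illustrative constant values (not the real SVM_TLB_FLUSH_* definitions):

#include <cstdint>

/* Illustrative TLB-control values; stand-ins for the SVM TLB flush encodings. */
enum : uint8_t
{
    EXAMPLE_TLB_FLUSH_NOTHING = 0,
    EXAMPLE_TLB_FLUSH_ENTIRE  = 1
};

/* Pick the value the host starts from before adding its own flush decisions
   (forced flushes, ASID wraparound, flush-by-ASID support, ...). Only when the
   nested hypervisor asks for nothing can the host also start from nothing;
   otherwise the nested-guest's request is honoured. */
static uint8_t exampleInitialTlbFlush(bool fInNestedGuestMode, uint8_t bNstGstTlbFlush)
{
    if (!fInNestedGuestMode)
        return EXAMPLE_TLB_FLUSH_NOTHING;
    if (bNstGstTlbFlush == EXAMPLE_TLB_FLUSH_NOTHING)
        return EXAMPLE_TLB_FLUSH_NOTHING;
    return bNstGstTlbFlush;
}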