Changeset 81166 in vbox
Timestamp: Oct 9, 2019 7:37:37 AM
File: 1 edited
Legend: unchanged lines have no prefix; added lines are prefixed with "+", removed lines with "-".
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r81092)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r81166)

@@ -4152 +4152 @@
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/**
- * Does the preparations before executing nested-guest code in AMD-V.
+/**
+ * Does the preparations before executing guest code in AMD-V.
+ *
+ * This may cause longjmps to ring-3 and may even result in rescheduling to the
+ * recompiler. We must be cautious what we do here regarding committing
+ * guest-state information into the VMCB assuming we assuredly execute the guest
+ * in AMD-V. If we fall back to the recompiler after updating the VMCB and
+ * clearing the common-state (TRPM/forceflags), we must undo those changes so
+ * that the recompiler can (and should) use them when it resumes guest
+ * execution. Otherwise such operations must be done when we can no longer
+ * exit to ring-3.
  *
  * @returns VBox status code (informational status codes included).
@@ -4160 +4168 @@
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
- *
- * @remarks Same caveats regarding longjumps as hmR0SvmPreRunGuest applies.
- * @sa hmR0SvmPreRunGuest.
- */
-static int hmR0SvmPreRunGuestNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
-{
-    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+ */
+static int hmR0SvmPreRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
+{
     HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
-    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))   /* Redundant check to avoid unreachable code warning. */
+    if (pSvmTransient->fIsNestedGuest)
     {
         Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
@@ -4181 +4184 @@
     int rc = hmR0SvmCheckForceFlags(pVCpu);
     if (rc != VINF_SUCCESS)
-    {
-        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
         return rc;
-    }
 
     if (TRPMHasTrap(pVCpu))
@@ -4191 +4190 @@
     else if (!pVCpu->hm.s.Event.fPending)
     {
-        VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu);
-        if (   rcStrict != VINF_SUCCESS
-            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-        {
-            if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
-            return VBOXSTRICTRC_VAL(rcStrict);
-        }
-    }
-
-    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
+        if (!pSvmTransient->fIsNestedGuest)
+            hmR0SvmEvaluatePendingEvent(pVCpu);
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+        else
+        {
+            VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu);
+            if (   rcStrict != VINF_SUCCESS
+                || !CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+            {
+                if (!CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
+                return VBOXSTRICTRC_VAL(rcStrict);
+            }
+        }
+#endif
+    }
 
     /*
@@ -4212 +4216 @@
                     && pVCpu->hm.s.Event.fPending
                     && SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
-    {
         return VINF_EM_RAW_INJECT_TRPM_EVENT;
-    }
-
-#ifdef HMSVM_SYNC_FULL_GUEST_STATE
-    Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
-    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
-#endif
-
-    /*
-     * Export the nested-guest state bits that are not shared with the host in any way as we
-     * can longjmp or get preempted in the midst of exporting some of the state.
-     */
-    rc = hmR0SvmExportGuestStateNested(pVCpu);
-    AssertRCReturn(rc, rc);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
-
-    /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware-assisted SVM. */
-    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
-
-    /*
-     * No longjmps to ring-3 from this point on!!!
-     *
-     * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
-     * better than a kernel panic. This also disables flushing of the R0-logger instance.
-     */
-    VMMRZCallRing3Disable(pVCpu);
-
-    /*
-     * We disable interrupts so that we don't miss any interrupts that would flag preemption
-     * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
-     * preemption disabled for a while. Since this is purly to aid the
-     * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
-     * disable interrupt on NT.
-     *
-     * We need to check for force-flags that could've possible been altered since we last
-     * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
-     * see @bugref{6398}).
-     *
-     * We also check a couple of other force-flags as a last opportunity to get the EMT back
-     * to ring-3 before executing guest code.
-     */
-    pSvmTransient->fEFlags = ASMIntDisableFlags();
-    if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
-        || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
-    {
-        ASMSetFlags(pSvmTransient->fEFlags);
-        VMMRZCallRing3Enable(pVCpu);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
-        return VINF_EM_RAW_TO_R3;
-    }
-    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
-    {
-        ASMSetFlags(pSvmTransient->fEFlags);
-        VMMRZCallRing3Enable(pVCpu);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
-        return VINF_EM_RAW_INTERRUPT;
-    }
-    return VINF_SUCCESS;
-}
-#endif
-
-
-/**
- * Does the preparations before executing guest code in AMD-V.
- *
- * This may cause longjmps to ring-3 and may even result in rescheduling to the
- * recompiler. We must be cautious what we do here regarding committing
- * guest-state information into the VMCB assuming we assuredly execute the guest
- * in AMD-V. If we fall back to the recompiler after updating the VMCB and
- * clearing the common-state (TRPM/forceflags), we must undo those changes so
- * that the recompiler can (and should) use them when it resumes guest
- * execution. Otherwise such operations must be done when we can no longer
- * exit to ring-3.
- *
- * @returns VBox status code (informational status codes included).
- * @retval  VINF_SUCCESS if we can proceed with running the guest.
- * @retval  VINF_* scheduling changes, we have to go back to ring-3.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pSvmTransient   Pointer to the SVM transient structure.
- */
-static int hmR0SvmPreRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
-{
-    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
-    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
-
-    /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVCpu);
-    if (rc != VINF_SUCCESS)
-        return rc;
-
-    if (TRPMHasTrap(pVCpu))
-        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
-    else if (!pVCpu->hm.s.Event.fPending)
-        hmR0SvmEvaluatePendingEvent(pVCpu);
-
-    /*
-     * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
-     * Just do it in software, see @bugref{8411}.
-     * NB: If we could continue a task switch exit we wouldn't need to do this.
-     */
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
-        if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
-            return VINF_EM_RAW_INJECT_TRPM_EVENT;
 
 #ifdef HMSVM_SYNC_FULL_GUEST_STATE
@@ -4328 +4227 @@
      * longjmp or get preempted in the midst of exporting some of the state.
      */
-    rc = hmR0SvmExportGuestState(pVCpu);
+    if (!pSvmTransient->fIsNestedGuest)
+        rc = hmR0SvmExportGuestState(pVCpu);
+    else
+        rc = hmR0SvmExportGuestStateNested(pVCpu);
     AssertRCReturn(rc, rc);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
+
+    /* Ensure we've cached (and hopefully modified) the nested-guest VMCB for execution using hardware-assisted SVM. */
+    Assert(!pSvmTransient->fIsNestedGuest || pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
 
     /*
@@ -4338 +4243 @@
     if (pVCpu->hm.s.svm.fSyncVTpr)
     {
+        Assert(!pSvmTransient->fIsNestedGuest);
         PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
         if (pVM->hm.s.fTPRPatchingActive)
@@ -4885 +4791 @@
            ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuestNested(pVCpu, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
         if (   rc != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
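The net effect of the change is that the separate nested-guest pre-run path (hmR0SvmPreRunGuestNested) is folded into hmR0SvmPreRunGuest, which now branches on pSvmTransient->fIsNestedGuest. The toy program below is a minimal sketch of that dispatch pattern, not VirtualBox code: every identifier in it is a hypothetical stand-in for the real HMSVMR0.cpp symbols, and the real function does far more (force-flag checks, event injection, VMCB caching) than this illustration.

/* Hypothetical, self-contained illustration of the single-entry-point pattern
   introduced by this changeset (not VirtualBox code). */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-run SVMTRANSIENT structure; only the flag that the
   merged function branches on is modelled here. */
typedef struct SKETCHSVMTRANSIENT
{
    bool fIsNestedGuest;
} SKETCHSVMTRANSIENT;

/* Stand-ins for hmR0SvmExportGuestState / hmR0SvmExportGuestStateNested. */
static int sketchExportGuestState(void)       { puts("export guest state");        return 0; }
static int sketchExportNestedGuestState(void) { puts("export nested-guest state"); return 0; }

/* Single pre-run entry point for both modes, mirroring the control flow of the
   merged hmR0SvmPreRunGuest() shown in the diff above. */
static int sketchPreRunGuest(SKETCHSVMTRANSIENT *pSvmTransient)
{
    int rc;
    if (!pSvmTransient->fIsNestedGuest)
        rc = sketchExportGuestState();
    else
        rc = sketchExportNestedGuestState();
    return rc;
}

int main(void)
{
    SKETCHSVMTRANSIENT Normal = { false };
    SKETCHSVMTRANSIENT Nested = { true  };
    sketchPreRunGuest(&Normal);    /* prints "export guest state" */
    sketchPreRunGuest(&Nested);    /* prints "export nested-guest state" */
    return 0;
}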