VirtualBox

Changeset 81166 in vbox


Timestamp: Oct 9, 2019 7:37:37 AM
Author:    vboxsync
Message:   VMM/HMSVMR0: Nested SVM: bugref:7243 Coalesce hmR0SvmPreRunGuestNested into hmR0SvmPreRunGuest.

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r81092)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r81166)
@@ -4150 +4150 @@
 
 
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/**
- * Does the preparations before executing nested-guest code in AMD-V.
+/**
+ * Does the preparations before executing guest code in AMD-V.
+ *
+ * This may cause longjmps to ring-3 and may even result in rescheduling to the
+ * recompiler. We must be cautious what we do here regarding committing
+ * guest-state information into the VMCB assuming we assuredly execute the guest
+ * in AMD-V. If we fall back to the recompiler after updating the VMCB and
+ * clearing the common-state (TRPM/forceflags), we must undo those changes so
+ * that the recompiler can (and should) use them when it resumes guest
+ * execution. Otherwise such operations must be done when we can no longer
+ * exit to ring-3.
  *
  * @returns VBox status code (informational status codes included).
@@ -4160 +4168 @@
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
- *
- * @remarks Same caveats regarding longjumps as hmR0SvmPreRunGuest applies.
- * @sa      hmR0SvmPreRunGuest.
- */
-static int hmR0SvmPreRunGuestNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
-{
-    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+ */
+static int hmR0SvmPreRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
+{
     HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
-    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) /* Redundant check to avoid unreachable code warning. */
+    if (pSvmTransient->fIsNestedGuest)
     {
         Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
@@ -4181 +4184 @@
     int rc = hmR0SvmCheckForceFlags(pVCpu);
     if (rc != VINF_SUCCESS)
-    {
-        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
         return rc;
-    }
 
     if (TRPMHasTrap(pVCpu))
@@ -4191 +4190 @@
     else if (!pVCpu->hm.s.Event.fPending)
     {
-        VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu);
-        if (    rcStrict != VINF_SUCCESS
-            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-        {
-            if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
-            return VBOXSTRICTRC_VAL(rcStrict);
-        }
-    }
-
-    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
+        if (!pSvmTransient->fIsNestedGuest)
+            hmR0SvmEvaluatePendingEvent(pVCpu);
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+        else
+        {
+            VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu);
+            if (    rcStrict != VINF_SUCCESS
+                || !CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+            {
+                if (!CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
+                return VBOXSTRICTRC_VAL(rcStrict);
+            }
+        }
+#endif
+    }
 
     /*
@@ -4212 +4216 @@
                     &&  pVCpu->hm.s.Event.fPending
                     &&  SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
-    {
         return VINF_EM_RAW_INJECT_TRPM_EVENT;
-    }
-
-#ifdef HMSVM_SYNC_FULL_GUEST_STATE
-    Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
-    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
-#endif
-
-    /*
-     * Export the nested-guest state bits that are not shared with the host in any way as we
-     * can longjmp or get preempted in the midst of exporting some of the state.
-     */
-    rc = hmR0SvmExportGuestStateNested(pVCpu);
-    AssertRCReturn(rc, rc);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
-
-    /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware-assisted SVM. */
-    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
-
-    /*
-     * No longjmps to ring-3 from this point on!!!
-     *
-     * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
-     * better than a kernel panic. This also disables flushing of the R0-logger instance.
-     */
-    VMMRZCallRing3Disable(pVCpu);
-
-    /*
-     * We disable interrupts so that we don't miss any interrupts that would flag preemption
-     * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
-     * preemption disabled for a while.  Since this is purly to aid the
-     * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
-     * disable interrupt on NT.
-     *
-     * We need to check for force-flags that could've possible been altered since we last
-     * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
-     * see @bugref{6398}).
-     *
-     * We also check a couple of other force-flags as a last opportunity to get the EMT back
-     * to ring-3 before executing guest code.
-     */
-    pSvmTransient->fEFlags = ASMIntDisableFlags();
-    if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
-        || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
-    {
-        ASMSetFlags(pSvmTransient->fEFlags);
-        VMMRZCallRing3Enable(pVCpu);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
-        return VINF_EM_RAW_TO_R3;
-    }
-    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
-    {
-        ASMSetFlags(pSvmTransient->fEFlags);
-        VMMRZCallRing3Enable(pVCpu);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
-        return VINF_EM_RAW_INTERRUPT;
-    }
-    return VINF_SUCCESS;
-}
-#endif
-
-
-/**
- * Does the preparations before executing guest code in AMD-V.
- *
- * This may cause longjmps to ring-3 and may even result in rescheduling to the
- * recompiler. We must be cautious what we do here regarding committing
- * guest-state information into the VMCB assuming we assuredly execute the guest
- * in AMD-V. If we fall back to the recompiler after updating the VMCB and
- * clearing the common-state (TRPM/forceflags), we must undo those changes so
- * that the recompiler can (and should) use them when it resumes guest
- * execution. Otherwise such operations must be done when we can no longer
- * exit to ring-3.
- *
- * @returns VBox status code (informational status codes included).
- * @retval VINF_SUCCESS if we can proceed with running the guest.
- * @retval VINF_* scheduling changes, we have to go back to ring-3.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pSvmTransient   Pointer to the SVM transient structure.
- */
-static int hmR0SvmPreRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
-{
-    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
-    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
-
-    /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVCpu);
-    if (rc != VINF_SUCCESS)
-        return rc;
-
-    if (TRPMHasTrap(pVCpu))
-        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
-    else if (!pVCpu->hm.s.Event.fPending)
-        hmR0SvmEvaluatePendingEvent(pVCpu);
-
-    /*
-     * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
-     * Just do it in software, see @bugref{8411}.
-     * NB: If we could continue a task switch exit we wouldn't need to do this.
-     */
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
-        if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
-            return VINF_EM_RAW_INJECT_TRPM_EVENT;
 
 #ifdef HMSVM_SYNC_FULL_GUEST_STATE
@@ -4328 +4227 @@
      * longjmp or get preempted in the midst of exporting some of the state.
      */
-    rc = hmR0SvmExportGuestState(pVCpu);
+    if (!pSvmTransient->fIsNestedGuest)
+        rc = hmR0SvmExportGuestState(pVCpu);
+    else
+        rc = hmR0SvmExportGuestStateNested(pVCpu);
     AssertRCReturn(rc, rc);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
+
+    /* Ensure we've cached (and hopefully modified) the nested-guest VMCB for execution using hardware-assisted SVM. */
+    Assert(!pSvmTransient->fIsNestedGuest || pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
 
     /*
@@ -4338 +4243 @@
     if (pVCpu->hm.s.svm.fSyncVTpr)
     {
+        Assert(!pSvmTransient->fIsNestedGuest);
         PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
         if (pVM->hm.s.fTPRPatchingActive)
@@ -4885 +4791 @@
            ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuestNested(pVCpu, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
         if (    rc != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
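
In outline, the coalescing replaces the separate nested-guest pre-run routine with a single hmR0SvmPreRunGuest() that branches on pSvmTransient->fIsNestedGuest at the two points where the paths actually differ: pending-event evaluation and guest-state export. The condensed sketch below only illustrates that shape, pieced together from the hunks above; the "Sketch" suffix marks it as hypothetical, and the preprocessor guards, statistics counters, NMI reinjection workaround, ring-3 exit checks and interrupt-disable window of the real function are omitted.

    /* Condensed, hypothetical sketch of the coalesced pre-run shape after r81166.
       Assumes the surrounding HMSVMR0.cpp context (types and helpers shown in the
       hunks above); this is not the actual function body. */
    static int hmR0SvmPreRunGuestSketch(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
    {
        /* Force-flag actions that may send us back to ring-3 (common to both paths). */
        int rc = hmR0SvmCheckForceFlags(pVCpu);
        if (rc != VINF_SUCCESS)
            return rc;

        /* Pending-event evaluation: one routine, selected by the transient flag. */
        if (TRPMHasTrap(pVCpu))
            hmR0SvmTrpmTrapToPendingEvent(pVCpu);
        else if (!pVCpu->hm.s.Event.fPending)
        {
            if (!pSvmTransient->fIsNestedGuest)
                hmR0SvmEvaluatePendingEvent(pVCpu);
            else
            {
                /* A nested #VMEXIT during evaluation leaves nested-guest mode; the
                   caller (the nested run loop) then drops out, as in the last hunk. */
                VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu);
                if (   rcStrict != VINF_SUCCESS
                    || !CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
                    return VBOXSTRICTRC_VAL(rcStrict);
            }
        }

        /* Guest-state export: the same flag picks the nested or regular exporter. */
        if (!pSvmTransient->fIsNestedGuest)
            rc = hmR0SvmExportGuestState(pVCpu);
        else
            rc = hmR0SvmExportGuestStateNested(pVCpu);
        return rc;
    }

Keeping a single routine means the ring-3 exit checks, the interrupt-disable window and the final force-flag re-check now exist in exactly one place, which is the point of the coalescing.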