VirtualBox

Changeset 71909 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: Apr 19, 2018 5:46:44 AM
Author:    vboxsync
Message:   VMM/HMSVMR0: Tidy and unify the TSC offsetting code.

File: 1 edited

Legend:
  @@ -old +new @@   starting lines of a hunk in the old (r71908) and new (r71909) file
  (no prefix)       unmodified
  -                 removed
  +                 added
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
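For orientation: the diff below folds the separate hmR0SvmUpdateTscOffsettingNested() path into a single hmR0SvmUpdateTscOffsetting() that receives the guest-CPU context and handles both the guest and the nested-guest case. A condensed sketch of the unified body, using only identifiers that appear in the diff (statistics counters and most comments omitted; the full function is in the hunk starting at new line 3101):

    /* Decide whether RDTSC/RDTSCP can run unintercepted and fetch the TSC offset. */
    bool       fParavirtTsc;
    uint64_t   uTscOffset;
    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);

    bool fIntercept;
    if (fCanUseRealTsc)
        fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
    else
    {
        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
        fIntercept = true;
    }

    if (!fIntercept)
    {
        /* For a nested-guest, add its VMCB TSC offset on top of the guest offset. */
        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
            uTscOffset = HMSvmNstGstApplyTscOffset(pVCpu, uTscOffset);
        pVmcb->ctrl.u64TSCOffset      = uTscOffset;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    }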

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r71908)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r71909)
@@ -1427 +1427 @@
}

-#if 0
+
/**
 * Sets a control intercept in the specified VMCB.
     
@@ -1447 +1447 @@
 * Clears a control intercept in the specified VMCB.
 *
+ * @returns @c true if the intercept is still set, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pCtx            Pointer to the guest-CPU context.
     
@@ -1456 +1457 @@
 *          are not intercepting it.
 */
-DECLINLINE(void) hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
+DECLINLINE(bool) hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
{
    if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
     
@@ -1477 +1478 @@
        }
    }
-}
-#endif
+
+    return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept);
+}
+

/**
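With the #if 0 guard removed, hmR0SvmClearCtrlIntercept() becomes live code and now reports whether the intercept is still set after the clear attempt (for instance when the nested-hypervisor still wants it, per the doc comment above). A minimal usage sketch of the new contract; fStillSet is an illustrative name, not an identifier from this changeset:

    /* Try to stop intercepting RDTSC/RDTSCP; the helper clears the bits only when
       they are no longer needed and returns whether the intercept remains active. */
    bool const fStillSet = hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb,
                                                     SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
    if (!fStillSet)
    {
        /* The intercepts are really gone: safe to rely on hardware TSC offsetting. */
    }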
     
@@ -2193 +2196 @@
        }

-        /** @todo This doesn't make sense. Re-think and remove. */
-#if 1
        /*
         * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
     
@@ -2214 +2215 @@
                                              |  SVM_CTRL_INTERCEPT_STGI;
        }
-#endif

        /* Finally, update the VMCB clean bits. */
     
@@ -3095 +3095 @@


-#ifdef VBOX_WITH_NESTED_HWVIRT
-/**
- * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
- * intercepts for the nested-guest.
- *
- * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the nested guest-CPU context.
- * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
- *
- * @remarks No-long-jump zone!!!
- */
-static void hmR0SvmUpdateTscOffsettingNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcbNstGst)
-{
-    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
-
-    bool       fParavirtTsc;
-    uint64_t   uTscOffset;
-    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
-
-    PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
-
-    /*
-     * Only avoid intercepting if we determined the host TSC (++) is stable enough
-     * to not intercept -and- the nested-hypervisor itself does not want to intercept it.
-     */
-    if (    fCanUseRealTsc
-        && !(pVmcbNstGstCache->u64InterceptCtrl & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
-    {
-        pVmcbNstGstCtrl->u64InterceptCtrl &= ~(SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
-
-        /* Apply the nested-guest VMCB's TSC offset over the guest one. */
-        uTscOffset = HMSvmNstGstApplyTscOffset(pVCpu, uTscOffset);
-
-        /* Update the nested-guest VMCB with the combined TSC offset (of guest and nested-guest). */
-        pVmcbNstGstCtrl->u64TSCOffset = uTscOffset;
-    }
-    else
-    {
-        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP;
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
-    }
-
-    /* Finally update the VMCB clean bits since we touched the intercepts as well as the TSC offset. */
-    pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
-
-    if (fParavirtTsc)
-    {
-        /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
-           information before every VM-entry, hence disable it for performance sake. */
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
-    }
-}
-#endif
-
-
/**
 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
     
@@ -3159 +3101 @@
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU or nested-guest-CPU context.
 * @param   pVmcb       Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
-static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PSVMVMCB pVmcb)
-{
-    bool fParavirtTsc;
-    bool fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
+static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
+{
+    /*
+     * Avoid intercepting RDTSC/RDTSCP if we determined the host TSC (++) is stable
+     * and in case of a nested-guest, if the nested-VMCB specifies it is not intercepting
+     * RDTSC/RDTSCP as well.
+     */
+    bool     fParavirtTsc;
+    uint64_t uTscOffset;
+    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
+
+    bool fIntercept;
    if (fCanUseRealTsc)
-    {
-        pVmcb->ctrl.u64InterceptCtrl &= ~(SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
+        fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
+    else
+    {
+        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
+        fIntercept = true;
+    }
+
+    if (!fIntercept)
+    {
+        /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
+        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+            uTscOffset = HMSvmNstGstApplyTscOffset(pVCpu, uTscOffset);
+
+        /* Update the TSC offset in the VMCB and the relevant clean bits. */
+        pVmcb->ctrl.u64TSCOffset = uTscOffset;
+        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+
        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    }
    else
-    {
-        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
-    }
-    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
-
-    /** @todo later optimize this to be done elsewhere and not before every
-     *        VM-entry. */
+
+    /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
+       information before every VM-entry, hence we have nothing to do here at the moment. */
    if (fParavirtTsc)
-    {
-        /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
-           information before every VM-entry, hence disable it for performance sake. */
-#if 0
-        int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
-        AssertRC(rc);
-#endif
        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
-    }
}
     
@@ -4427 +4381 @@
        || fMigratedHostCpu)
    {
-        if (!fInNestedGuestMode)
-            hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcb);
-        else
-            hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcb);
+        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pCtx, pVmcb);
        pSvmTransient->fUpdateTscOffsetting = false;
    }
     
@@ -4592 +4543 @@
    {
        /*
-         * Undo what we did in hmR0SvmUpdateTscOffsettingNested() but don't restore the
-         * nested-guest VMCB TSC offset here. It shall eventually be restored on #VMEXIT
-         * later by HMSvmNstGstVmExitNotify().
+         * Undo what we did in hmR0SvmUpdateTscOffsetting() and HMSvmNstGstApplyTscOffset()
+         * but don't restore the nested-guest VMCB TSC offset here. It shall eventually be
+         * restored on #VMEXIT in HMSvmNstGstVmExitNotify().
         */
        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset - pVmcbNstGstCache->u64TSCOffset);
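On the arithmetic in that last hunk: per the comments in the diff, the offset hmR0SvmUpdateTscOffsetting() writes to the nested-guest VMCB is the guest offset plus the nested-guest offset, while the nested-VMCB cache still holds only the offset the nested-hypervisor programmed, so the subtraction recovers the guest-level TSC to record as last seen. A tiny self-contained illustration with made-up numbers; the variable names are illustrative, not VMM identifiers:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t const uHostTsc         = 1000000; /* stand-in for ASMReadTSC() */
        uint64_t const uGuestTscOffset  = 500;     /* offset the VMM gives the guest */
        uint64_t const uNstGstTscOffset = 40;      /* offset the nested-hypervisor programmed */

        /* What the combined offset in the nested-guest VMCB would be ... */
        uint64_t const uVmcbOffset   = uGuestTscOffset + uNstGstTscOffset;
        /* ... and what the nested-VMCB cache preserved. */
        uint64_t const uCachedOffset = uNstGstTscOffset;

        /* The undo above: host TSC plus the difference yields the guest-level TSC. */
        assert(uHostTsc + uVmcbOffset - uCachedOffset == uHostTsc + uGuestTscOffset);
        return 0;
    }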