VirtualBox: Changeset 71841 in vbox for trunk


Timestamp: Apr 12, 2018 10:09:25 AM (7 years ago)
Author:    vboxsync
Message:   VMM/HMSVMR0: Unify hmR0SvmPreRunGuestCommitted for the guest and nested-guest cases, as they do almost identical work.
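
In outline, the change replaces two near-duplicate functions (hmR0SvmPreRunGuestCommitted and hmR0SvmPreRunGuestCommittedNested) with a single function that selects the active VMCB once at run time, via CPUMIsGuestInSvmNestedHwVirtMode(), and branches only at the few points where the two cases genuinely differ (TSC offsetting and MSRPM handling). Below is a minimal, self-contained C sketch of that pattern; the types and names are hypothetical stand-ins for illustration, not the VirtualBox API. The actual change is in the diff further down.

/* Minimal sketch of the unification pattern: one committed pre-run path
 * that picks its VMCB once, up front. Types and names are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

typedef struct VMCB { const char *pszOwner; } VMCB;

typedef struct VCPU {
    bool fInNestedGuestMode;   /* stands in for CPUMIsGuestInSvmNestedHwVirtMode() */
    VMCB GuestVmcb;            /* stands in for pVCpu->hm.s.svm.pVmcb */
    VMCB NestedGuestVmcb;      /* stands in for pCtx->hwvirt.svm.CTX_SUFF(pVmcb) */
} VCPU;

/* Before the change, a "guest" and a "nested-guest" variant duplicated
   this body; now a single function branches only where they differ. */
static void preRunGuestCommitted(VCPU *pVCpu)
{
    /* Select the VMCB once; everything below operates on it uniformly. */
    VMCB *pVmcb = !pVCpu->fInNestedGuestMode ? &pVCpu->GuestVmcb
                                             : &pVCpu->NestedGuestVmcb;
    printf("injecting pending events into the %s VMCB\n", pVmcb->pszOwner);

    /* Branch only where the cases really differ, e.g. MSRPM merging. */
    if (pVCpu->fInNestedGuestMode)
        printf("merging guest and nested-guest MSR permission bitmaps\n");
}

int main(void)
{
    VCPU VCpu = { .fInNestedGuestMode = true,
                  .GuestVmcb       = { "guest" },
                  .NestedGuestVmcb = { "nested-guest" } };
    preRunGuestCommitted(&VCpu);
    return 0;
}

The payoff is the usual one for this kind of refactoring: the shared steps (event injection, FPU state, shared-state loading, TLB flushing) exist once, so future fixes cannot drift between two copies.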

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r71838)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r71841)
@@ -2383,9 +2383,10 @@
  * whether the nested-guest is intercepting it or not.
  *
- * @param   pHostCpu    Pointer to the physical CPU HM info. struct.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the nested-guest-CPU context.
- */
-static void hmR0SvmMergeMsrpm(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCPUMCTX pCtx)
+ * @param   pHostCpu        Pointer to the physical CPU HM info. struct.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pCtx            Pointer to the nested-guest-CPU context.
+ * @param   pVmcbNstGst     Pointer to the nested-guest VMCB.
+ */
+static void hmR0SvmMergeMsrpm(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcbNstGst)
 {
     uint64_t const *pu64GstMsrpm    = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;
     
@@ -4306,8 +4307,8 @@
 
 
-#ifdef VBOX_WITH_NESTED_HWVIRT
-/**
- * Prepares to run nested-guest code in AMD-V and we've committed to doing so. This
- * means there is no backing out to ring-3 or anywhere else at this point.
+/**
+ * Prepares to run guest code in AMD-V and we've committed to doing so. This
+ * means there is no backing out to ring-3 or anywhere else at this
+ * point.
  *
  * @param   pVM             The cross context VM structure.
     
@@ -4319,18 +4320,18 @@
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmPreRunGuestCommittedNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
 
     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);            /* Indicate the start of guest execution. */
 
-    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcbNstGst);
-
-    /* Pre-load the guest FPU state. */
+    bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
+    PSVMVMCB pVmcb = !fInNestedGuestMode ? pVCpu->hm.s.svm.pVmcb : pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+
+    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
+
     if (!CPUMIsGuestFPUStateActive(pVCpu))
     {
     
@@ -4342,7 +4343,7 @@
     }
 
-    /* Load the state shared between host and nested-guest (FPU, debug). */
+    /* Load the state shared between host and guest (FPU, debug). */
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
-        hmR0SvmLoadSharedState(pVCpu, pVmcbNstGst, pCtx);
+        hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
 
     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);             /* Preemption might set this, nothing to do on AMD-V. */
     
@@ -4357,5 +4358,8 @@
         || fMigratedHostCpu)
     {
-        hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);
+        if (!fInNestedGuestMode)
+            hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcb);
+        else
+            hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcb);
         pSvmTransient->fUpdateTscOffsetting = false;
     }
     
@@ -4363,5 +4367,5 @@
     /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */
     if (fMigratedHostCpu)
-        pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
+        pVmcb->ctrl.u32VmcbCleanBits = 0;
 
     /* Store status of the shared guest-host state at the time of VMRUN. */
     
@@ -4379,13 +4383,19 @@
     }
 
-    /* Merge the guest and nested-guest MSRPM. */
-    hmR0SvmMergeMsrpm(pHostCpu, pVCpu, pCtx);
-
-    /* Update the nested-guest VMCB to use the newly merged MSRPM. */
-    pVmcbNstGst->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
-
-    /* The TLB flushing would've already been setup by the nested-hypervisor. */
+    uint8_t *pbMsrBitmap;
+    if (!fInNestedGuestMode)
+        pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
+    else
+    {
+        hmR0SvmMergeMsrpm(pHostCpu, pVCpu, pCtx, pVmcb);
+
+        /* Update the nested-guest VMCB with the newly merged MSRPM.*/
+        pVmcb->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
+        pbMsrBitmap = (uint8_t *)pHostCpu->n.svm.pvNstGstMsrpm;
+    }
+
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
-    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcbNstGst, pHostCpu);
+    /* Flush the appropriate tagged-TLB entries. */
+    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcb, pHostCpu);
     Assert(pVCpu->hm.s.idLastCpu == idHostCpu);
 
     
@@ -4401,134 +4411,12 @@
      * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
      */
-    uint8_t *pbMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-    if (    (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
-        && !(pVmcbNstGst->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
-    {
-        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
-
-        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
-        uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
-        if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
-            ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
-        pSvmTransient->fRestoreTscAuxMsr = true;
-    }
-    else
-    {
-        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
-        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
-        pSvmTransient->fRestoreTscAuxMsr = false;
-    }
-
-    /*
-     * If VMCB Clean bits isn't supported by the CPU or exposed by the guest,
-     * mark all state-bits as dirty indicating to the CPU to re-load from VMCB.
-     */
-    bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
-    if (!fSupportsVmcbCleanBits)
-        pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
-}
-#endif
-
-
-/**
- * Prepares to run guest code in AMD-V and we've committed to doing so. This
- * means there is no backing out to ring-3 or anywhere else at this
- * point.
- *
- * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
- * @param   pSvmTransient   Pointer to the SVM transient structure.
- *
- * @remarks Called with preemption disabled.
- * @remarks No-long-jump zone!!!
- */
-static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-    Assert(VMMR0IsLogFlushDisabled(pVCpu));
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
-
-    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);            /* Indicate the start of guest execution. */
-
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
-
-    if (!CPUMIsGuestFPUStateActive(pVCpu))
-    {
-        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
-        CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
-    }
-
-    /* Load the state shared between host and guest (FPU, debug). */
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
-        hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
-
-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);             /* Preemption might set this, nothing to do on AMD-V. */
-    AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
-
-    PHMGLOBALCPUINFO pHostCpu         = hmR0GetCurrentCpu();
-    RTCPUID const    idHostCpu        = pHostCpu->idCpu;
-    bool const       fMigratedHostCpu = idHostCpu != pVCpu->hm.s.idLastCpu;
-
-    /* Setup TSC offsetting. */
-    if (   pSvmTransient->fUpdateTscOffsetting
-        || fMigratedHostCpu)
-    {
-        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcb);
-        pSvmTransient->fUpdateTscOffsetting = false;
-    }
-
-    /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */
-    if (fMigratedHostCpu)
-        pVmcb->ctrl.u32VmcbCleanBits = 0;
-
-    /* Store status of the shared guest-host state at the time of VMRUN. */
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
-    if (CPUMIsGuestInLongModeEx(pCtx))
-    {
-        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
-        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
-    }
-    else
-#endif
-    {
-        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
-        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
-    }
-
-    /* Flush the appropriate tagged-TLB entries. */
-    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
-    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcb, pHostCpu);
-    Assert(pVCpu->hm.s.idLastCpu == idHostCpu);
-
-    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
-
-    TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about
-                                                                   to start executing. */
-
-    /*
-     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
-     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
-     *
-     * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
-     */
-    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
     if (    (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
         && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
     {
+        uint64_t const uGuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
+        pVCpu->hm.s.u64HostTscAux   = ASMRdMsr(MSR_K8_TSC_AUX);
+        if (uGuestTscAux != pVCpu->hm.s.u64HostTscAux)
+            ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
-
-        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
-        uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
-        if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
-            ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
         pSvmTransient->fRestoreTscAuxMsr = true;
     }
     
@@ -4536,9 +4424,13 @@
     {
         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
-        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
         pSvmTransient->fRestoreTscAuxMsr = false;
     }
-
-    /* If VMCB Clean bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
+    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+
+    /*
+     * If VMCB Clean bits isn't supported by the CPU or exposed to the guest in the
+     * nested virtualization case, mark all state-bits as dirty indicating to the
+     * CPU to re-load from VMCB.
+     */
     bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
     if (!fSupportsVmcbCleanBits)
     
@@ -4977,5 +4869,5 @@
          * This also disables flushing of the R0-logger instance (if any).
          */
-        hmR0SvmPreRunGuestCommittedNested(pVM, pVCpu, pCtx, &SvmTransient);
+        hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
 
         rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);
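
A note on the TSC_AUX handling visible in the code above: when the CPU supports RDTSCP and the (nested-)guest is not intercepting it, the host's TSC_AUX MSR is saved and the guest's value is written to the hardware before VMRUN, so that guest RDTSCP executions (which do not cause VM-exits) read the guest value; fRestoreTscAuxMsr then tells the post-run path to restore the host value. Below is a self-contained sketch of that save/swap/restore pattern, with hypothetical helpers standing in for ASMRdMsr/ASMWrMsr and the SVM transient state; it is an illustration, not the VirtualBox implementation.

/* Self-contained sketch of the TSC_AUX save/swap/restore pattern used
 * around VMRUN. All names here are hypothetical stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t g_uHwTscAux = 0x11;  /* stands in for the physical MSR_K8_TSC_AUX */

static uint64_t msrRead(void)           { return g_uHwTscAux; }
static void     msrWrite(uint64_t uVal) { g_uHwTscAux = uVal; }

typedef struct TRANSIENT {
    bool     fRestoreTscAuxMsr;  /* analogous to pSvmTransient->fRestoreTscAuxMsr */
    uint64_t uHostTscAux;        /* analogous to pVCpu->hm.s.u64HostTscAux */
} TRANSIENT;

/* Before VMRUN: if the guest may execute RDTSCP without an intercept,
   make the hardware MSR hold the guest's value. */
static void preRun(TRANSIENT *pT, bool fRdTscpPassthru, uint64_t uGuestTscAux)
{
    pT->fRestoreTscAuxMsr = false;
    if (fRdTscpPassthru)
    {
        pT->uHostTscAux = msrRead();     /* save the host value */
        if (uGuestTscAux != pT->uHostTscAux)
            msrWrite(uGuestTscAux);      /* expose the guest value */
        pT->fRestoreTscAuxMsr = true;
    }
}

/* After the VM-exit: put the host value back. */
static void postRun(TRANSIENT *pT)
{
    if (pT->fRestoreTscAuxMsr)
        msrWrite(pT->uHostTscAux);
}

int main(void)
{
    TRANSIENT T;
    preRun(&T, true /*fRdTscpPassthru*/, 0x22 /*uGuestTscAux*/);
    printf("during VMRUN: TSC_AUX=%#llx\n", (unsigned long long)msrRead());
    postRun(&T);
    printf("after VM-exit: TSC_AUX=%#llx\n", (unsigned long long)msrRead());
    return 0;
}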