VirtualBox

Changeset 48196 in vbox for trunk/src/VBox


Timestamp:
Aug 30, 2013 2:51:26 PM
Author:
vboxsync
Message:

VMM/HMSVMR0: Thread-context hook fixes.
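
The substance of the fix: guest-only state may be loaded while the thread is still preemptible, but state shared with the host (FPU, debug registers) has to be deferred until preemption is disabled, since a thread-context switch in between would clobber it. A minimal compilable sketch of the dirty-flag split this changeset introduces; the flag names mirror the real ones, everything else is a simplified stand-in:

    #include <cassert>
    #include <cstdint>

    // Flag names mirror the real ones; values and types are simplified.
    enum : uint32_t
    {
        HM_CHANGED_GUEST_CR0   = 0x1,
        HM_CHANGED_GUEST_DEBUG = 0x2,
        HM_CHANGED_HOST_GUEST_SHARED_STATE = HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG
    };

    struct VCpuSketch
    {
        uint32_t fContextUseFlags;   // dirty-state bits, cf. pVCpu->hm.s.fContextUseFlags
        bool     fPreemptDisabled;   // stand-in for the real preemption check
    };

    // Preemption-tolerant: loads everything except host/guest shared state.
    static void loadGuestStateSketch(VCpuSketch &vcpu)
    {
        vcpu.fContextUseFlags &= HM_CHANGED_HOST_GUEST_SHARED_STATE; // shared bits stay dirty
    }

    // Must not be preempted: a context switch here could clobber FPU/DRx.
    static void loadSharedStateSketch(VCpuSketch &vcpu)
    {
        assert(vcpu.fPreemptDisabled);
        vcpu.fContextUseFlags &= ~HM_CHANGED_HOST_GUEST_SHARED_STATE;
    }

    int main()
    {
        VCpuSketch vcpu = { HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG | 0x10u, false };
        loadGuestStateSketch(vcpu);    // safe while still preemptible
        vcpu.fPreemptDisabled = true;  // interrupts/preemption off before the world switch
        loadSharedStateSketch(vcpu);
        assert(vcpu.fContextUseFlags == 0);
        return 0;
    }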

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48183 → r48196)
@@ -1051 +1051 @@
 
 /**
- * Loads the guest control registers (CR0, CR2, CR3, CR4) into the VMCB.
+ * Loads the guest CR0 control register into the guest-state area in the VMCB.
+ * Although the guest CR0 is a separate field in the VMCB we have to consider
+ * the FPU state itself which is shared between the host and the guest.
  *
  * @returns VBox status code.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pVmcb       Pointer to the VMCB.
- * @param   pCtx        Pointer the guest-CPU context.
+ * @param   pCtx        Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     /*
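
The rewritten comment is the heart of hmR0SvmLoadSharedCR0: the FPU may still hold host state, in which case the guest must see CR0.TS and CR0.MP set so its first FPU instruction traps with #NM and the state can be loaded lazily. A compilable sketch of that decision, with g_fGuestFpuActive standing in for CPUMIsGuestFPUStateActive():

    #include <cstdint>

    static const uint64_t X86_CR0_MP = UINT64_C(0x0002);  // monitor coprocessor
    static const uint64_t X86_CR0_TS = UINT64_C(0x0008);  // task switched

    // Stand-in for CPUMIsGuestFPUStateActive(pVCpu).
    static bool g_fGuestFpuActive = false;

    // Compute the CR0 value placed into the VMCB guest-state area.
    static uint64_t sketchSharedCr0(uint64_t uGuestCr0)
    {
        if (!g_fGuestFpuActive)
        {
            // The host still owns the FPU: force TS+MP so the guest's first FPU
            // instruction raises #NM, which the VMM intercepts to lazily load
            // the guest FPU state (see the #NM handler further down this file).
            uGuestCr0 |= X86_CR0_TS | X86_CR0_MP;
        }
        return uGuestCr0;
    }

    int main()
    {
        uint64_t uCr0 = sketchSharedCr0(UINT64_C(0x80000031)); // PG|NE|ET|PE
        return (uCr0 & X86_CR0_TS) ? 0 : 1;                    // TS forced while host owns FPU
    }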
     
@@ -1121 +1123 @@
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     }
+}
+
+
+/**
+ * Loads the guest control registers (CR2, CR3, CR4) into the VMCB.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVmcb       Pointer to the VMCB.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     /*
     
@@ -1324 +1342 @@
 
 /**
- * Loads the guest debug registers (DR6, DR7) into the VMCB and programs the
- * necessary intercepts accordingly.
+ * Loads the guest state into the VMCB and programs the necessary intercepts
+ * accordingly.
  *
  * @param   pVCpu       Pointer to the VMCPU.
     
@@ -1334 +1352 @@
  * @remarks Requires EFLAGS to be up-to-date in the VMCB!
  */
-DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+DECLINLINE(void) hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
     
@@ -1386 +1404 @@
         fInterceptDB = true;
         fInterceptMovDRx = true;
-        Log5(("hmR0SvmLoadGuestDebugRegs: Loaded hyper DRx\n"));
+        Log5(("hmR0SvmLoadSharedDebugState: Loaded hyper DRx\n"));
     }
     else
     
@@ -1414 +1432 @@
             Assert(!CPUMIsHyperDebugStateActive(pVCpu));
             Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
-            Log5(("hmR0SvmLoadGuestDebugRegs: Loaded guest DRx\n"));
+            Log5(("hmR0SvmLoadSharedDebugState: Loaded guest DRx\n"));
         }
         /*
     
@@ -1685 +1703 @@
     pVmcb->guest.u64RAX    = pCtx->rax;
 
-    /* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
-    hmR0SvmLoadGuestDebugRegs(pVCpu, pVmcb, pCtx);
-
     rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     
@@ -1715 +1730 @@
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;
+}
+
+
+/**
+ * Loads the state shared between the host and guest into the
+ * VMCB.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
+        hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
+
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
+        hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
+
+    AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",
+                                                                                     pVCpu->hm.s.fContextUseFlags));
 }
 
     
@@ -1929 +1971 @@
 
     /* Deregister hook now that we've left HM context before re-enabling preemption. */
-    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
-     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
     if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
         VMMR0ThreadCtxHooksDeregister(pVCpu);
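
The deleted @todo concerned VT-x (deregistering there forces a VMCLEAR on every longjmp/exit-to-r3); it appears not to apply to AMD-V, which has no VMCS to clear. For orientation, a sketch of what the thread-context hook conceptually does and why it is dropped before preemption is re-enabled; all names here are hypothetical stand-ins for the VMMR0ThreadCtxHooks* API:

    #include <cassert>

    // Hypothetical event type; the real hook is invoked on preempt-out/resume-in.
    enum CtxEvent { CTX_EVENT_PREEMPTING, CTX_EVENT_RESUMED };

    struct VCpuSketch
    {
        bool fHookRegistered;
        bool fSharedStateOnCpu;   // guest FPU/DRx loaded on this host CPU
    };

    // What the hook conceptually does while the thread owns HM context.
    static void ctxHookCallback(VCpuSketch &vcpu, CtxEvent enmEvent)
    {
        if (enmEvent == CTX_EVENT_PREEMPTING)
            vcpu.fSharedStateOnCpu = false;   // save shared state before switch-out
        // on CTX_EVENT_RESUMED the shared state is marked dirty for reload
    }

    // Leaving HM context: no shared state remains to protect, so the hook is
    // deregistered before preemption is re-enabled.
    static void leaveHmContext(VCpuSketch &vcpu)
    {
        if (vcpu.fHookRegistered)
            vcpu.fHookRegistered = false;     // cf. VMMR0ThreadCtxHooksDeregister()
    }

    int main()
    {
        VCpuSketch vcpu = { true, true };
        ctxHookCallback(vcpu, CTX_EVENT_PREEMPTING); // host preempts the vCPU thread
        leaveHmContext(vcpu);                        // later: leaving HM context
        assert(!vcpu.fHookRegistered && !vcpu.fSharedStateOnCpu);
        return 0;
    }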
     
@@ -2708 +2748 @@
 
     /*
+     * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
+     * We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
+     */
+    /** @todo The above assumption could be wrong. It's not documented what
+     *        should be done wrt to the VMCB Clean Bit, but we'll find out the
+     *        hard way. */
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
+
+#ifdef HMSVM_SYNC_FULL_GUEST_STATE
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
+#endif
+
+    /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */
+    rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
+    AssertRCReturn(rc, rc);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
+
+    /*
+     * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
+     * so we can update it on the way back if the guest changed the TPR.
+     */
+    if (pVCpu->hm.s.svm.fSyncVTpr)
+    {
+        if (pVM->hm.s.fTPRPatchingActive)
+            pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
+        else
+            pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
+    }
+
+    /*
      * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
      * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
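
The TPR comment above describes a read-compare-writeback optimisation: snapshot the virtual TPR before the world switch and only touch the APIC on the way back if the guest changed it. A small sketch of the idea (g_u8VTpr stands in for pVmcb->ctrl.IntCtrl.n.u8VTPR):

    #include <cstdint>
    #include <cstdio>

    // g_u8VTpr stands in for the VMCB field pVmcb->ctrl.IntCtrl.n.u8VTPR.
    static uint8_t g_u8VTpr = 0x2;

    int main()
    {
        // Before the world switch: remember the TPR the guest last saw
        // (pSvmTransient->u8GuestTpr in the real code).
        uint8_t u8TprBefore = g_u8VTpr;

        g_u8VTpr = 0x8;   // the guest raised its TPR while it ran

        // On the way back: only perform the (comparatively expensive) APIC
        // update if the value actually changed.
        if (g_u8VTpr != u8TprBefore)
            std::printf("sync APIC TPR to %#x\n", g_u8VTpr);
        return 0;
    }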
     
@@ -2760 +2831 @@
     hmR0SvmInjectPendingEvent(pVCpu, pCtx);
 
-    /*
-     * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
-     * We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
-     */
-    /** @todo The above assumption could be wrong. It's not documented what
-     *        should be done wrt to the VMCB Clean Bit, but we'll find out the
-     *        hard way. */
+    /* Load the state shared between host and guest (FPU, debug). */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
+        hmR0VmxLoadSharedState(pVM, pVCpu, pCtx);
+    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;       /* Preemption might set this, nothing to do on AMD-V. */
+    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
+
+    /* If VMCB Clean Bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-    pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
-
-#ifdef HMSVM_SYNC_FULL_GUEST_STATE
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
-#endif
-
-    /* Load the guest state. */
-    int rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
-    AssertRC(rc);
-    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;       /* Preemption might set this, nothing to do on AMD-V. */
-    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
-
-    /* If VMCB Clean Bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
     if (!(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
         pVmcb->ctrl.u64VmcbCleanBits = 0;
-
-    /*
-     * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
-     * so we can update it on the way back if the guest changed the TPR.
-     */
-    if (pVCpu->hm.s.svm.fSyncVTpr)
-    {
-        if (pVM->hm.s.fTPRPatchingActive)
-            pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
-        else
-            pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
-    }
 
     /* Setup TSC offsetting. */
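
VMCB Clean Bits let the CPU skip reloading unchanged VMCB state on VMRUN; a cleared field forces a full reload. A sketch of the fallback the code above implements, assuming the usual CPUID Fn8000_000Ah EDX feature bit:

    #include <cstdint>

    // CPUID Fn8000_000Ah EDX bit 5 advertises VMCB Clean Bits (assumed here;
    // the source uses AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN).
    static const uint32_t SVM_FEATURE_VMCB_CLEAN = UINT32_C(1) << 5;

    // A cleared field means "nothing is clean": the CPU reloads every guest
    // state field from the VMCB on the next VMRUN, which is the safe fallback.
    static uint64_t vmcbCleanBitsSketch(uint32_t u32SvmFeatures, uint64_t u64CleanBits)
    {
        if (!(u32SvmFeatures & SVM_FEATURE_VMCB_CLEAN))
            return 0;          // unsupported: mark all state dirty
        return u64CleanBits;   // supported: keep whatever is still clean
    }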
     
@@ -4169 +4214 @@
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 
+        /* We're playing with the host CPU state here, make sure we don't preempt. */
+        HM_DISABLE_PREEMPT_IF_NEEDED();
+
         /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
         CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
         Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
+
+        HM_RESTORE_PREEMPT_IF_NEEDED();
 
         STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
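
HM_DISABLE_PREEMPT_IF_NEEDED() / HM_RESTORE_PREEMPT_IF_NEEDED() bracket every place that swaps host CPU state (DRx, FPU). The real macros live in HMInternal.h and may differ; a plausible expansion along these lines on top of IPRT's preemption API (builds only against IPRT headers):

    #include <iprt/thread.h>

    // A plausible expansion of the bracket; the real macros may differ,
    // e.g. by only disabling when thread-context hooks are actually used.
    #define SKETCH_DISABLE_PREEMPT() \
        RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; \
        RTThreadPreemptDisable(&PreemptState)
    #define SKETCH_RESTORE_PREEMPT() \
        RTThreadPreemptRestore(&PreemptState)

    static void loadGuestDebugStateSketch(void)
    {
        SKETCH_DISABLE_PREEMPT();
        /* CPUMR0LoadGuestDebugState() swaps host DRx for guest DRx on this host
           CPU; being migrated to another CPU mid-swap would corrupt either set. */
        SKETCH_RESTORE_PREEMPT();
    }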
     
@@ -4315 +4365 @@
                         || DBGFBpIsHwIoArmed(pVM)))
         {
+            /* We're playing with the host CPU state here, make sure we don't preempt. */
+            HM_DISABLE_PREEMPT_IF_NEEDED();
+
             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
             CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
     
@@ -4331 +4384 @@
                      && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
                 rcStrict = rcStrict2;
+
+            HM_RESTORE_PREEMPT_IF_NEEDED();
         }
 
     
@@ -4664 +4719 @@
 #endif
 
+    /* We're playing with the host CPU state here, make sure we don't preempt. */
+    HM_DISABLE_PREEMPT_IF_NEEDED();
+
     /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
     int rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     
@@ -4669 +4727 @@
     {
         Assert(CPUMIsGuestFPUStateActive(pVCpu));
+        HM_RESTORE_PREEMPT_IF_NEEDED();
+
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
         return VINF_SUCCESS;
     }
+
+    HM_RESTORE_PREEMPT_IF_NEEDED();
 
     /* Forward #NM to the guest. */
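
Taken together with the CR0 handling earlier, the #NM path completes the lazy-FPU story: the forced CR0.TS trap lands here, the guest FPU is loaded with preemption disabled, CR0 is marked dirty again so the forced bits get dropped, and the guest resumes; only a genuine #NM is forwarded. A hedged sketch of that control flow with stand-in names:

    #include <cassert>

    enum StatusSketch { SKETCH_RESUME_GUEST, SKETCH_FORWARD_NM };

    // Stand-in for CPUMR0LoadGuestFPU(): loads the guest FPU state onto this
    // host CPU; must not be interrupted by a context switch.
    static bool loadGuestFpuSketch() { return true; }

    static StatusSketch handleXcptNmSketch()
    {
        // HM_DISABLE_PREEMPT_IF_NEEDED() in the real code.
        bool fLoaded = loadGuestFpuSketch();
        // HM_RESTORE_PREEMPT_IF_NEEDED() in the real code.

        if (fLoaded)
        {
            // The #NM was ours (forced CR0.TS/MP). CR0 is dirty again so the
            // forced bits get dropped before the next VMRUN, and the guest
            // re-executes the faulting FPU instruction successfully.
            return SKETCH_RESUME_GUEST;   // sets HM_CHANGED_GUEST_CR0
        }
        return SKETCH_FORWARD_NM;         // the guest genuinely owns this #NM
    }

    int main()
    {
        assert(handleXcptNmSketch() == SKETCH_RESUME_GUEST);
        return 0;
    }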
