Changeset 65241 in vbox for trunk/src/VBox/VMM


Timestamp: Jan 11, 2017 12:43:56 PM
Author:    vboxsync
Message:   VMM/HMVMXR0: Fix longjmp-related regression introduced in r112729.
Location:  trunk/src/VBox/VMM
Files:     2 edited

Legend:

    Lines prefixed with - were removed; lines prefixed with + were added.
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r65233 → r65241

    @@ -3448,53 +3448,4 @@
                     AssertRCReturn(rc, rc);
                 }
    -
    -#ifndef IEM_VERIFICATION_MODE_FULL
    -            /*
    -             * Set up the virtualized-APIC accesses.
    -             *
    -             * Note! This can cause longjumps to R3 due to the acquisition of the PGM lock
    -             * in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}.
    -             */
    -            if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    -            {
    -                uint64_t u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
    -                if (u64MsrApicBase != pVCpu->hm.s.vmx.u64MsrApicBase)
    -                {
    -                    /* We only care about the APIC base MSR address and not the other bits. */
    -                    PVM pVM = pVCpu->CTX_SUFF(pVM);
    -                    Assert(pVM->hm.s.vmx.HCPhysApicAccess);
    -                    RTGCPHYS GCPhysApicBase;
    -                    GCPhysApicBase  = u64MsrApicBase;
    -                    GCPhysApicBase &= PAGE_BASE_GC_MASK;
    -
    -                    /*
    -                     * We only need a single HC page as the APIC-access page for all VCPUs as it's used
    -                     * purely for causing VM-exits and not for data access within the actual page.
    -                     *
    -                     * The following check ensures we do the mapping on a per-VM basis as our APIC code
    -                     * does not allow different APICs to be mapped at different addresses on different VCPUs.
    -                     *
    -                     * In fact, we do not support remapping of the APIC base at all; see APICSetBaseMsr(),
    -                     * so we just map this once per VM.
    -                     */
    -                    if (ASMAtomicCmpXchgU64(&pVM->hm.s.vmx.GCPhysApicBase, GCPhysApicBase, 0 /* u64Old */))
    -                    {
    -                        /* Unalias any existing mapping. */
    -                        rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
    -                        AssertRCReturn(rc, rc);
    -
    -                        /* Map the HC APIC-access page in place of the MMIO page; this also updates the shadow
    -                           page tables if necessary. */
    -                        Log4(("HM: VCPU%u: Mapped HC APIC-access page GCPhysApicBase=%#RGp\n", pVCpu->idCpu, GCPhysApicBase));
    -                        rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess,
    -                                                  X86_PTE_RW | X86_PTE_P);
    -                        AssertRCReturn(rc, rc);
    -                    }
    -
    -                    /* Update the per-VCPU cache of the APIC base MSR. */
    -                    pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
    -                }
    -            }
    -#endif /* !IEM_VERIFICATION_MODE_FULL */
             }
             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
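
    The block removed above mapped the APIC-access page at most once per VM: the first
    VCPU to swap pVM->hm.s.vmx.GCPhysApicBase from 0 to the guest's APIC base won the
    compare-exchange and performed the mapping. A minimal, self-contained sketch of that
    do-once guard, using C11 atomics as a stand-in for ASMAtomicCmpXchgU64 (the function
    and variable names below are illustrative, not VirtualBox code):

        #include <stdatomic.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Stand-in for pVM->hm.s.vmx.GCPhysApicBase; 0 means "not mapped yet". */
        static _Atomic uint64_t g_GCPhysApicBase;

        static int mapApicAccessPageOnce(uint64_t GCPhysApicBase)
        {
            uint64_t u64Old = 0;
            if (atomic_compare_exchange_strong(&g_GCPhysApicBase, &u64Old, GCPhysApicBase))
            {
                /* Only the VCPU that wins the 0 -> base swap reaches this point. */
                printf("mapping APIC-access page at %#llx\n", (unsigned long long)GCPhysApicBase);
                return 1;
            }
            return 0; /* another VCPU already performed the mapping */
        }

        int main(void)
        {
            mapApicAccessPageOnce(0xfee00000); /* performs the mapping */
            mapApicAccessPageOnce(0xfee00000); /* no-op: guard already taken */
            return 0;
        }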
     
    @@ -8412,4 +8363,6 @@
     *                      out-of-sync. Make sure to update the required fields
     *                      before using them.
    + *
    + * @remarks No-long-jump zone!!!
     */
    static VBOXSTRICTRC hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     
    @@ -8419,4 +8372,7 @@
         AssertPtr(pMixedCtx);
         HMVMX_ASSERT_PREEMPT_SAFE();
    +
    +    VMMRZCallRing3Disable(pVCpu);
    +    Assert(VMMR0IsLogFlushDisabled(pVCpu));

         LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
     
    @@ -8486,4 +8442,6 @@
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);

    +    VMMRZCallRing3Enable(pVCpu);
    +
         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
         return rc;
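
    The two hunks above bracket the body of hmR0VmxLoadGuestState() with
    VMMRZCallRing3Disable()/VMMRZCallRing3Enable(), making it a no-long-jump zone: no
    code inside the bracket may longjmp back to ring-3, which is exactly what the
    PGM-lock paths of the removed APIC mapping code could do. A rough, self-contained
    sketch of that discipline, with setjmp/longjmp standing in for the VMM's ring-3
    return machinery (all names below are made up for illustration):

        #include <assert.h>
        #include <setjmp.h>
        #include <stdio.h>

        static jmp_buf g_Ring3Jmp;       /* where "ring-3" resumes */
        static int     g_fRing3Disabled; /* the no-long-jump-zone flag */

        /* Stand-in for any path that long-jumps back to ring-3. */
        static void callRing3(void)
        {
            assert(!g_fRing3Disabled);   /* tripping this is the r112729-style bug */
            longjmp(g_Ring3Jmp, 1);
        }

        static void loadGuestState(void)
        {
            g_fRing3Disabled = 1;        /* VMMRZCallRing3Disable() */
            /* ... load the guest state; nothing here may reach callRing3() ... */
            g_fRing3Disabled = 0;        /* VMMRZCallRing3Enable() */
        }

        int main(void)
        {
            if (setjmp(g_Ring3Jmp))
            {
                printf("resumed in ring-3\n");
                return 0;
            }
            loadGuestState();            /* safe: the zone stays longjmp-free */
            callRing3();                 /* fine outside the bracket */
            return 0;
        }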
     
    @@ -8560,4 +8518,6 @@
     *                          out-of-sync. Make sure to update the required fields
     *                          before using them.
    + *
    + * @remarks No-long-jump zone!!!
     */
    static VBOXSTRICTRC hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     
    @@ -8652,4 +8612,40 @@
         else
             return rcStrict;
    +
    +#ifndef IEM_VERIFICATION_MODE_FULL
    +    /*
    +     * Set up the virtualized-APIC accesses.
    +     *
    +     * Note! This can cause longjumps to R3 due to the acquisition of the PGM lock
    +     * in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}.
    +     *
    +     * This is the reason we do it here and not in hmR0VmxLoadGuestState().
    +     */
    +    if (   !pVCpu->hm.s.vmx.u64MsrApicBase
    +        && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    +        && PDMHasApic(pVM))
    +    {
    +        uint64_t u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
    +        Assert(u64MsrApicBase);
    +        Assert(pVM->hm.s.vmx.HCPhysApicAccess);
    +
    +        /* We only care about the APIC base MSR address and not the other bits. */
    +        RTGCPHYS GCPhysApicBase;
    +        GCPhysApicBase  = u64MsrApicBase;
    +        GCPhysApicBase &= PAGE_BASE_GC_MASK;
    +
    +        /* Unalias any existing mapping. */
    +        int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
    +        AssertRCReturn(rc, rc);
    +
    +        /* Map the HC APIC-access page in place of the MMIO page; this also updates the shadow page tables if necessary. */
    +        LogRel(("HM: VCPU%u: Mapped HC APIC-access page GCPhysApicBase=%#RGp\n", pVCpu->idCpu, GCPhysApicBase));
    +        rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
    +        AssertRCReturn(rc, rc);
    +
    +        /* Update the per-VCPU cache of the APIC base MSR. */
    +        pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
    +    }
    +#endif /* !IEM_VERIFICATION_MODE_FULL */

         if (TRPMHasTrap(pVCpu))
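
    Before mapping, the relocated block reduces the raw IA32_APIC_BASE MSR value to its
    page base, since the flag bits are irrelevant to the mapping. A stand-alone
    illustration of that masking step (PAGE_BASE_MASK below is a hypothetical stand-in
    for PAGE_BASE_GC_MASK):

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_BASE_MASK (~(uint64_t)0xfff) /* keep the 4 KiB page-base bits */

        int main(void)
        {
            /* A typical IA32_APIC_BASE value: base 0xfee00000 with the APIC
               global-enable flag (bit 11) and BSP flag (bit 8) set. */
            uint64_t u64MsrApicBase = UINT64_C(0xfee00000) | (1u << 11) | (1u << 8);
            uint64_t GCPhysApicBase = u64MsrApicBase & PAGE_BASE_MASK;
            printf("GCPhysApicBase = %#llx\n", (unsigned long long)GCPhysApicBase);
            return 0; /* prints 0xfee00000 */
        }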
  • trunk/src/VBox/VMM/include/HMInternal.h

    r65222 → r65241

    @@ -461,7 +461,4 @@
             /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
             R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
    -
    -        /** The guest's MSR APIC base address at which the APIC access page is mapped. */
    -        RTGCPHYS volatile           GCPhysApicBase;

             /** Physical address of the APIC-access page. */
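
    With the per-VM GCPhysApicBase field removed here, the per-VCPU u64MsrApicBase
    cache (zero until the first mapping) doubles as the do-once guard in the
    HMVMXR0.cpp hunk above. A reduced sketch of that pattern (VCPUSTATE is a made-up
    stand-in for the real VMCPU structure):

        #include <stdint.h>
        #include <stdio.h>

        typedef struct VCPUSTATE
        {
            uint64_t u64MsrApicBase; /* 0 until the APIC-access page is mapped */
        } VCPUSTATE;

        static void loadGuestApicState(VCPUSTATE *pVCpu, uint64_t u64MsrApicBase)
        {
            /* The cached MSR value is the guard: map only on the first pass. */
            if (!pVCpu->u64MsrApicBase)
            {
                printf("mapping APIC-access page for base %#llx\n",
                       (unsigned long long)u64MsrApicBase);
                pVCpu->u64MsrApicBase = u64MsrApicBase; /* update the cache */
            }
        }

        int main(void)
        {
            VCPUSTATE VCpu = { 0 };
            loadGuestApicState(&VCpu, 0xfee00900);
            loadGuestApicState(&VCpu, 0xfee00900); /* guard short-circuits */
            return 0;
        }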