VirtualBox

Changeset 93922 in vbox


Ignore:
Timestamp:
Feb 24, 2022 3:14:31 PM (3 years ago)
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:10092 EPT VM-exit handling with HM ring-0 code.

Location:
trunk
Files:
22 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/cpum.h

    r93830 r93922  
    15971597VMM_INT_DECL(bool)      CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu);
    15981598VMM_INT_DECL(bool)      CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu);
     1599VMM_INT_DECL(uint64_t)  CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu);
    15991600/** @} */
    16001601
     
    23352336 * @param   pCtx    Current CPU context.
    23362337 */
    2337 DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCCPUMCTX pCtx)
     2338DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddrEx(PCCPUMCTX pCtx)
    23382339{
    23392340    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
  • trunk/include/VBox/vmm/iem.h

    r93650 r93922  
    383383# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    384384VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
     385VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
     386VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo);
    385387# endif
    386388#endif
  • trunk/include/VBox/vmm/pgm.h

    r93716 r93922  
    508508    RTGCPHYS        GCPhysNested;
    509509
    510     /** The physical address that is the result of the walk (output).
    511      * @remarks This is page aligned and only valid if fSucceeded is set. */
     510    /** The physical address that is the result of the walk (output). */
    512511    RTGCPHYS        GCPhys;
    513512
     
    613612VMMDECL(int)        PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3);
    614613VMMDECL(int)        PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce);
    615 VMM_INT_DECL(int)   PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode);
     614VMM_INT_DECL(int)   PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce);
    616615VMMDECL(void)       PGMCr0WpEnabled(PVMCPUCC pVCpu);
    617616VMMDECL(PGMMODE)    PGMGetGuestMode(PVMCPU pVCpu);
     
    949948                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr);
    950949VMMR0_INT_DECL(int)  PGMR0PoolGrow(PGVM pGVM, VMCPUID idCpu);
     950
     951# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     952VMMR0DECL(VBOXSTRICTRC) PGMR0NestedTrap0eHandlerNestedPaging(PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
     953                                                             PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
     954                                                             bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk);
     955# endif
    951956/** @} */
    952957#endif /* IN_RING0 */
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r93725 r93922  
    30613061}
    30623062
     3063
     3064/**
     3065 * Returns the guest-physical address of the APIC-access page when executing a
     3066 * nested-guest.
     3067 *
     3068 * @returns The APIC-access page guest-physical address.
     3069 * @param   pVCpu   The cross context virtual CPU structure.
     3070 */
     3071VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
     3072{
     3073    return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
     3074}
     3075
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r93906 r93922  
    1636616366    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    1636716367}
     16368
     16369
     16370/**
     16371 * Interface for HM and EM to emulate a VM-exit due to an EPT violation.
     16372 *
     16373 * @returns Strict VBox status code.
     16374 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
     16375 * @param   pExitInfo       Pointer to the VM-exit information.
     16376 * @param   pExitEventInfo  Pointer to the VM-exit event information.
     16377 * @thread  EMT(pVCpu)
     16378 */
     16379VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
     16380                                                        PCVMXVEXITEVENTINFO pExitEventInfo)
     16381{
     16382    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
     16383
     16384    iemInitExec(pVCpu, false /*fBypassHandlers*/);
     16385    VBOXSTRICTRC rcStrict = iemVmxVmexitEptViolationWithInfo(pVCpu, pExitInfo, pExitEventInfo);
     16386    Assert(!pVCpu->iem.s.cActiveMappings);
     16387    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
     16388}
     16389
     16390
     16391/**
     16392 * Interface for HM and EM to emulate a VM-exit due to an EPT misconfiguration.
     16393 *
     16394 * @returns Strict VBox status code.
     16395 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
     16396 * @param   GCPhysAddr      The nested-guest physical address causing the EPT
     16397 *                          misconfiguration.
     16398 * @param   pExitEventInfo  Pointer to the VM-exit event information.
     16399 * @thread  EMT(pVCpu)
     16400 */
     16401VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo)
     16402{
     16403    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
     16404
     16405    iemInitExec(pVCpu, false /*fBypassHandlers*/);
     16406    VBOXSTRICTRC rcStrict = iemVmxVmexitEptMisconfigWithInfo(pVCpu, GCPhysAddr, pExitEventInfo);
     16407    Assert(!pVCpu->iem.s.cActiveMappings);
     16408    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
     16409}
     16410
    1636816411# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
    1636916412
     
    1638416427    {
    1638516428        Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
    16386         Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
    16387 
    16388         /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
    16389          *        Currently they will go through as read accesses. */
    16390         uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
     16429        Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
     16430
     16431        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_DATA_W : IEM_ACCESS_DATA_R;
    1639116432        uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
    1639216433        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
     
    1639816439    }
    1639916440
    16400     Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
     16441    LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
    1640116442    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    1640216443    if (RT_FAILURE(rc))
     
    1640716448}
    1640816449
     16450
     16451# ifndef IN_RING3
     16452/**
     16453 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
     16454 *      \#PF access handler callback for guest VMX APIC-access page.}
     16455 */
     16456DECLCALLBACK(VBOXSTRICTRC) iemVmxApicAccessPagePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame,
     16457                                                         RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
     16458
     16459{
     16460    RT_NOREF4(pVM, pRegFrame, pvFault, uUser);
     16461
     16462    /** @todo We lack information about such as the current instruction length, IDT
     16463     *        vectoring info etc. These need to be queried from HMR0. */
     16464    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
     16465    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
     16466    {
     16467        Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
     16468        Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
     16469
     16470        uint32_t fAccess;
     16471        if (uErr & X86_TRAP_PF_ID)
     16472            fAccess = IEM_ACCESS_INSTRUCTION;
     16473        else if (uErr & X86_TRAP_PF_RW)
     16474            fAccess = IEM_ACCESS_DATA_W;
     16475        else
     16476            fAccess = IEM_ACCESS_DATA_R;
     16477
     16478        uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
     16479        bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, 0 /* cbAccess */, fAccess);
     16480        if (fIntercept)
     16481        {
     16482            /** @todo Once HMR0 interface for querying VMXTRANSIENT info is available, use
     16483             *        iemVmxVmexitApicAccessWithInfo instead. This is R0-only code anyway. */
     16484            VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
     16485            return iemExecStatusCodeFiddling(pVCpu, rcStrict);
     16486        }
     16487
     16488        /* The access isn't intercepted, which means it needs to be virtualized. */
     16489        return VINF_EM_RAW_EMULATE_INSTR;
     16490    }
     16491
     16492    LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
     16493    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
     16494    if (RT_FAILURE(rc))
     16495        return rc;
     16496
     16497    return VINF_SUCCESS;
     16498}
     16499# endif /* !IN_RING3 */
    1640916500#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
     16501
    1641016502
    1641116503#ifdef IN_RING3
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r93847 r93922  
    75607560         * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
    75617561         */
    7562         rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem);
     7562        rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
    75637563        if (   rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
    75647564            && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
     
    82188218                 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
    82198219                 */
    8220                 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem);
     8220                rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
    82218221                if (   rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
    82228222                    && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r93650 r93922  
    37113711 *                          This need not be page aligned (e.g. nested-guest in real
    37123712 *                          mode).
    3713  * @param   pExitEventInfo  Pointer to the VM-exit event information. Optional, can
    3714  *                          be NULL.
    3715  */
    3716 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo)
    3717 {
    3718     if (pExitEventInfo)
    3719     {
    3720         iemVmxVmcsSetExitIntInfo(pVCpu, pExitEventInfo->uExitIntInfo);
    3721         iemVmxVmcsSetExitIntErrCode(pVCpu, pExitEventInfo->uExitIntErrCode);
    3722         iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
    3723         iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
    3724     }
    3725 
     3713 */
     3714IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr)
     3715{
     3716    iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr);
     3717    return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_MISCONFIG, 0 /* u64ExitQual */);
     3718}
     3719
     3720
     3721/**
     3722 * VMX VM-exit handler for EPT misconfiguration.
     3723 *
     3724 * This is intended for EPT misconfigurations where the caller provides all the
     3725 * relevant VM-exit information.
     3726 *
     3727 * @param   pVCpu           The cross context virtual CPU structure.
     3728 * @param   GCPhysAddr      The physical address causing the EPT misconfiguration.
     3729 *                          This need not be page aligned (e.g. nested-guest in real
     3730 *                          mode).
     3731 * @param   pExitEventInfo  Pointer to the VM-exit event information.
     3732 */
     3733IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptMisconfigWithInfo(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo)
     3734{
     3735    Assert(pExitEventInfo);
     3736    Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
     3737    iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
     3738    iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
    37263739    iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr);
    37273740    return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_MISCONFIG, 0 /* u64ExitQual */);
     
    37453758 */
    37463759IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptViolation(PVMCPUCC pVCpu, uint32_t fAccess, uint32_t fSlatFail, uint64_t fEptAccess,
    3747                                                  RTGCPHYS GCPhysAddr, bool fLinearAddrValid, uint64_t GCPtrAddr, uint8_t cbInstr)
     3760                                                 RTGCPHYS GCPhysAddr, bool fIsLinearAddrValid, uint64_t GCPtrAddr,
     3761                                                 uint8_t cbInstr)
    37483762{
    37493763    /*
     
    37523766     * While we can leave it this way, it's preferrable to zero it for consistency.
    37533767     */
    3754     Assert(fLinearAddrValid || GCPtrAddr == 0);
     3768    Assert(fIsLinearAddrValid || GCPtrAddr == 0);
    37553769
    37563770    uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
    3757     uint8_t const fSupportsAccessDirty = fCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY;
    3758 
    3759     uint8_t const fDataRead      = ((fAccess & IEM_ACCESS_DATA_R)  == IEM_ACCESS_DATA_R)  | fSupportsAccessDirty;
    3760     uint8_t const fDataWrite     = ((fAccess & IEM_ACCESS_DATA_RW) == IEM_ACCESS_DATA_RW) | fSupportsAccessDirty;
    3761     uint8_t const fInstrFetch    = (fAccess & IEM_ACCESS_INSTRUCTION) == IEM_ACCESS_INSTRUCTION;
    3762     bool const fEptRead          = RT_BOOL(fEptAccess & EPT_E_READ);
    3763     bool const fEptWrite         = RT_BOOL(fEptAccess & EPT_E_WRITE);
    3764     bool const fEptExec          = RT_BOOL(fEptAccess & EPT_E_EXECUTE);
    3765     bool const fNmiUnblocking    = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
    3766     bool const fLinearToPhysAddr = fLinearAddrValid & RT_BOOL(fSlatFail & IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR);
     3771    bool const fSupportsAccessDirty = RT_BOOL(fCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY);
     3772
     3773    uint32_t const fDataRdMask     = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_READ;
     3774    uint32_t const fDataWrMask     = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE;
     3775    uint32_t const fInstrMask      = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_EXEC;
     3776    bool const fDataRead           = ((fAccess & fDataRdMask) == IEM_ACCESS_DATA_R) | fSupportsAccessDirty;
     3777    bool const fDataWrite          = ((fAccess & fDataWrMask) == IEM_ACCESS_DATA_W) | fSupportsAccessDirty;
     3778    bool const fInstrFetch         = ((fAccess & fInstrMask)  == IEM_ACCESS_INSTRUCTION);
     3779    bool const fEptRead            = RT_BOOL(fEptAccess & EPT_E_READ);
     3780    bool const fEptWrite           = RT_BOOL(fEptAccess & EPT_E_WRITE);
     3781    bool const fEptExec            = RT_BOOL(fEptAccess & EPT_E_EXECUTE);
     3782    bool const fNmiUnblocking      = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
     3783    bool const fIsLinearToPhysAddr = fIsLinearAddrValid & RT_BOOL(fSlatFail & IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR);
    37673784
    37683785    uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ACCESS_READ,         fDataRead)
     
    37723789                               | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_WRITE,         fEptWrite)
    37733790                               | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_EXECUTE,       fEptExec)
    3774                                | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_ADDR_VALID,   fLinearAddrValid)
    3775                                | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_TO_PHYS_ADDR, fLinearToPhysAddr)
     3791                               | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_ADDR_VALID,   fIsLinearAddrValid)
     3792                               | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_TO_PHYS_ADDR, fIsLinearToPhysAddr)
    37763793                               | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_NMI_UNBLOCK_IRET,    fNmiUnblocking);
    37773794
     
    38073824                                                         PCVMXVEXITEVENTINFO pExitEventInfo)
    38083825{
     3826    Assert(pExitInfo);
     3827    Assert(pExitEventInfo);
    38093828    Assert(pExitInfo->uReason == VMX_EXIT_EPT_VIOLATION);
    3810 
    3811     iemVmxVmcsSetExitIntInfo(pVCpu, pExitEventInfo->uExitIntInfo);
    3812     iemVmxVmcsSetExitIntErrCode(pVCpu, pExitEventInfo->uExitIntErrCode);
     3829    Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
     3830
    38133831    iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
    38143832    iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
     
    38183836        iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
    38193837    else
    3820         iemVmxVmcsSetExitGuestLinearAddr(pVCpu,  0);
     3838        iemVmxVmcsSetExitGuestLinearAddr(pVCpu, 0);
    38213839    iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
    38223840    return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_VIOLATION, pExitInfo->u64Qual);
     
    38343852 *                      applicable.
    38353853 */
    3836 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail,
    3837                                         uint8_t cbInstr)
     3854IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr)
    38383855{
    38393856    Assert(pWalk->fIsSlat);
     
    38523869    Log(("EptMisconfig: cs:rip=%x:%#RX64 fAccess=%#RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fAccess));
    38533870    Assert(pWalk->fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
    3854     return iemVmxVmexitEptMisconfig(pVCpu, pWalk->GCPhysNested, NULL /* pExitEventInfo */);
     3871    return iemVmxVmexitEptMisconfig(pVCpu, pWalk->GCPhysNested);
    38553872}
    38563873
     
    38613878 * @param   pVCpu       The cross context virtual CPU structure.
    38623879 * @param   offAccess   The offset of the register being accessed.
    3863  * @param   fAccess     The type of access (must contain IEM_ACCESS_TYPE_READ or
    3864  *                      IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
     3880 * @param   fAccess     The type of access, see IEM_ACCESS_XXX.
    38653881 */
    38663882IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPUCC pVCpu, uint16_t offAccess, uint32_t fAccess)
    38673883{
    3868     Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));
    3869 
    38703884    VMXAPICACCESS enmAccess;
    38713885    bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
    38723886    if (fInEventDelivery)
    38733887        enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
    3874     else if (fAccess & IEM_ACCESS_INSTRUCTION)
     3888    else if ((fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == IEM_ACCESS_INSTRUCTION)
    38753889        enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
    38763890    else if (fAccess & IEM_ACCESS_TYPE_WRITE)
     
    41274141 * @param   offAccess   The offset of the register being accessed.
    41284142 * @param   cbAccess    The size of the access in bytes.
    4129  * @param   fAccess     The type of access (must be IEM_ACCESS_TYPE_READ or
    4130  *                      IEM_ACCESS_TYPE_WRITE).
     4143 * @param   fAccess     The type of access, see IEM_ACCESS_XXX.
    41314144 *
    41324145 * @remarks This must not be used for MSR-based APIC-access page accesses!
     
    41364149{
    41374150    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    4138     Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
    41394151
    41404152    /*
     
    42974309 *
    42984310 * @param   pVCpu           The cross context virtual CPU structure.
    4299  * @param   pGCPhysAccess   Pointer to the guest-physical address used.
    4300  */
    4301 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess)
     4311 * @param   pGCPhysAccess   Pointer to the guest-physical address accessed.
     4312 * @param   fAccess         The type of access, see IEM_ACCESS_XXX.
     4313 */
     4314IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, uint32_t fAccess)
    43024315{
    43034316    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
     
    43114324    {
    43124325        uint16_t const offAccess = *pGCPhysAccess & GUEST_PAGE_OFFSET_MASK;
    4313         uint32_t const fAccess   = IEM_ACCESS_TYPE_READ;
    43144326        uint16_t const cbAccess  = 1;
    43154327        bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
     
    43384350 * @param   pvData      Pointer to the data being written or where to store the data
    43394351 *                      being read.
    4340  * @param   fAccess     The type of access (must contain IEM_ACCESS_TYPE_READ or
    4341  *                      IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
     4352 * @param   fAccess     The type of access, see IEM_ACCESS_XXX.
    43424353 */
    43434354IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
     
    43464357    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    43474358    Assert(pvData);
    4348     Assert(   (fAccess & IEM_ACCESS_TYPE_READ)
    4349            || (fAccess & IEM_ACCESS_TYPE_WRITE)
    4350            || (fAccess & IEM_ACCESS_INSTRUCTION));
    43514359
    43524360    bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
     
    43894397         * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
    43904398         */
     4399        Assert(fAccess & IEM_ACCESS_TYPE_READ);
     4400
    43914401        Assert(cbAccess <= 4);
    43924402        Assert(offAccess < XAPIC_OFF_END + 4);
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r93905 r93922  
    736736# define PGMMODEDATABTH_NULL_ENTRY()    { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
    737737# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    738     { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }
     738    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
    739739
    740740#elif !defined(IN_RING3) && defined(VBOX_STRICT)
    741741# define PGMMODEDATABTH_NULL_ENTRY()    { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
    742742# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    743     { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }
     743    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
    744744
    745745#elif defined(IN_RING3) && !defined(VBOX_STRICT)
     
    19071907        case PGMSLAT_EPT:
    19081908            pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
    1909             return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, NIL_RTGCPTR, pWalk,
    1910                                                &pGstWalk->u.Ept);
     1909            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, 0 /* GCPtrNested */,
     1910                                               pWalk, &pGstWalk->u.Ept);
    19111911
    19121912        default:
     
    29912991    /* Flush the TLB */
    29922992    PGM_INVL_VCPU_TLBS(pVCpu);
    2993     return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
     2993    return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode, fForce);
    29942994}
    29952995
     
    32263226 * @param   enmGuestMode    The new guest mode. This is assumed to be different from
    32273227 *                          the current mode.
    3228  */
    3229 VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
     3228 * @param   fForce          Whether to force a shadow paging mode change.
     3229 */
     3230VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
    32303231{
    32313232    Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
     
    32353236     * Calc the shadow mode and switcher.
    32363237     */
    3237     PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
     3238    PGMMODE const enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
     3239    bool const fShadowModeChanged = enmShadowMode != pVCpu->pgm.s.enmShadowMode || fForce;
    32383240
    32393241    /*
     
    32413243     */
    32423244    /* shadow */
    3243     if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
    3244     {
    3245         LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n",  PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
     3245    if (fShadowModeChanged)
     3246    {
     3247        LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
    32463248        uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
    32473249        if (   idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
     
    33103312     * Enter new shadow mode (if changed).
    33113313     */
    3312     if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
     3314    if (fShadowModeChanged)
    33133315    {
    33143316        pVCpu->pgm.s.enmShadowMode = enmShadowMode;
     
    35803582    }
    35813583}
    3582 #endif
     3584#endif  /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
    35833585
    35843586
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r93905 r93922  
    3737#ifndef IN_RING3
    3838PGM_BTH_DECL(int, Trap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
     39PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
     40                                       bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, bool *pfLockTaken);
    3941#endif
    4042PGM_BTH_DECL(int, InvalidatePage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage);
     
    966968# else  /* Nested paging, EPT except PGM_GST_TYPE = PROT, NONE.   */
    967969    NOREF(uErr); NOREF(pRegFrame); NOREF(pvFault);
     970    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
     971    return VERR_PGM_NOT_USED_IN_MODE;
     972# endif
     973}
     974
     975
     976/**
     977 * Nested \#PF handler for nested-guest hardware-assisted execution using nested
     978 * paging.
     979 *
     980 * @returns VBox status code (appropriate for trap handling and GC return).
     981 * @param   pVCpu               The cross context virtual CPU structure.
     982 * @param   uErr                The fault error (X86_TRAP_PF_*).
     983 * @param   pRegFrame           The register frame.
     984 * @param   GCPhysNested        The nested-guest physical address being accessed.
     985 * @param   fIsLinearAddrValid  Whether translation of a nested-guest linear address
     986 *                              caused this fault. If @c false, GCPtrNested must be
     987 *                              0.
     988 * @param   GCPtrNested         The nested-guest linear address that caused this
     989 *                              fault.
     990 * @param   pWalk               The guest page table walk result.
     991 * @param   pfLockTaken         Where to store whether the PGM lock is still held
     992 *                              when this function completes.
     993 */
     994PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
     995                                       bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, bool *pfLockTaken)
     996{
     997    *pfLockTaken = false;
     998# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) \
     999    && (   PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_32BIT  \
     1000        || PGM_GST_TYPE == PGM_TYPE_PAE  || PGM_GST_TYPE == PGM_TYPE_AMD64) \
     1001    && PGM_SHW_TYPE == PGM_TYPE_EPT
     1002
     1003    Assert(CPUMIsGuestVmxEptPagingEnabled(pVCpu));
     1004
     1005    /*
     1006     * Walk the guest EPT tables and check if it's an EPT violation or misconfiguration.
     1007     */
     1008    PGMPTWALKGST GstWalkAll;
     1009    int rc = pgmGstSlatWalk(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &GstWalkAll);
     1010    if (RT_FAILURE(rc))
     1011        return rc;
     1012
     1013    Assert(GstWalkAll.enmType == PGMPTWALKGSTTYPE_EPT);
     1014    Assert(pWalk->fSucceeded);
     1015    Assert(pWalk->fEffective & PGM_PTATTRS_R_MASK);
     1016    Assert(pWalk->fIsSlat);
     1017
     1018    if (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID))
     1019    {
     1020        if (    (   (uErr & X86_TRAP_PF_RW)
     1021                 && !(pWalk->fEffective & PGM_PTATTRS_W_MASK)
     1022                 && (   (uErr & X86_TRAP_PF_US)
     1023                     || CPUMIsGuestR0WriteProtEnabled(pVCpu)) )
     1024            ||  ((uErr & X86_TRAP_PF_US) && !(pWalk->fEffective & PGM_PTATTRS_US_MASK))
     1025            ||  ((uErr & X86_TRAP_PF_ID) &&  (pWalk->fEffective & PGM_PTATTRS_NX_MASK))
     1026           )
     1027            return VERR_ACCESS_DENIED;
     1028    }
     1029
     1030    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     1031    RTGCPHYS const GCPhysFault = PGM_A20_APPLY(pVCpu, GCPhysNested & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
     1032    GSTPDE const   PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
     1033
     1034    /* Take the big lock now. */
     1035    *pfLockTaken = true;
     1036    PGM_LOCK_VOID(pVM);
     1037
     1038    /*
     1039     * Check if this is an APIC-access page access (VMX specific).
     1040     */
     1041    RTGCPHYS const GCPhysApicAccess = CPUMGetGuestVmxApicAccessPageAddr(pVCpu);
     1042    if ((pWalk->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == GCPhysApicAccess)
     1043    {
     1044        PPGMPAGE pPage;
     1045        rc = pgmPhysGetPageEx(pVM, PGM_A20_APPLY(pVCpu, GCPhysApicAccess), &pPage);
     1046        if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
     1047        {
     1048            rc = VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pWalk->GCPhys, pPage,
     1049                                                                               pfLockTaken));
     1050            return rc;
     1051        }
     1052    }
     1053
     1054#  ifdef PGM_WITH_MMIO_OPTIMIZATIONS
     1055    /*
     1056     * Check if this is an MMIO access.
     1057     */
     1058    if (uErr & X86_TRAP_PF_RSVD)
     1059    {
     1060        PPGMPAGE pPage;
     1061        rc = pgmPhysGetPageEx(pVM, PGM_A20_APPLY(pVCpu, (RTGCPHYS)GCPhysFault), &pPage);
     1062        if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
     1063            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, GCPhysFault, pPage,
     1064                                                                                 pfLockTaken));
     1065        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, 1, uErr);
     1066        AssertRC(rc);
     1067        HMInvalidatePhysPage(pVM, GCPhysFault);
     1068        return rc; /* Restart with the corrected entry. */
     1069    }
     1070#  endif /* PGM_WITH_MMIO_OPTIMIZATIONS */
     1071
     1072    /*
     1073     * Fetch the guest EPT page directory pointer.
     1074     */
     1075    const unsigned  iPDDst = ((GCPhysFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
     1076    PEPTPD          pPDDst;
     1077    rc = pgmShwGetEPTPDPtr(pVCpu, GCPhysFault, NULL /* ppPdpt */, &pPDDst);
     1078    AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
     1079    Assert(pPDDst);
     1080
     1081    /*
     1082     * A common case is the not-present error caused by lazy page table syncing.
     1083     *
     1084     * It is IMPORTANT that we weed out any access to non-present shadow PDEs
     1085     * here so we can safely assume that the shadow PT is present when calling
     1086     * SyncPage later.
     1087     *
     1088     * On failure, we ASSUME that SyncPT is out of memory or detected some kind
     1089     * of mapping conflict and defer to SyncCR3 in R3.
     1090     * (Again, we do NOT support access handlers for non-present guest pages.)
     1091     *
     1092     */
     1093    if (   !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
     1094        && !SHW_PDE_IS_P(pPDDst->a[iPDDst]))
     1095    {
     1096        STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2SyncPT; });
     1097        LogFlow(("=>SyncPT GCPhysFault=%RGp\n", GCPhysFault));
     1098        rc = PGM_BTH_NAME(SyncPT)(pVCpu, 0 /* iPDSrc */, NULL /* pPDSrc */, GCPhysFault);
     1099        if (RT_SUCCESS(rc))
     1100            return rc;
     1101        Log(("SyncPT: %RGp failed!! rc=%Rrc\n", GCPhysFault, rc));
     1102        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
     1103        return VINF_PGM_SYNC_CR3;
     1104    }
     1105
     1106    /*
     1107     * Check if this fault address is flagged for special treatment,
     1108     * which means we'll have to figure out the physical address and
     1109     * check flags associated with it.
     1110     *
     1111     * ASSUME that we can limit any special access handling to pages
     1112     * in page tables which the guest believes to be present.
     1113     */
     1114    PPGMPAGE pPage;
     1115    rc = pgmPhysGetPageEx(pVM, GCPhysFault, &pPage);
     1116    if (RT_FAILURE(rc))
     1117    {
     1118        /*
     1119         * When the guest accesses invalid physical memory (e.g. probing
     1120         * of RAM or accessing a remapped MMIO range), then we'll fall
     1121         * back to the recompiler to emulate the instruction.
     1122         */
     1123        LogFlow(("PGM #PF: pgmPhysGetPageEx(%RGp) failed with %Rrc\n", GCPhysFault, rc));
     1124        STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eHandlersInvalid);
     1125        STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2InvalidPhys; });
     1126        return VINF_EM_RAW_EMULATE_INSTR;
     1127    }
     1128
     1129    /*
     1130     * Any handlers for this page?
     1131     */
     1132    if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
     1133        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, GCPhysFault, pPage,
     1134                                                                             pfLockTaken));
     1135
     1136    /*
     1137     * We are here only if page is present in Guest page tables and
     1138     * trap is not handled by our handlers.
     1139     *
     1140     * Check it for page out-of-sync situation.
     1141     */
     1142    if (!(uErr & X86_TRAP_PF_P))
     1143    {
     1144        /*
     1145         * Page is not present in our page tables. Try to sync it!
     1146         */
     1147        if (uErr & X86_TRAP_PF_US)
     1148            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncUser));
     1149        else /* supervisor */
     1150            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
     1151
     1152        if (PGM_PAGE_IS_BALLOONED(pPage))
     1153        {
     1154            /* Emulate reads from ballooned pages as they are not present in
     1155               our shadow page tables. (Required for e.g. Solaris guests; soft
     1156               ecc, random nr generator.) */
     1157            rc = VBOXSTRICTRC_TODO(PGMInterpretInstruction(pVM, pVCpu, pRegFrame, GCPhysFault));
     1158            LogFlow(("PGM: PGMInterpretInstruction balloon -> rc=%d pPage=%R[pgmpage]\n", rc, pPage));
     1159            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncBallloon));
     1160            STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Ballooned; });
     1161            return rc;
     1162        }
     1163
     1164        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, PGM_SYNC_NR_PAGES, uErr);
     1165        if (RT_SUCCESS(rc))
     1166        {
     1167            /* The page was successfully synced, return to the guest. */
     1168            STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSync; });
     1169            return VINF_SUCCESS;
     1170        }
     1171    }
     1172    else
     1173    {
     1174        /*
     1175         * Write protected pages are made writable when the guest makes the
     1176         * first write to it.  This happens for pages that are shared, write
     1177         * monitored or not yet allocated.
     1178         *
     1179         * We may also end up here when CR0.WP=0 in the guest.
     1180         *
     1181         * Also, a side effect of not flushing global PDEs are out of sync
     1182         * pages due to physical monitored regions, that are no longer valid.
     1183         * Assume for now it only applies to the read/write flag.
     1184         */
     1185        if (uErr & X86_TRAP_PF_RW)
     1186        {
     1187            /*
     1188             * Check if it is a read-only page.
     1189             */
     1190            if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
     1191            {
     1192                Assert(!PGM_PAGE_IS_ZERO(pPage));
     1193                AssertFatalMsg(!PGM_PAGE_IS_BALLOONED(pPage), ("Unexpected ballooned page at %RGp\n", GCPhysFault));
     1194                STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2MakeWritable; });
     1195
     1196                rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhysFault);
     1197                if (rc != VINF_SUCCESS)
     1198                {
     1199                    AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc));
     1200                    return rc;
     1201                }
     1202                if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)))
     1203                    return VINF_EM_NO_MEMORY;
     1204            }
     1205
     1206            if (uErr & X86_TRAP_PF_US)
     1207                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncUserWrite));
     1208            else
     1209                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisorWrite));
     1210
     1211            /*
     1212             * Sync the page.
     1213             *
     1214             * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the
     1215             *       page is not present, which is not true in this case.
     1216             */
     1217            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, 1, uErr);
     1218            if (RT_SUCCESS(rc))
     1219            {
     1220               /*
     1221                * Page was successfully synced, return to guest but invalidate
     1222                * the TLB first as the page is very likely to be in it.
     1223                */
     1224                HMInvalidatePhysPage(pVM, GCPhysFault);
     1225                STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSyncHndObs; });
     1226                return VINF_SUCCESS;
     1227            }
     1228        }
     1229    }
     1230
     1231    /*
     1232     * If we get here it is because something failed above, i.e. most likely guru meditation time.
     1233     */
     1234    LogRelFunc(("returns rc=%Rrc GCPhysFault=%RGp uErr=%RX64 cs:rip=%04x:%08RX64\n", rc, GCPhysFault, (uint64_t)uErr,
     1235                pRegFrame->cs.Sel, pRegFrame->rip));
     1236    return rc;
     1237
     1238# else
     1239    RT_NOREF7(pVCpu, uErr, pRegFrame, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk);
    9681240    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
    9691241    return VERR_PGM_NOT_USED_IN_MODE;
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r93572 r93922  
    376376            pWalk->fSucceeded = true;
    377377            pWalk->GCPtr      = GCPtr;
    378             pWalk->GCPhys     = SlatWalk.GCPhys & PAGE_BASE_GC_MASK;
     378            pWalk->GCPhys     = SlatWalk.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    379379            pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    380380        }
  • trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h

    r93554 r93922  
    2121    if (!(uEntry & EPT_E_READ))
    2222    {
    23         if (uEntry & EPT_E_WRITE)
     23        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
     24        Assert(!RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY));
     25        NOREF(pVCpu);
     26        if (uEntry & (EPT_E_WRITE | EPT_E_EXECUTE))
    2427            return false;
    25         Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
    26         if (   !RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY)
    27             && (uEntry & EPT_E_EXECUTE))
    28             return false;
    2928    }
    3029    return true;
     
    3534{
    3635    Assert(uLevel <= 3 && uLevel >= 1); NOREF(uLevel);
    37     uint64_t const fEptMemTypeMask = uEntry & VMX_BF_EPT_PT_MEMTYPE_MASK;
    38     if (   fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_2
    39         || fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_3
    40         || fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_7)
    41         return false;
    42     return true;
     36    uint8_t const fEptMemTypeMask = uEntry & VMX_BF_EPT_PT_MEMTYPE_MASK;
     37    switch (fEptMemTypeMask)
     38    {
     39        case EPT_E_MEMTYPE_WB:
     40        case EPT_E_MEMTYPE_UC:
     41        case EPT_E_MEMTYPE_WP:
     42        case EPT_E_MEMTYPE_WT:
     43        case EPT_E_MEMTYPE_WC:
     44            return true;
     45    }
     46    return false;
    4347}
    4448
     
    8892 * @param   GCPhysNested        The nested-guest physical address to walk.
    8993 * @param   fIsLinearAddrValid  Whether the linear-address in @c GCPtrNested caused
    90  *                              this page walk. If this is false, @c GCPtrNested
    91  *                              must be 0.
     94 *                              this page walk.
    9295 * @param   GCPtrNested         The nested-guest linear address that caused this
    93  *                              page walk.
     96 *                              page walk. If @c fIsLinearAddrValid is false, pass
     97 *                              0.
    9498 * @param   pWalk               The page walk info.
    9599 * @param   pGstWalk            The guest mode specific page walk info.
  • trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h

    r93831 r93922  
    311311static FNVMXEXITHANDLER            vmxHCExitInstrNested;
    312312static FNVMXEXITHANDLER            vmxHCExitInstrWithInfoNested;
     313# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     314static FNVMXEXITHANDLER            vmxHCExitEptViolationNested;
     315static FNVMXEXITHANDLER            vmxHCExitEptMisconfigNested;
     316# endif
    313317/** @} */
    314318#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
     
    54115415    switch (uExitReason)
    54125416    {
    5413         case VMX_EXIT_EPT_MISCONFIG:            return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
    5414         case VMX_EXIT_EPT_VIOLATION:            return vmxHCExitEptViolation(pVCpu, pVmxTransient);
     5417        case VMX_EXIT_EPT_MISCONFIG:            return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
     5418        case VMX_EXIT_EPT_VIOLATION:            return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
    54155419        case VMX_EXIT_XCPT_OR_NMI:              return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
    54165420        case VMX_EXIT_IO_INSTR:                 return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
     
    1021110215}
    1021210216
     10217
     10218# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     10219/**
     10220 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
     10221 * Conditional VM-exit.
     10222 */
     10223HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
     10224{
     10225    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     10226    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
     10227
     10228    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     10229    int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     10230    AssertRCReturn(rc, rc);
     10231
     10232    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
     10233    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     10234    vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
     10235
     10236    RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
     10237    uint64_t const uExitQual    = pVmxTransient->uExitQual;
     10238
     10239    RTGCPTR GCPtrNested;
     10240    bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
     10241    if (fIsLinearAddrValid)
     10242    {
     10243        vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
     10244        GCPtrNested = pVmxTransient->uGuestLinearAddr;
     10245    }
     10246    else
     10247        GCPtrNested = 0;
     10248
     10249    RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
     10250                        | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)       ? X86_TRAP_PF_RW : 0)
     10251                        | ((uExitQual & (  VMX_EXIT_QUAL_EPT_ENTRY_READ
     10252                                         | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
     10253                                         | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))  ? X86_TRAP_PF_P  : 0);
     10254
     10255    PGMPTWALK Walk;
     10256    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     10257    VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx), GCPhysNested,
     10258                                                                 fIsLinearAddrValid, GCPtrNested, &Walk);
     10259    if (RT_SUCCESS(rcStrict))
     10260    {
     10261        if (rcStrict == VINF_SUCCESS)
     10262            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     10263        else if (rcStrict == VINF_IEM_RAISED_XCPT)
     10264        {
     10265            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     10266            rcStrict = VINF_SUCCESS;
     10267        }
     10268        return rcStrict;
     10269    }
     10270
     10271    vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
     10272    vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
     10273
     10274    VMXVEXITEVENTINFO ExitEventInfo;
     10275    RT_ZERO(ExitEventInfo);
     10276    ExitEventInfo.uIdtVectoringInfo    = pVmxTransient->uIdtVectoringInfo;
     10277    ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
     10278
     10279    if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
     10280    {
     10281        VMXVEXITINFO ExitInfo;
     10282        RT_ZERO(ExitInfo);
     10283        ExitInfo.uReason            = VMX_EXIT_EPT_VIOLATION;
     10284        ExitInfo.cbInstr            = pVmxTransient->cbExitInstr;
     10285        ExitInfo.u64Qual            = pVmxTransient->uExitQual;
     10286        ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
     10287        ExitInfo.u64GuestPhysAddr   = pVmxTransient->uGuestPhysicalAddr;
     10288        return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
     10289    }
     10290
     10291    Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
     10292    return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
     10293}
     10294
     10295
     10296/**
     10297 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
     10298 * Conditional VM-exit.
     10299 */
     10300HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
     10301{
     10302    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     10303    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
     10304
     10305    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     10306    int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     10307    AssertRCReturn(rc, rc);
     10308
     10309    vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
     10310
     10311    PGMPTWALK Walk;
     10312    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     10313    RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
     10314    VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
     10315                                                                 GCPhysNested, false /* fIsLinearAddrValid */,
     10316                                                                 0 /* GCPtrNested*/, &Walk);
     10317    if (RT_SUCCESS(rcStrict))
     10318        return VINF_EM_RAW_EMULATE_INSTR;
     10319
     10320    vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
     10321    vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
     10322
     10323    VMXVEXITEVENTINFO ExitEventInfo;
     10324    RT_ZERO(ExitEventInfo);
     10325    ExitEventInfo.uIdtVectoringInfo    = pVmxTransient->uIdtVectoringInfo;
     10326    ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
     10327
     10328    return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
     10329}
     10330# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
     10331
    1021310332/** @} */
    1021410333#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r93748 r93922  
    4343#include "dtrace/VBoxVMM.h"
    4444
     45/*********************************************************************************************************************************
     46*   Defined Constants And Macros                                                                                                 *
     47*********************************************************************************************************************************/
    4548#ifdef DEBUG_ramshankar
    4649# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
     
    5861
    5962/*********************************************************************************************************************************
    60 *   Defined Constants And Macros                                                                                                 *
    61 *********************************************************************************************************************************/
    62 
    63 
    64 /*********************************************************************************************************************************
    6563*   Structures and Typedefs                                                                                                      *
    6664*********************************************************************************************************************************/
    67 
    6865/**
    6966 * VMX page allocation information.
     
    8683*   Internal Functions                                                                                                           *
    8784*********************************************************************************************************************************/
    88 
    89 
    90 /*********************************************************************************************************************************
    91 *   Global Variables                                                                                                             *
    92 *********************************************************************************************************************************/
    93 static bool hmR0VmxShouldSwapEferMsr(PCVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient);
    94 static int hmR0VmxExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo);
     85static bool     hmR0VmxShouldSwapEferMsr(PCVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient);
     86static int      hmR0VmxExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo);
    9587
    9688
     
    35203512    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_CS_SEL,  pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS);       AssertRC(rc);
    35213513    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_SS_SEL,  pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS);       AssertRC(rc);
    3522     rc = VMXWriteVmcs16(VMX_VMCS16_HOST_DS_SEL,  uSelDS);                                       AssertRC(rc);
    3523     rc = VMXWriteVmcs16(VMX_VMCS16_HOST_ES_SEL,  uSelES);                                       AssertRC(rc);
    3524     rc = VMXWriteVmcs16(VMX_VMCS16_HOST_FS_SEL,  uSelFS);                                       AssertRC(rc);
    3525     rc = VMXWriteVmcs16(VMX_VMCS16_HOST_GS_SEL,  uSelGS);                                       AssertRC(rc);
     3514    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_DS_SEL,  uSelDS);                                         AssertRC(rc);
     3515    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_ES_SEL,  uSelES);                                         AssertRC(rc);
     3516    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_FS_SEL,  uSelFS);                                         AssertRC(rc);
     3517    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_GS_SEL,  uSelGS);                                         AssertRC(rc);
    35263518    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_TR_SEL,  pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR);       AssertRC(rc);
    35273519    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GDTR_BASE, pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.uAddr);   AssertRC(rc);
    35283520    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_IDTR_BASE, pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr.uAddr);   AssertRC(rc);
    3529     rc = VMXWriteVmcsNw(VMX_VMCS_HOST_TR_BASE,   uTRBase);                                      AssertRC(rc);
     3521    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_TR_BASE,   uTRBase);                                        AssertRC(rc);
    35303522    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_FS_BASE,   pVCpu->hmr0.s.vmx.RestoreHost.uHostFSBase);      AssertRC(rc);
    35313523    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GS_BASE,   pVCpu->hmr0.s.vmx.RestoreHost.uHostGSBase);      AssertRC(rc);
     
    55895581 *
    55905582 * @returns VBox status code.
    5591  * @param   pVCpu   The cross context virtual CPU structure.
    5592  */
    5593 static int hmR0VmxMapHCApicAccessPage(PVMCPUCC pVCpu)
     5583 * @param   pVCpu           The cross context virtual CPU structure.
     5584 * @param   u64MsrApicBase  The guest-physical address of the APIC access page.
     5585 */
     5586static int hmR0VmxMapHCApicAccessPage(PVMCPUCC pVCpu, RTGCPHYS GCPhysApicBase)
    55945587{
    55955588    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    5596     uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
    5597 
    5598     Assert(PDMHasApic(pVM));
    5599     Assert(u64MsrApicBase);
    5600 
    5601     RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
    5602     Log4Func(("Mappping HC APIC-access page at %#RGp\n", GCPhysApicBase));
     5589    Assert(GCPhysApicBase);
     5590
     5591    LogFunc(("Mapping HC APIC-access page at %#RGp\n", GCPhysApicBase));
    56035592
    56045593    /* Unalias the existing mapping. */
     
    56115600    AssertRCReturn(rc, rc);
    56125601
    5613     /* Update the per-VCPU cache of the APIC base MSR. */
    5614     pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;
    56155602    return VINF_SUCCESS;
    56165603}
     
    61136100        && PDMHasApic(pVM))
    61146101    {
    6115         int rc = hmR0VmxMapHCApicAccessPage(pVCpu);
     6102        /* Get the APIC base MSR from the virtual APIC device. */
     6103        uint64_t const uApicBaseMsr = APICGetBaseMsrNoCheck(pVCpu);
     6104
     6105        /* Map the APIC access page. */
     6106        int rc = hmR0VmxMapHCApicAccessPage(pVCpu, uApicBaseMsr & PAGE_BASE_GC_MASK);
    61166107        AssertRCReturn(rc, rc);
     6108
     6109        /* Update the per-VCPU cache of the APIC base MSR corresponding to the mapped APIC access page. */
     6110        pVCpu->hm.s.vmx.u64GstMsrApicBase = uApicBaseMsr;
    61176111    }
    61186112
     
    68126806            hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    68136807            return rcRun;
     6808        }
     6809
     6810        /*
     6811         * Undo temporary disabling of the APIC-access page monitoring we did in hmR0VmxMergeVmcsNested.
     6812         * This is needed for NestedTrap0eHandler (and IEM) to cause nested-guest APIC-access VM-exits.
     6813         */
     6814        if (VmxTransient.pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
     6815        {
     6816            PVMXVVMCS const pVmcsNstGst      = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     6817            RTGCPHYS const  GCPhysApicAccess = pVmcsNstGst->u64AddrApicAccess.u;
     6818            PGMHandlerPhysicalReset(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess);
    68146819        }
    68156820
     
    72397244    return rcStrict;
    72407245}
     7246
  • trunk/src/VBox/VMM/VMMR0/IEMR0.cpp

    r93655 r93922  
    4444    {
    4545        int rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_ALL, 0 /*fFlags*/,
    46                                                       iemVmxApicAccessPageHandler, NULL /*pfnzPfHandlerR0*/,
     46                                                      iemVmxApicAccessPageHandler, iemVmxApicAccessPagePfHandler,
    4747                                                      "VMX APIC-access page", pGVM->iem.s.hVmxApicAccessPage);
    4848        AssertLogRelRCReturn(rc, rc);
  • trunk/src/VBox/VMM/VMMR0/PGMR0.cpp

    r93735 r93922  
    12131213    return rc;
    12141214}
     1215
     1216
     1217#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     1218/**
     1219 * Nested \#PF Handler for nested-guest execution using nested paging.
     1220 *
     1221 * @returns Strict VBox status code (appropriate for trap handling and GC return).
     1222 * @param   pGVM                The global (ring-0) VM structure.
     1223 * @param   pGVCpu              The global (ring-0) CPU structure of the calling
     1224 *                              EMT.
     1225 * @param   uErr                The trap error code.
     1226 */
     1227VMMR0DECL(VBOXSTRICTRC) PGMR0NestedTrap0eHandlerNestedPaging(PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
     1228                                                             PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
     1229                                                             bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk)
     1230{
     1231    Assert(enmShwPagingMode == PGMMODE_EPT);
     1232    NOREF(enmShwPagingMode);
     1233
     1234    bool fLockTaken;
     1235    VBOXSTRICTRC rcStrict = PGM_BTH_NAME_EPT_PROT(NestedTrap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysNested, fIsLinearAddrValid,
     1236                                                                       GCPtrNested, pWalk, &fLockTaken);
     1237    if (fLockTaken)
     1238    {
     1239        PGM_LOCK_ASSERT_OWNER(pGVCpu->CTX_SUFF(pVM));
     1240        PGM_UNLOCK(pGVCpu->CTX_SUFF(pVM));
     1241    }
     1242    if (rcStrict == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
     1243        rcStrict = VINF_SUCCESS;
     1244    return rcStrict;
     1245}
     1246#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
    12151247
    12161248
  • trunk/src/VBox/VMM/VMMR0/PGMR0Bth.h

    r93115 r93922  
    2222RT_C_DECLS_BEGIN
    2323PGM_BTH_DECL(int, Trap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
     24PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
     25                                       bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, bool *pfLockTaken);
    2426RT_C_DECLS_END
    2527
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r93905 r93922  
    42644264             * disabled will automatically prevent exposing features that rely on
    42654265             */
    4266             rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxEpt", &pVM->cpum.s.fNestedVmxEpt, false);
     4266            rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxEpt", &pVM->cpum.s.fNestedVmxEpt, true);
    42674267            AssertLogRelRCReturn(rc, rc);
    42684268
     
    42724272             * it.
    42734273             */
    4274             rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxUnrestrictedGuest", &pVM->cpum.s.fNestedVmxUnrestrictedGuest, false);
     4274            rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxUnrestrictedGuest", &pVM->cpum.s.fNestedVmxUnrestrictedGuest, true);
    42754275            AssertLogRelRCReturn(rc, rc);
    42764276
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r93905 r93922  
    10801080    {
    10811081        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
    1082         PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
     1082        PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
    10831083    }
    10841084}
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r93905 r93922  
    10241024        {
    10251025            PVMCPU pVCpu = pVM->apCpusR3[i];
    1026             rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
     1026            rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
    10271027            if (RT_FAILURE(rc))
    10281028                break;
     
    16471647        pVM->pgm.s.HCPhysInvMmioPg |= UINT64_C(0x000f0000000000);
    16481648    }
    1649     Assert(pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth == cMaxPhysAddrWidth);
     1649    /* Disabled the below assertion -- triggers 24 vs 39 on my Intel Skylake box for a 32-bit (Guest-type Other/Unknown) VM. */
     1650    //AssertMsg(pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth == cMaxPhysAddrWidth,
     1651    //          ("CPUM %u - PGM %u\n", pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth, cMaxPhysAddrWidth));
    16501652#else
    16511653    uint32_t const cMaxPhysAddrWidth = pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth;
     
    18521854    pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
    18531855
    1854     int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
     1856    int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
    18551857    AssertReleaseRC(rc);
    18561858
     
    19181920        PVMCPU  pVCpu = pVM->apCpusR3[i];
    19191921
    1920         int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
     1922        int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
    19211923        AssertReleaseRC(rc);
    19221924
     
    22982300{
    22992301    pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
    2300     int rc = PGMHCChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu));
     2302    int rc = PGMHCChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu), false /* fForce */);
    23012303    Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    23022304    AssertRCReturn(rc, rc);
  • trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp

    r93716 r93922  
    31903190                PVMCPU pVCpu = pVM->apCpusR3[i];
    31913191
    3192                 rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
     3192                rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode, false /* fForce */);
    31933193                AssertLogRelRCReturn(rc, rc);
    31943194
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r93906 r93922  
    942942# define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR          RT_BIT_32(0)
    943943/** Translating a nested-guest linear address failed accessing a
    944  *  paging-structure entry. */
     944 *  paging-structure entry or updating accessed/dirty bits. */
    945945# define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE         RT_BIT_32(1)
    946946/** @} */
    947947
    948948PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) iemVmxApicAccessPageHandler;
     949# ifndef IN_RING3
     950DECLCALLBACK(FNPGMRZPHYSPFHANDLER)  iemVmxApicAccessPagePfHandler;
     951# endif
    949952#endif
    950953
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r93905 r93922  
    27452745#ifndef IN_RING3
    27462746    DECLCALLBACKMEMBER(int, pfnTrap0eHandler,(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
     2747    DECLCALLBACKMEMBER(int, pfnNestedTrap0eHandler,(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
     2748                                                    bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
     2749                                                    bool *pfLockTaken));
    27472750#endif
    27482751#ifdef VBOX_STRICT
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette