VirtualBox

Changeset 104932 in vbox


Timestamp: Jun 15, 2024 12:29:39 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 163534
Message:

VMM/PGM,IEM: Refactored+copied PGMGstGetPage into PGMGstQueryPageFast, which takes care of table walking, setting the A & D bits and validating the access. Use the new function in IEM. bugref:10687

Location: trunk
Files: 9 edited

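The new API folds translation, access validation and A/D-bit maintenance into one call. For context, a minimal caller sketch assuming only the declarations added in pgm.h below; pVCpu, GCPtrMem and GCPhys are hypothetical surroundings:

    /* Translate GCPtrMem for a ring-3 write; PGM sets the A (and D) bits
       during the walk instead of in a separate PGMGstModifyPage pass. */
    PGMPTWALKFAST WalkFast;
    PGMPTWALKFAST_ZERO(&WalkFast);
    int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem,
                                 PGMQPAGE_F_WRITE | PGMQPAGE_F_USER_MODE,
                                 &WalkFast);
    if (RT_SUCCESS(rc))
        GCPhys = WalkFast.GCPhys; /* includes the page offset of GCPtrMem */
    else
        /* rc is VERR_PAGE_TABLE_NOT_PRESENT, VERR_ACCESS_DENIED,
           VERR_RESERVED_PAGE_TABLE_BITS, etc.; details in WalkFast.fFailed. */
        Log(("translation failed: %Rrc fFailed=%#x\n", rc, WalkFast.fFailed));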
  • trunk/include/VBox/err.h

    r104840 r104932  
    110110/** The requested feature is not supported by NEM. */
    111111#define VERR_NOT_SUP_BY_NEM                 (-1026)
     112/** Reserved page table bits set. */
     113#define VERR_RESERVED_PAGE_TABLE_BITS       (-1027)
    112114/** @} */
    113115
  • trunk/include/VBox/vmm/pgm.h

    r104910 r104932  
    316316 */
    317317typedef uint32_t PGMWALKFAIL;
    318 /** Regular page fault (MBZ since guest Walk code don't set these explicitly). */
    319 #define PGM_WALKFAIL_PAGE_FAULT                     UINT32_C(0)
     318/** No fault. */
     319#define PGM_WALKFAIL_SUCCESS                        UINT32_C(0)
     320
     321/** Not present (X86_TRAP_PF_P). */
     322#define PGM_WALKFAIL_NOT_PRESENT                    RT_BIT_32(0)
     323/** Reserved bit set in table entry (X86_TRAP_PF_RSVD). */
     324#define PGM_WALKFAIL_RESERVED_BITS                  RT_BIT_32(1)
     325/** Bad physical address (VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS). */
     326#define PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS           RT_BIT_32(2)
     327
    320328/** EPT violation - Intel. */
    321 #define PGM_WALKFAIL_EPT_VIOLATION                  RT_BIT_32(0)
     329#define PGM_WALKFAIL_EPT_VIOLATION                  RT_BIT_32(3)
    322330/** EPT violation, convertible to \#VE exception - Intel. */
    323 #define PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE      RT_BIT_32(1)
     331#define PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE      RT_BIT_32(4)
    324332/** EPT misconfiguration - Intel. */
    325 #define PGM_WALKFAIL_EPT_MISCONFIG                  RT_BIT_32(2)
    326 
     333#define PGM_WALKFAIL_EPT_MISCONFIG                  RT_BIT_32(5)
    327334/** Mask of all EPT induced page-walk failures - Intel. */
    328335#define PGM_WALKFAIL_EPT                            (  PGM_WALKFAIL_EPT_VIOLATION \
    329336                                                     | PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE \
    330337                                                     | PGM_WALKFAIL_EPT_MISCONFIG)
     338
     339/** Access denied: Not writable (VERR_ACCESS_DENIED). */
     340#define PGM_WALKFAIL_NOT_WRITABLE                   RT_BIT_32(6)
     341/** Access denied: Not executable (VERR_ACCESS_DENIED). */
     342#define PGM_WALKFAIL_NOT_EXECUTABLE                 RT_BIT_32(7)
     343/** Access denied: Not user/supervisor mode accessible (VERR_ACCESS_DENIED). */
     344#define PGM_WALKFAIL_NOT_ACCESSIBLE_BY_MODE         RT_BIT_32(8)
     345
      346/** The level the problem arose at.
      347 * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is
      348 * level 8.  This is 0 on success. */
      349#define PGM_WALKFAIL_LEVEL_MASK                     UINT32_C(0x0000f800)
      350/** Level shift (see PGM_WALKFAIL_LEVEL_MASK).   */
      351#define PGM_WALKFAIL_LEVEL_SHIFT                    11
     352
    331353/** @} */
    332354
    333355
    334 /** @name PGMPTATTRS - PGM page-table attributes.
     356/** @name PGM_PTATTRS_XXX - PGM page-table attributes.
    335357 *
    336358 * This is VirtualBox's combined page table attributes. It combines regular page
     
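Unlike PGMPTWALK, the fast structure has no separate uLevel field; the failing level travels in fFailed via the mask and shift above. A hedged decoding sketch (fFailed stands for a PGMPTWALKFAST::fFailed value):

    /* Recover the failing level: 1=PTE .. 4=PML4, 8=CR3, 0=success. */
    uint8_t const uLevel = (uint8_t)((fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT);
    if (fFailed & PGM_WALKFAIL_RESERVED_BITS)
        Log(("reserved bits set in the level %u entry\n", uLevel));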
    578600
    579601
     602/** @name PGM_WALKINFO_XXX - flag based PGM page table walk info.
     603 * @{ */
     604/** Set if the walk succeeded. */
     605#define PGM_WALKINFO_SUCCEEDED                  RT_BIT_32(0)
     606/** Whether this is a second-level address translation. */
     607#define PGM_WALKINFO_IS_SLAT                    RT_BIT_32(1)
     608/** Set if it involves a big page (2/4 MB). */
     609#define PGM_WALKINFO_BIG_PAGE                   RT_BIT_32(2)
     610/** Set if it involves a gigantic page (1 GB). */
     611#define PGM_WALKINFO_GIGANTIC_PAGE              RT_BIT_32(3)
     612
     613/** Whether the linear address (GCPtr) caused the second-level
     614 * address translation - read the code to figure this one.
     615 * @todo for PGMPTWALKFAST::fFailed?  */
     616#define PGM_WALKINFO_IS_LINEAR_ADDR_VALID       RT_BIT_32(7)
     617/** @} */
     618
     619/**
     620 * Fast page table walk information.
     621 *
     622 * This is a slimmed down version of PGMPTWALK for use by IEM.
     623 */
     624typedef struct PGMPTWALKFAST
     625{
     626    /** The linear address that is being resolved (input). */
     627    RTGCPTR         GCPtr;
     628
     629    /** The physical address that is the result of the walk (output).
     630     * This includes the offset mask from the GCPtr input value.  */
     631    RTGCPHYS        GCPhys;
     632
     633    /** The second-level physical address (input/output).
      634     *  @remarks only valid if PGM_WALKINFO_IS_SLAT is set. */
     635    RTGCPHYS        GCPhysNested;
     636
     637    /** Walk information PGM_WALKINFO_XXX (output). */
     638    uint32_t        fInfo;
     639    /** Page-walk failure type, PGM_WALKFAIL_XXX (output). */
     640    PGMWALKFAIL     fFailed;
     641
     642    /** The effective page-table attributes, PGM_PTATTRS_XXX (output). */
     643    PGMPTATTRS      fEffective;
     644} PGMPTWALKFAST;
     645/** Pointer to fast page walk information. */
     646typedef PGMPTWALKFAST *PPGMPTWALKFAST;
     647/** Pointer to const fast page walk information. */
     648typedef PGMPTWALKFAST const *PCPGMPTWALKFAST;
     649
     650#define PGMPTWALKFAST_ZERO(a_pWalkFast) do { \
     651        (a_pWalkFast)->GCPtr        = 0; \
     652        (a_pWalkFast)->GCPhys       = 0; \
     653        (a_pWalkFast)->GCPhysNested = 0; \
     654        (a_pWalkFast)->fInfo        = 0; \
     655        (a_pWalkFast)->fFailed      = 0; \
     656        (a_pWalkFast)->fEffective   = 0; \
     657    } while (0)
     658
     659
    580660/** Macro for checking if the guest is using paging.
    581661 * @param enmMode   PGMMODE_*.
     
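As a usage note, a small hypothetical helper showing how the fields and PGM_WALKINFO_XXX flags above are meant to be consumed (the function name is invented for this sketch):

    /* Hypothetical: log what a successful fast walk produced. */
    static void pgmSketchLogWalkFast(PCPGMPTWALKFAST pWalkFast)
    {
        if (pWalkFast->fInfo & PGM_WALKINFO_SUCCEEDED)
            Log(("walk: %RGv -> %RGp slat=%d big=%d gigantic=%d\n",
                 pWalkFast->GCPtr, pWalkFast->GCPhys,
                 RT_BOOL(pWalkFast->fInfo & PGM_WALKINFO_IS_SLAT),
                 RT_BOOL(pWalkFast->fInfo & PGM_WALKINFO_BIG_PAGE),
                 RT_BOOL(pWalkFast->fInfo & PGM_WALKINFO_GIGANTIC_PAGE)));
    }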
    635715/** @}*/
    636716VMMDECL(int)        PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
     717/** @name PGMQPAGE_F_XXX - Flags for PGMGstQueryPageFast
     718 * @{ */
     719/** Querying for read access, set A bits accordingly. */
     720#define PGMQPAGE_F_READ         RT_BIT_32(0)
      721/** Querying for write access, set A bits and D bit accordingly.
      722 * Don't set leaf entry bits if the page is read-only.  */
     723#define PGMQPAGE_F_WRITE        RT_BIT_32(1)
     724/** Querying for execute access, set A bits accordingly. */
     725#define PGMQPAGE_F_EXECUTE      RT_BIT_32(2)
     726/** The query is for a user mode access, so don't set leaf A or D bits
     727 * unless the effective access allows usermode access.
     728 * Assume supervisor access when not set. */
     729#define PGMQPAGE_F_USER_MODE    RT_BIT_32(3)
      730/** Treat CR0.WP as zero when evaluating the access.
     731 * @note Same value as X86_CR0_WP.  */
     732#define PGMQPAGE_F_CR0_WP0      RT_BIT_32(16)
     733/** The valid flag mask.   */
     734#define PGMQPAGE_F_VALID_MASK   UINT32_C(0x0001000f)
     735/** @} */
     736VMM_INT_DECL(int)   PGMGstQueryPageFast(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalkFast);
    637737VMMDECL(int)        PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
    638738VMM_INT_DECL(bool)  PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes);
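Worth calling out: PGMQPAGE_F_CR0_WP0 deliberately shares its value with X86_CR0_WP, so the IEM hunks below can derive it with one XOR, and the AssertCompiles there let the fAccess read/write/execute bits pass through unchanged. In sketch form, condensed from the IEMAll.cpp changes below (pVCpu and fAccess are the caller's):

    /* PGMQPAGE_F_CR0_WP0 is set exactly when CR0.WP is clear: */
    uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | PGMQPAGE_F_WRITE | PGMQPAGE_F_EXECUTE))
                    | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
    if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
        fQPage |= PGMQPAGE_F_USER_MODE; /* leaf A/D bits only if user-accessible */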
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r104877 r104932  
    511511    }
    512512
    513     PGMPTWALK Walk;
    514     int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
     513    PGMPTWALKFAST WalkFast;
     514    int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
     515                                 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
     516                                 &WalkFast);
    515517    if (RT_SUCCESS(rc))
    516         Assert(Walk.fSucceeded); /* probable. */
     518        Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
    517519    else
    518520    {
    519521        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
    520522# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    521         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    522             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
     523/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
     524 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
     525        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
     526            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    523527# endif
    524528        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
    525529    }
    526     if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
     530#if 0
     531    if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
    527532    else
    528533    {
    529534        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
    530535# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    531         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    532             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
     536/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
     537#  error completely wrong
     538        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
     539            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    533540# endif
    534541        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    535542    }
    536     if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
     543    if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
    537544    else
    538545    {
    539546        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
    540547# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    541         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    542             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
     548/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
     549#  error completely wrong.
     550        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
     551            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    543552# endif
    544553        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    545554    }
    546     RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
     555#else
      556    Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
     557    Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
     558#endif
      559    RTGCPHYS const GCPhys = WalkFast.GCPhys;
    547560    /** @todo Check reserved bits and such stuff. PGM is better at doing
    548561     *        that, so do it when implementing the guest virtual address
     
    929942            pVCpu->iem.s.CodeTlb.cTlbHits++;
    930943# endif
     944            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
     945
     946            /* Check TLB page table level access flags. */
     947            if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
     948            {
     949                if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
     950                {
     951                    Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
     952                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     953                }
     954                if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
     955                {
     956                    Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
     957                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     958                }
     959            }
     960
     961            /* Look up the physical page info if necessary. */
     962            if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
     963            { /* not necessary */ }
     964            else
     965            {
     966                if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
     967                { /* likely */ }
     968                else
     969                    IEMTlbInvalidateAllPhysicalSlow(pVCpu);
     970                pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
     971                int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
     972                                                    &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
     973                AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
     974            }
    931975        }
    932976        else
    933977        {
    934978            pVCpu->iem.s.CodeTlb.cTlbMisses++;
    935             PGMPTWALK Walk;
    936             int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
    937             if (RT_FAILURE(rc))
     979
     980            /* This page table walking will set A bits as required by the access while performing the walk.
     981               ASSUMES these are set when the address is translated rather than on commit... */
     982            /** @todo testcase: check when A bits are actually set by the CPU for code.  */
     983            PGMPTWALKFAST WalkFast;
     984            int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
     985                                         IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
     986                                         &WalkFast);
     987            if (RT_SUCCESS(rc))
     988                Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
     989            else
    938990            {
    939991#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    940                 /** @todo Nested VMX: Need to handle EPT violation/misconfig here?  */
     992                /** @todo Nested VMX: Need to handle EPT violation/misconfig here?  OF COURSE! */
     941993                Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
    942994#endif
     
    946998
    947999            AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
    948             Assert(Walk.fSucceeded);
    9491000            pTlbe->uTag             = uTag;
    950             pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
    951                                     | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
    952             pTlbe->GCPhys           = Walk.GCPhys;
     1001            pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
     1002                                    | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/;
     1003            RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
     1004            pTlbe->GCPhys           = GCPhysPg;
    9531005            pTlbe->pbMappingR3      = NULL;
    954         }
    955 
    956         /*
    957          * Check TLB page table level access flags.
    958          */
    959         if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
    960         {
    961             if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
    962             {
    963                 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
    964                 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    965             }
    966             if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
    967             {
    968                 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
    969                 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    970             }
    971         }
    972 
    973         /*
    974          * Set the accessed flags.
    975          * ASSUMES this is set when the address is translated rather than on commit...
    976          */
    977         /** @todo testcase: check when the A bit are actually set by the CPU for code. */
    978         if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
    979         {
    980             int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
    981             AssertRC(rc2);
    982             /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
    983             Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
    984             pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
    985         }
    986 
    987         /*
    988          * Look up the physical page info if necessary.
    989          */
    990         if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
    991         { /* not necessary */ }
    992         else
    993         {
    994             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
    995             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
    996             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
    997             AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED   == IEMTLBE_F_PG_UNASSIGNED);
    998             AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE    == IEMTLBE_F_PG_CODE_PAGE);
     1006            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
     1007            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
     1008            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
     1009
     1010            /* Resolve the physical address. */
    9991011            if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
    10001012            { /* likely */ }
    10011013            else
    10021014                IEMTlbInvalidateAllPhysicalSlow(pVCpu);
    1003             pTlbe->fFlagsAndPhysRev &= ~(  IEMTLBE_F_PHYS_REV
    1004                                          | IEMTLBE_F_NO_MAPPINGR3
    1005                                          | IEMTLBE_F_PG_NO_READ
    1006                                          | IEMTLBE_F_PG_NO_WRITE
    1007                                          | IEMTLBE_F_PG_UNASSIGNED
    1008                                          | IEMTLBE_F_PG_CODE_PAGE);
    1009             int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
    1010                                                 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
     1015            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
     1016            rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
     1017                                            &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
    10111018            AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
    10121019        }
     
    44584465            break;
    44594466
     4467        case VERR_RESERVED_PAGE_TABLE_BITS:
     4468            uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
     4469            break;
     4470
    44604471        default:
    44614472            AssertMsgFailed(("%Rrc\n", rc));
     
    44644475            uErr = X86_TRAP_PF_P;
    44654476            break;
    4466 
    4467         /** @todo reserved  */
    44684477    }
    44694478
     
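The success-path assertion in the hunk below packs the removed slow-path checks into one expression. Decoded into plain form for readability (the three booleans are invented for this sketch, not part of the change):

    /* A successful walk implies the requested access was legal: */
    bool const fWriteOk = !(fAccess & IEM_ACCESS_TYPE_WRITE)
                       || (WalkFast.fEffective & X86_PTE_RW)
                       || (   (IEM_GET_CPL(pVCpu) != 3 || (fAccess & IEM_ACCESS_WHAT_SYS))
                           && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)); /* supervisor write w/ WP=0 */
    bool const fUserOk  = (WalkFast.fEffective & X86_PTE_US)
                       || IEM_GET_CPL(pVCpu) != 3
                       || (fAccess & IEM_ACCESS_WHAT_SYS);
    bool const fExecOk  = !(fAccess & IEM_ACCESS_TYPE_EXEC)
                       || !(WalkFast.fEffective & X86_PTE_PAE_NX)
                       || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE);
    Assert(fWriteOk && fUserOk && fExecOk);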
    56575666     *        iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
    56585667     *        here. */
    5659     PGMPTWALK Walk;
    5660     int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
    5661     if (RT_FAILURE(rc))
    5662     {
    5663         LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
    5664         /** @todo Check unassigned memory in unpaged mode. */
    5665         /** @todo Reserved bits in page tables. Requires new PGM interface. */
     5668    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
     5669    PGMPTWALKFAST WalkFast;
     5670    AssertCompile(IEM_ACCESS_TYPE_READ  == PGMQPAGE_F_READ);
     5671    AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
     5672    AssertCompile(IEM_ACCESS_TYPE_EXEC  == PGMQPAGE_F_EXECUTE);
     5673    AssertCompile(X86_CR0_WP            == PGMQPAGE_F_CR0_WP0);
     5674    uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
     5675                    | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
     5676    if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
     5677        fQPage |= PGMQPAGE_F_USER_MODE;
     5678    int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
     5679    if (RT_SUCCESS(rc))
     5680    {
     5681        Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
     5682
     5683        /* If the page is writable and does not have the no-exec bit set, all
     5684           access is allowed.  Otherwise we'll have to check more carefully... */
     5685        Assert(   (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
     5686               || (   (   !(fAccess & IEM_ACCESS_TYPE_WRITE)
     5687                       || (WalkFast.fEffective & X86_PTE_RW)
     5688                       || (   (    IEM_GET_CPL(pVCpu) != 3
     5689                               || (fAccess & IEM_ACCESS_WHAT_SYS))
      5690                           && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
     5691                    && (   (WalkFast.fEffective & X86_PTE_US)
     5692                        || IEM_GET_CPL(pVCpu) != 3
     5693                        || (fAccess & IEM_ACCESS_WHAT_SYS) )
     5694                    && (   !(fAccess & IEM_ACCESS_TYPE_EXEC)
     5695                        || !(WalkFast.fEffective & X86_PTE_PAE_NX)
     5696                        || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
     5697                  )
     5698              );
     5699
     5700        /* PGMGstQueryPageFast sets the A & D bits. */
     5701        /** @todo testcase: check when A and D bits are actually set by the CPU.  */
     5702        Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
     5703
     5704        *pGCPhysMem = WalkFast.GCPhys;
     5705        return VINF_SUCCESS;
     5706    }
     5707
     5708    LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
     5709    /** @todo Check unassigned memory in unpaged mode. */
    56665710#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    5667         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    5668             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
     5711    if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
      5712        IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    56695713#endif
    5670         *pGCPhysMem = NIL_RTGCPHYS;
    5671         return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
    5672     }
    5673 
    5674     /* If the page is writable and does not have the no-exec bit set, all
    5675        access is allowed.  Otherwise we'll have to check more carefully... */
    5676     if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
    5677     {
    5678         /* Write to read only memory? */
    5679         if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
    5680             && !(Walk.fEffective & X86_PTE_RW)
    5681             && (   (    IEM_GET_CPL(pVCpu) == 3
    5682                     && !(fAccess & IEM_ACCESS_WHAT_SYS))
    5683                 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
    5684         {
    5685             LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
    5686             *pGCPhysMem = NIL_RTGCPHYS;
    5687 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    5688             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    5689                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    5690 #endif
    5691             return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
    5692         }
    5693 
    5694         /* Kernel memory accessed by userland? */
    5695         if (   !(Walk.fEffective & X86_PTE_US)
    5696             && IEM_GET_CPL(pVCpu) == 3
    5697             && !(fAccess & IEM_ACCESS_WHAT_SYS))
    5698         {
    5699             LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
    5700             *pGCPhysMem = NIL_RTGCPHYS;
    5701 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    5702             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    5703                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    5704 #endif
    5705             return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
    5706         }
    5707 
    5708         /* Executing non-executable memory? */
    5709         if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
    5710             && (Walk.fEffective & X86_PTE_PAE_NX)
    5711             && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
    5712         {
    5713             LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
    5714             *pGCPhysMem = NIL_RTGCPHYS;
    5715 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    5716             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    5717                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    5718 #endif
    5719             return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
    5720                                      VERR_ACCESS_DENIED);
    5721         }
    5722     }
    5723 
    5724     /*
    5725      * Set the dirty / access flags.
    5726      * ASSUMES this is set when the address is translated rather than on committ...
    5727      */
    5728     /** @todo testcase: check when A and D bits are actually set by the CPU.  */
    5729     uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
    5730     if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
    5731     {
    5732         int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
    5733         AssertRC(rc2);
    5734         /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
    5735         Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
    5736     }
    5737 
    5738     RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
    5739     *pGCPhysMem = GCPhys;
    5740     return VINF_SUCCESS;
     5714    *pGCPhysMem = NIL_RTGCPHYS;
     5715    return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
    57415716}
    57425717
     
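In the reworked data-TLB lookup below, a tag match no longer suffices for a hit: an entry still needing A-bit (or, for writes, D-bit) maintenance is deliberately treated as a miss so that the fresh PGMGstQueryPageFast walk sets those bits. A condensed sketch of the hit predicate (fPendingAD and fTlbHit are names invented here):

    /* Hit only if the tag matches and no A/D maintenance is outstanding
       for this kind of access; otherwise take the miss path, whose
       PGMGstQueryPageFast walk sets A (and D for writes) as it goes. */
    uint64_t const fPendingAD = IEMTLBE_F_PT_NO_ACCESSED
                              | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
    bool const fTlbHit = pTlbe->uTag == uTag
                      && !(pTlbe->fFlagsAndPhysRev & fPendingAD);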
    63686343
    63696344    /*
    6370      * Get the TLB entry for this page.
     6345     * Get the TLB entry for this page and check PT flags.
     6346     *
     6347     * We reload the TLB entry if we need to set the dirty bit (accessed
     6348     * should in theory always be set).
    63716349     */
     6350    uint8_t           *pbMem = NULL;
    63726351    uint64_t const     uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrMem);
    63736352    PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
    6374     if (pTlbe->uTag == uTag)
    6375     {
    6376 # ifdef VBOX_WITH_STATISTICS
    6377         pVCpu->iem.s.DataTlb.cTlbHits++;
     6353    if (   pTlbe->uTag == uTag
     6354        && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0))) )
     6355    {
     6356        STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     6357
     6358        /* If the page is either supervisor only or non-writable, we need to do
     6359           more careful access checks. */
     6360        if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
     6361        {
     6362            /* Write to read only memory? */
     6363            if (   (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
     6364                && (fAccess & IEM_ACCESS_TYPE_WRITE)
     6365                && (   (    IEM_GET_CPL(pVCpu) == 3
     6366                        && !(fAccess & IEM_ACCESS_WHAT_SYS))
     6367                    || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
     6368            {
     6369                LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
     6370                return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
     6371            }
     6372
     6373            /* Kernel memory accessed by userland? */
     6374            if (   (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
     6375                && IEM_GET_CPL(pVCpu) == 3
     6376                && !(fAccess & IEM_ACCESS_WHAT_SYS))
     6377            {
     6378                LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
     6379                return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
     6380            }
     6381        }
     6382
     6383        /* Look up the physical page info if necessary. */
     6384        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
     6385# ifdef IN_RING3
     6386            pbMem = pTlbe->pbMappingR3;
     6387# else
     6388            pbMem = NULL;
    63786389# endif
     6390        else
     6391        {
     6392            if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
     6393            { /* likely */ }
     6394            else
     6395                IEMTlbInvalidateAllPhysicalSlow(pVCpu);
     6396            pTlbe->pbMappingR3       = NULL;
     6397            pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
     6398            int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
     6399                                                &pbMem, &pTlbe->fFlagsAndPhysRev);
     6400            AssertRCReturn(rc, rc);
     6401# ifdef IN_RING3
     6402            pTlbe->pbMappingR3 = pbMem;
     6403# endif
     6404        }
    63796405    }
    63806406    else
    63816407    {
    63826408        pVCpu->iem.s.DataTlb.cTlbMisses++;
    6383         PGMPTWALK Walk;
    6384         int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
    6385         if (RT_FAILURE(rc))
     6409
     6410        /* This page table walking will set A bits as required by the access while performing the walk.
     6411           ASSUMES these are set when the address is translated rather than on commit... */
     6412        /** @todo testcase: check when A bits are actually set by the CPU for code.  */
     6413        PGMPTWALKFAST WalkFast;
     6414        AssertCompile(IEM_ACCESS_TYPE_READ  == PGMQPAGE_F_READ);
     6415        AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
     6416        AssertCompile(IEM_ACCESS_TYPE_EXEC  == PGMQPAGE_F_EXECUTE);
     6417        AssertCompile(X86_CR0_WP            == PGMQPAGE_F_CR0_WP0);
     6418        uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
     6419                        | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
     6420        if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
     6421            fQPage |= PGMQPAGE_F_USER_MODE;
     6422        int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
     6423        if (RT_SUCCESS(rc))
     6424            Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
     6425        else
    63866426        {
    63876427            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
    63886428# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    6389             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    6390                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
     6429            if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
     6430                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    63916431# endif
    63926432            return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
    63936433        }
    63946434
    6395         Assert(Walk.fSucceeded);
    63966435        pTlbe->uTag             = uTag;
    6397         pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
    6398         pTlbe->GCPhys           = Walk.GCPhys;
     6436        pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
     6437        RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
     6438        pTlbe->GCPhys           = GCPhysPg;
    63996439        pTlbe->pbMappingR3      = NULL;
    6400     }
    6401 
    6402     /*
    6403      * Check TLB page table level access flags.
    6404      */
    6405     /* If the page is either supervisor only or non-writable, we need to do
    6406        more careful access checks. */
    6407     if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
    6408     {
    6409         /* Write to read only memory? */
    6410         if (   (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
    6411             && (fAccess & IEM_ACCESS_TYPE_WRITE)
    6412             && (   (    IEM_GET_CPL(pVCpu) == 3
    6413                     && !(fAccess & IEM_ACCESS_WHAT_SYS))
    6414                 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
    6415         {
    6416             LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
    6417 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    6418             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    6419                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    6420 # endif
    6421             return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
    6422         }
    6423 
    6424         /* Kernel memory accessed by userland? */
    6425         if (   (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
    6426             && IEM_GET_CPL(pVCpu) == 3
    6427             && !(fAccess & IEM_ACCESS_WHAT_SYS))
    6428         {
    6429             LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
    6430 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    6431             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    6432                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    6433 # endif
    6434             return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
    6435         }
    6436     }
    6437 
    6438     /*
    6439      * Set the dirty / access flags.
    6440      * ASSUMES this is set when the address is translated rather than on commit...
    6441      */
    6442     /** @todo testcase: check when A and D bits are actually set by the CPU.  */
    6443     uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
    6444     if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
    6445     {
    6446         uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
    6447         int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
    6448         AssertRC(rc2);
    6449         /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
    6450         Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
    6451         pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
    6452     }
    6453 
    6454     /*
    6455      * Look up the physical page info if necessary.
    6456      */
    6457     uint8_t *pbMem = NULL;
    6458     if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
    6459 # ifdef IN_RING3
    6460         pbMem = pTlbe->pbMappingR3;
    6461 # else
    6462         pbMem = NULL;
    6463 # endif
    6464     else
    6465     {
    6466         AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
    6467         AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
    6468         AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
    6469         AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED   == IEMTLBE_F_PG_UNASSIGNED);
    6470         AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE    == IEMTLBE_F_PG_CODE_PAGE);
    6471         if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
    6472         { /* likely */ }
    6473         else
    6474             IEMTlbInvalidateAllPhysicalSlow(pVCpu);
    6475         pTlbe->pbMappingR3       = NULL;
    6476         pTlbe->fFlagsAndPhysRev &= ~(  IEMTLBE_F_PHYS_REV
    6477                                      | IEMTLBE_F_NO_MAPPINGR3
    6478                                      | IEMTLBE_F_PG_NO_READ
    6479                                      | IEMTLBE_F_PG_NO_WRITE
    6480                                      | IEMTLBE_F_PG_UNASSIGNED
    6481                                      | IEMTLBE_F_PG_CODE_PAGE);
    6482         int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
    6483                                             &pbMem, &pTlbe->fFlagsAndPhysRev);
     6440        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
     6441        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
     6442        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
     6443        Assert(   !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
     6444               || IEM_GET_CPL(pVCpu) != 3
     6445               || (fAccess & IEM_ACCESS_WHAT_SYS));
     6446
     6447        /* Resolve the physical address. */
     6448        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
     6449        rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
     6450                                        &pbMem, &pTlbe->fFlagsAndPhysRev);
    64846451        AssertRCReturn(rc, rc);
    64856452# ifdef IN_RING3
     
    67646731
    67656732    /*
    6766      * Get the TLB entry for this page.
     6733     * Get the TLB entry for this page checking that it has the A & D bits
     6734     * set as per fAccess flags.
    67676735     */
     6736    /** @todo make the caller pass these in with fAccess. */
     6737    uint64_t const     fNoUser          = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
     6738                                        ? IEMTLBE_F_PT_NO_USER : 0;
     6739    uint64_t const     fNoWriteNoDirty  = fAccess & IEM_ACCESS_TYPE_WRITE
     6740                                        ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
     6741                                          | (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
     6742                                             || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
     6743                                             ? IEMTLBE_F_PT_NO_WRITE : 0)
     6744                                        : 0;
     6745    uint64_t const     fNoRead          = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
     6746
    67686747    uint64_t const     uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrMem);
    67696748    PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
    6770     if (pTlbe->uTag == uTag)
     6749    if (   pTlbe->uTag == uTag
     6750        && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY))) )
    67716751        STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
    67726752    else
    67736753    {
    67746754        pVCpu->iem.s.DataTlb.cTlbMisses++;
    6775         PGMPTWALK Walk;
    6776         int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
    6777         if (RT_FAILURE(rc))
     6755
     6756        /* This page table walking will set A and D bits as required by the
     6757           access while performing the walk.
     6758           ASSUMES these are set when the address is translated rather than on commit... */
     6759        /** @todo testcase: check when A and D bits are actually set by the CPU.  */
     6760        PGMPTWALKFAST WalkFast;
     6761        AssertCompile(IEM_ACCESS_TYPE_READ  == PGMQPAGE_F_READ);
     6762        AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
     6763        AssertCompile(IEM_ACCESS_TYPE_EXEC  == PGMQPAGE_F_EXECUTE);
     6764        AssertCompile(X86_CR0_WP            == PGMQPAGE_F_CR0_WP0);
     6765        uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
     6766                        | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
     6767        if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
     6768            fQPage |= PGMQPAGE_F_USER_MODE;
     6769        int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
     6770        if (RT_SUCCESS(rc))
     6771            Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
     6772        else
    67786773        {
    67796774            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
    67806775# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    6781             if (Walk.fFailed & PGM_WALKFAIL_EPT)
     6776            if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
    67826777                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    67836778# endif
     
    67856780        }
    67866781
    6787         Assert(Walk.fSucceeded);
    67886782        pTlbe->uTag             = uTag;
    6789         pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
    6790         pTlbe->GCPhys           = Walk.GCPhys;
     6783        pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
     6784        RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
     6785        pTlbe->GCPhys           = GCPhysPg;
    67916786        pTlbe->pbMappingR3      = NULL;
     6787        Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
     6788        Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
     6789        Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
     6790
     6791        /* Resolve the physical address. */
     6792        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
     6793        uint8_t *pbMemFullLoad = NULL;
     6794        rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
     6795                                        &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
     6796        AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
     6797# ifdef IN_RING3
     6798        pTlbe->pbMappingR3 = pbMemFullLoad;
     6799# endif
    67926800    }
    67936801
    67946802    /*
    67956803     * Check the flags and physical revision.
     6804     * Note! This will revalidate the uTlbPhysRev after a full load.  This is
     6805     *       just to keep the code structure simple (i.e. avoid gotos or similar).
    67966806     */
    6797     /** @todo make the caller pass these in with fAccess. */
    6798     uint64_t const fNoUser          = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
    6799                                     ? IEMTLBE_F_PT_NO_USER : 0;
    6800     uint64_t const fNoWriteNoDirty  = fAccess & IEM_ACCESS_TYPE_WRITE
    6801                                     ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
    6802                                       | (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
    6803                                          || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
    6804                                          ? IEMTLBE_F_PT_NO_WRITE : 0)
    6805                                     : 0;
    6806     uint64_t const fNoRead          = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
    6807     uint8_t       *pbMem            = NULL;
     6807    uint8_t *pbMem;
    68086808    if (   (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
    68096809        == pVCpu->iem.s.DataTlb.uTlbPhysRev)
     
    68156815    else
    68166816    {
     6817        Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
     6818
    68176819        /*
    68186820         * Okay, something isn't quite right or needs refreshing.
     
    68236825            LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
    68246826# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     6827/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
     6828 *        to trigger an \#PG or a VM nested paging exit here yet! */
    68256829            if (Walk.fFailed & PGM_WALKFAIL_EPT)
    68266830                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
     
    68346838            LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
    68356839# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     6840/** @todo TLB: See above. */
    68366841            if (Walk.fFailed & PGM_WALKFAIL_EPT)
    68376842                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    68386843# endif
    68396844            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
    6840         }
    6841 
    6842         /* Set the dirty / access flags.
    6843            ASSUMES this is set when the address is translated rather than on commit... */
    6844         /** @todo testcase: check when A and D bits are actually set by the CPU.  */
    6845         if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
    6846         {
    6847             uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
    6848             int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
    6849             AssertRC(rc2);
    6850             /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
    6851             Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
    6852             pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
    68536845        }
    68546846
     
    68646856        else
    68656857        {
    6866             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
    6867             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
    6868             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
    6869             AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED   == IEMTLBE_F_PG_UNASSIGNED);
    6870             AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE    == IEMTLBE_F_PG_CODE_PAGE);
    68716858            pTlbe->pbMappingR3       = NULL;
    6872             pTlbe->fFlagsAndPhysRev &= ~(  IEMTLBE_F_PHYS_REV
    6873                                          | IEMTLBE_F_NO_MAPPINGR3
    6874                                          | IEMTLBE_F_PG_NO_READ
    6875                                          | IEMTLBE_F_PG_NO_WRITE
    6876                                          | IEMTLBE_F_PG_UNASSIGNED
    6877                                          | IEMTLBE_F_PG_CODE_PAGE);
     6859            pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
     6860            pbMem = NULL;
    68786861            int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
    68796862                                                &pbMem, &pTlbe->fFlagsAndPhysRev);
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp

    r104543 r104932  
    41364136 *                      applicable.
    41374137 */
    4138 VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT
    4139 {
    4140     Assert(pWalk->fIsSlat);
     4138VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT
     4139{
     4140    Assert(pWalk->fInfo & PGM_WALKINFO_IS_SLAT);
    41414141    Assert(pWalk->fFailed & PGM_WALKFAIL_EPT);
    41424142    Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEptXcptVe);          /* #VE exceptions not supported. */
     
    41474147        LogFlow(("EptViolation: cs:rip=%04x:%08RX64 fAccess=%#RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fAccess));
    41484148        uint64_t const fEptAccess = (pWalk->fEffective & PGM_PTATTRS_EPT_MASK) >> PGM_PTATTRS_EPT_SHIFT;
    4149         return iemVmxVmexitEptViolation(pVCpu, fAccess, fSlatFail, fEptAccess, pWalk->GCPhysNested, pWalk->fIsLinearAddrValid,
     4149        return iemVmxVmexitEptViolation(pVCpu, fAccess, fSlatFail, fEptAccess, pWalk->GCPhysNested,
     4150                                        RT_BOOL(pWalk->fInfo & PGM_WALKINFO_IS_LINEAR_ADDR_VALID),
    41504151                                        pWalk->GCPtr, cbInstr);
    41514152    }
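Since iemVmxVmexitEpt now receives a PGMPTWALKFAST, the old PGMPTWALK bitfields arrive as PGM_WALKINFO_XXX flags. The equivalents relied on above, in sketch form (the local booleans are invented for illustration):

    bool const fIsSlat          = RT_BOOL(pWalk->fInfo & PGM_WALKINFO_IS_SLAT);
    bool const fLinearAddrValid = RT_BOOL(pWalk->fInfo & PGM_WALKINFO_IS_LINEAR_ADDR_VALID);
    Assert(fIsSlat); /* EPT exits only arise from second-level translation. */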
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r104548 r104932  
    600600        PGM_TYPE_REAL,
    601601        PGM_GST_NAME_REAL(GetPage),
     602        PGM_GST_NAME_REAL(QueryPageFast),
    602603        PGM_GST_NAME_REAL(ModifyPage),
    603604        PGM_GST_NAME_REAL(Enter),
     
    610611        PGM_TYPE_PROT,
    611612        PGM_GST_NAME_PROT(GetPage),
     613        PGM_GST_NAME_PROT(QueryPageFast),
    612614        PGM_GST_NAME_PROT(ModifyPage),
    613615        PGM_GST_NAME_PROT(Enter),
     
    620622        PGM_TYPE_32BIT,
    621623        PGM_GST_NAME_32BIT(GetPage),
     624        PGM_GST_NAME_32BIT(QueryPageFast),
    622625        PGM_GST_NAME_32BIT(ModifyPage),
    623626        PGM_GST_NAME_32BIT(Enter),
     
    630633        PGM_TYPE_PAE,
    631634        PGM_GST_NAME_PAE(GetPage),
     635        PGM_GST_NAME_PAE(QueryPageFast),
    632636        PGM_GST_NAME_PAE(ModifyPage),
    633637        PGM_GST_NAME_PAE(Enter),
     
    641645        PGM_TYPE_AMD64,
    642646        PGM_GST_NAME_AMD64(GetPage),
     647        PGM_GST_NAME_AMD64(QueryPageFast),
    643648        PGM_GST_NAME_AMD64(ModifyPage),
    644649        PGM_GST_NAME_AMD64(Enter),
     
    19211926    AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
    19221927    return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
     1928}
     1929
     1930
     1931/**
     1932 * Gets effective Guest OS page information.
     1933 *
     1934 * @returns VBox status code.
     1935 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     1936 * @param   GCPtr       Guest Context virtual address of the page.
     1937 * @param   fFlags      PGMQPAGE_F_XXX. If zero, no accessed or dirty bits will
     1938 *                      be set.
     1939 * @param   pWalk       Where to store the page walk information.
     1940 * @thread  EMT(pVCpu)
     1941 */
     1942VMM_INT_DECL(int) PGMGstQueryPageFast(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk)
     1943{
     1944    VMCPU_ASSERT_EMT(pVCpu);
     1945    Assert(pWalk);
     1946    Assert(!(fFlags & ~(PGMQPAGE_F_VALID_MASK)));
     1947    Assert(!(fFlags & PGMQPAGE_F_EXECUTE) || !(fFlags & PGMQPAGE_F_WRITE));
     1948    uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
     1949    AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
      1950    AssertReturn(g_aPgmGuestModeData[idx].pfnQueryPageFast, VERR_PGM_MODE_IPE);
     1951    return g_aPgmGuestModeData[idx].pfnQueryPageFast(pVCpu, GCPtr, fFlags, pWalk);
    19231952}
    19241953
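Per the doxygen above, fFlags=0 queries without setting A or D bits, so the function doubles as a side-effect-free probe; a hedged sketch (surrounding variables hypothetical):

    /* Inspect a mapping without touching the guest A/D bits. */
    PGMPTWALKFAST WalkFast;
    PGMPTWALKFAST_ZERO(&WalkFast);
    int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags*/, &WalkFast);
    if (RT_SUCCESS(rc) && !(WalkFast.fEffective & X86_PTE_RW))
        Log(("page at %RGv is read-only\n", GCPtr));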
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r104767 r104932  
    8787
    8888
    89 DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
    90 {
    91     NOREF(iLevel); NOREF(pVCpu);
     89DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
     90{
     91    NOREF(pVCpu);
    9292    pWalk->fNotPresent     = true;
    93     pWalk->uLevel          = (uint8_t)iLevel;
     93    pWalk->uLevel          = uLevel;
     94    pWalk->fFailed         = PGM_WALKFAIL_NOT_PRESENT
     95                           | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    9496    return VERR_PAGE_TABLE_NOT_PRESENT;
    9597}
    9698
    97 DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
     99DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
    98100{
    99101    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    100102    pWalk->fBadPhysAddr    = true;
    101     pWalk->uLevel          = (uint8_t)iLevel;
     103    pWalk->uLevel          = uLevel;
     104    pWalk->fFailed         = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS
     105                           | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    102106    return VERR_PAGE_TABLE_NOT_PRESENT;
    103107}
    104108
    105 DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
     109DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
    106110{
    107111    NOREF(pVCpu);
    108112    pWalk->fRsvdError      = true;
    109     pWalk->uLevel          = (uint8_t)iLevel;
     113    pWalk->uLevel          = uLevel;
     114    pWalk->fFailed         = PGM_WALKFAIL_RESERVED_BITS
     115                           | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    110116    return VERR_PAGE_TABLE_NOT_PRESENT;
    111117}
     
    297303# endif
    298304            pWalk->GCPhys     = GCPhysPde;
    299             PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
     305            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys); /** @todo why do we apply it here and not below?!? */
    300306            return VINF_SUCCESS;
    301307        }
     
    443449}
    444450
     451
     454
     455#if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX) || defined(DOXYGEN_RUNNING)
     456/** Converts regular style walk info to fast style. */
      457DECL_FORCE_INLINE(void) PGM_GST_NAME(ConvertPtWalkToFast)(PGMPTWALK const *pSrc, PPGMPTWALKFAST pDst)
     458{
     459    pDst->GCPtr              = pSrc->GCPtr;
     460    pDst->GCPhys             = pSrc->GCPhys;
     461    pDst->GCPhysNested       = pSrc->GCPhysNested;
     462    pDst->fInfo              = (pSrc->fSucceeded         ? PGM_WALKINFO_SUCCEEDED            : 0)
     463                             | (pSrc->fIsSlat            ? PGM_WALKINFO_IS_SLAT              : 0)
     464                             | (pSrc->fIsLinearAddrValid ? PGM_WALKINFO_IS_LINEAR_ADDR_VALID : 0)
     465                             | ((uint32_t)pSrc->uLevel << PGM_WALKINFO_LEVEL_SHIFT);
     466    pDst->fFailed            = pSrc->fFailed;
     467    pDst->fEffective         = pSrc->fEffective;
     468}
     469#endif
     470
     471
     472#if PGM_GST_TYPE == PGM_TYPE_32BIT \
     473 || PGM_GST_TYPE == PGM_TYPE_PAE \
     474 || PGM_GST_TYPE == PGM_TYPE_AMD64
     475
     476DECLINLINE(int) PGM_GST_NAME(WalkFastReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel)
     477{
     478    RT_NOREF(pVCpu);
     479    pWalk->fFailed = PGM_WALKFAIL_NOT_PRESENT           | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
     480    return VERR_PAGE_TABLE_NOT_PRESENT;
     481}
     482
     483DECLINLINE(int) PGM_GST_NAME(WalkFastReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel, int rc)
     484{
     485    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); RT_NOREF(pVCpu, rc);
     486    pWalk->fFailed = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS  | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
     487    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
     488}
     489
     490DECLINLINE(int) PGM_GST_NAME(WalkFastReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel)
     491{
     492    RT_NOREF(pVCpu);
     493    pWalk->fFailed = PGM_WALKFAIL_RESERVED_BITS         | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
     494    return VERR_RESERVED_PAGE_TABLE_BITS;
     495}
     496
     497/**
     498 * Performs a guest page table walk.
     499 *
     500 * @returns VBox status code.
     501 * @retval  VINF_SUCCESS on success.
     502 * @retval  VERR_PAGE_TABLE_NOT_PRESENT, VERR_RESERVED_PAGE_TABLE_BITS or
     503 *          VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS on normal failure.
     504 *          The failure reason is also recorded in PGMPTWALKFAST::fFailed.
     505 *
     506 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
     507 * @param   GCPtr               The guest virtual address to walk by.
     508 * @param   fFlags              PGMQPAGE_F_XXX.
     509 *                              This is ignored when @a a_fSetFlags is @c false.
     510 * @param   pWalk               The page walk info.
     511 * @param   pGstWalk            The guest mode specific page walk info.
     512 * @tparam  a_enmGuestSlatMode  The SLAT mode of the function.
     513 * @tparam  a_fSetFlags         Whether to process @a fFlags and set accessed
     514 *                              and dirty flags accordingly.
     515 * @thread  EMT(pVCpu)
     516 */
     517template<PGMSLAT const a_enmGuestSlatMode = PGMSLAT_DIRECT, bool const a_fSetFlags = false>
     518DECLINLINE(int) PGM_GST_NAME(WalkFast)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk, PGSTPTWALK pGstWalk)
     519{
     520    int rc;
     521
     522# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX) || defined(DOXYGEN_RUNNING)
      523/** @def PGM_GST_SLAT_WALK_FAST
     524 * Macro to perform guest second-level address translation (EPT or Nested).
     525 *
     526 * @param   a_pVCpu         The cross context virtual CPU structure of the calling
     527 *                          EMT.
     528 * @param   a_GCPtrNested   The nested-guest linear address that caused the
     529 *                          second-level translation.
     530 * @param   a_GCPhysNested  The nested-guest physical address to translate.
     531 * @param   a_fFinal        Set to @a true if this is the final page table entry
     532 *                          and effective nested page table flags should be
     533 *                          merged into PGMPTWALKFAST::fEffective.  Otherwise
      534 *                          set to @a false and nothing is done.
     535 * @param   a_GCPhysOut     Where to store the guest-physical address (result).
     536 * @param   a_pWalk         The @a pWalk argument to the function.
     537 */
     538#  define PGM_GST_SLAT_WALK_FAST(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_fFinal, a_GCPhysOut, a_pWalk) \
     539    do { \
     540        /** @todo Optimize this. Among other things, WalkSlat can be eliminated. WalkGstSlat is completely pointless. */ \
     541        /** @todo pass fFlags along as appropriate... */ \
     542        if (a_enmGuestSlatMode != PGMSLAT_DIRECT) \
     543        { \
     544            PGMPTWALK    WalkSlat; \
     545            PGMPTWALKGST WalkGstSlat; \
     546            int rcX; \
     547            if (a_enmGuestSlatMode == PGMSLAT_EPT) \
     548                rcX = PGM_GST_SLAT_NAME_EPT(Walk)(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, \
     549                                                  &WalkSlat, &WalkGstSlat.u.Ept); \
     550            else AssertFailedReturn(VERR_NOT_IMPLEMENTED); \
     551            if (RT_SUCCESS(rcX)) \
     552                (a_GCPhysOut) = WalkSlat.GCPhys; \
     553            else \
     554            { \
      555                PGM_GST_NAME(ConvertPtWalkToFast)(&WalkSlat, pWalk); \
     556                return rcX; \
     557            } \
     558            if (a_fFinal) \
     559            {   /* Merge in the nested paging flags for the final GCPhys. */ \
     560                if (a_enmGuestSlatMode == PGMSLAT_EPT) \
     561                    (a_pWalk)->fEffective = ((a_pWalk)->fEffective & ~PGM_PTATTRS_EPT_MASK) \
     562                                          | WalkSlat.fEffective & PGM_PTATTRS_EPT_MASK; \
     563                else AssertFailedReturn(VERR_NOT_IMPLEMENTED); \
     564            } \
     565        } \
     566    } while (0)
     567# else
     568#  define PGM_GST_SLAT_WALK_FAST(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_fFinal, a_GCPhysOut, a_pWalk) do { } while (0)
     569# endif
     570# if PGM_GST_TYPE == PGM_TYPE_32BIT
     571#  define PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, a_fEffective, a_pEntryU, a_OrgEntryU, a_fFlags) do { \
     572       if (!a_fSetFlags || ((a_OrgEntryU) & (a_fFlags)) == (a_fFlags)) \
     573       { /* likely */ } \
     574       else \
     575       { \
     576           ASMAtomicOrU32((a_pEntryU), (a_fFlags)); \
     577           (a_fEffective) |= (a_fFlags); \
     578       } \
     579    } while (0)
     580# else
     581#  define PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, a_fEffective, a_pEntryU, a_OrgEntryU, a_fFlags) do { \
     582       if (!a_fSetFlags || ((a_OrgEntryU) & (a_fFlags)) == (a_fFlags)) \
     583       { /* likely */ } \
     584       else \
     585       { \
     586           ASMAtomicOrU64((a_pEntryU), (a_fFlags)); \
     587           (a_fEffective) |= (a_fFlags); \
     588       } \
     589    } while (0)
     590# endif
     591
     592
     593    /*
     594     * Init the walking structures.
     595     */
     596    RT_ZERO(*pGstWalk);
     597    pWalk->GCPtr        = GCPtr;
     598    pWalk->GCPhys       = 0;
     599    pWalk->GCPhysNested = 0;
     600    pWalk->fInfo        = 0;
     601    pWalk->fFailed      = 0;
     602    pWalk->fEffective   = 0;
     603
     604# if PGM_GST_TYPE == PGM_TYPE_32BIT \
     605  || PGM_GST_TYPE == PGM_TYPE_PAE
     606    /*
     607     * Boundary check for PAE and 32-bit (prevents trouble further down).
     608     */
     609    if (RT_LIKELY(GCPtr < _4G))
     610    { /* extremely likely */ }
     611    else
     612        return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 8);
     613# endif
     614
     615    uint64_t fEffective;
     616    {
     617# if PGM_GST_TYPE == PGM_TYPE_AMD64
     618        /*
     619         * The PML4 table.
     620         */
     621        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
     622        if (RT_SUCCESS(rc)) { /* probable */ }
     623        else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
     624
     625        PX86PML4E pPml4e;
     626        pGstWalk->pPml4e  = pPml4e  = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
     627        X86PML4E  Pml4e;
     628        pGstWalk->Pml4e.u = Pml4e.u = ASMAtomicUoReadU64(&pPml4e->u);
     629
     630        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
     631        else return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 4);
     632
     633        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
     634        else return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 4);
     635
     636        fEffective = Pml4e.u & (  X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A
     637                                | X86_PML4E_NX);
     638        PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPml4e->u, Pml4e.u, X86_PML4E_A);
     639        pWalk->fEffective = fEffective;
     640
     641        /*
     642         * The PDPT.
     643         */
     644        RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
     645        PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPdpt, false /*a_fFinal*/, GCPhysPdpt, pWalk);
     646        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
     647        if (RT_SUCCESS(rc)) { /* probable */ }
     648        else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
     649
     650# elif PGM_GST_TYPE == PGM_TYPE_PAE
     651        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
     652        if (RT_SUCCESS(rc)) { /* probable */ }
     653        else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
     654# endif
     655    }
     656    {
     657# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
     658        PX86PDPE pPdpe;
     659        pGstWalk->pPdpe  = pPdpe  = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
     660        X86PDPE  Pdpe;
     661        pGstWalk->Pdpe.u = Pdpe.u = ASMAtomicUoReadU64(&pPdpe->u);
     662
     663        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
     664        else return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 3);
     665
     666        if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
     667        else return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 3);
     668
     669#  if PGM_GST_TYPE == PGM_TYPE_AMD64
     670        fEffective &= (Pdpe.u & (  X86_PDPE_P   | X86_PDPE_RW  | X86_PDPE_US
     671                                 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A));
     672        fEffective |= Pdpe.u & X86_PDPE_LM_NX;
     673        PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPdpe->u, Pdpe.u, X86_PDE_A);
     674#  else
     675        /*
     676         * NX in the legacy-mode PAE PDPE is reserved. The valid check above ensures the NX bit is not set.
     677         * The RW, US, A bits MBZ in PAE PDPTE entries but must be 1 the way we compute cumulative (effective) access rights.
     678         */
     679        Assert(!(Pdpe.u & X86_PDPE_LM_NX));
     680        fEffective = X86_PDPE_P | X86_PDPE_RW  | X86_PDPE_US | X86_PDPE_A
     681                   | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
     682#  endif
     683        pWalk->fEffective = fEffective;
     684
     685        /*
     686         * The PD.
     687         */
     688        RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
     689        PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPd, false /*a_fFinal*/, GCPhysPd, pWalk);
     690        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
     691        if (RT_SUCCESS(rc)) { /* probable */ }
     692        else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
     693
     694# elif PGM_GST_TYPE == PGM_TYPE_32BIT
     695        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
     696        if (RT_SUCCESS(rc)) { /* probable */ }
     697        else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
     698# endif
     699    }
     700    {
     701        PGSTPDE pPde;
     702        pGstWalk->pPde  = pPde  = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
     703        GSTPDE  Pde;
     704# if PGM_GST_TYPE != PGM_TYPE_32BIT
     705        pGstWalk->Pde.u = Pde.u = ASMAtomicUoReadU64(&pPde->u);
     706# else
     707        pGstWalk->Pde.u = Pde.u = ASMAtomicUoReadU32(&pPde->u);
     708# endif
     709        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
     710        else return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 2);
     711        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
     712        {
     713            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
     714            else return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 2);
     715
     716            /*
     717             * We're done.
     718             */
     719            pWalk->fInfo = PGM_WALKINFO_SUCCEEDED | PGM_WALKINFO_BIG_PAGE;
     720
     721# if PGM_GST_TYPE == PGM_TYPE_32BIT
     722            fEffective  = Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
     723# else
     724            fEffective &= Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
     725            fEffective |= Pde.u & X86_PDE2M_PAE_NX;
     726# endif
     727            fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
     728            fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
     729
     730            rc = VINF_SUCCESS;
     731            if (a_fSetFlags)
     732            {
     733                /* We have to validate the access before setting any flags. */
     734                uint32_t fFailed = 0;
     735                if ((fFlags & PGMQPAGE_F_USER_MODE) && !(fEffective & X86_PDE4M_US))
     736                    fFailed |= PGM_WALKFAIL_NOT_ACCESSIBLE_BY_MODE;
     737                if (fFlags & PGMQPAGE_F_WRITE)
     738                {
     739                    if (   (fEffective & X86_PDE4M_RW)
     740                        || (fFlags & (PGMQPAGE_F_USER_MODE | PGMQPAGE_F_CR0_WP0)) == PGMQPAGE_F_CR0_WP0)
     741                    { /* likely*/ }
     742                    else fFailed |= PGM_WALKFAIL_NOT_WRITABLE;
     743                }
     744# if PGM_GST_TYPE != PGM_TYPE_32BIT
     745                else if (fFlags & PGMQPAGE_F_EXECUTE)
     746                {
     747                    if (!(fEffective & X86_PDE2M_PAE_NX) || !pVCpu->pgm.s.fNoExecuteEnabled) { /* likely */ }
     748                    else fFailed |= PGM_WALKFAIL_NOT_EXECUTABLE;
     749                }
     750# endif
     751                if (fFailed == 0)
     752                {
     753                    if (!(fFlags & PGMQPAGE_F_WRITE))
     754                        PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPde->u, Pde.u, X86_PDE4M_A);
     755                    else
     756                        PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPde->u, Pde.u, X86_PDE4M_A | X86_PDE4M_D);
     757                }
     758                else
     759                {
     760                    pWalk->fFailed = fFailed | (2U << PGM_WALKFAIL_LEVEL_SHIFT);
     761                    pWalk->fInfo   = PGM_WALKINFO_BIG_PAGE;
     762                    rc = VERR_ACCESS_DENIED;
     763                }
     764            }
     765
     766            pWalk->fEffective = fEffective;
     767            Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
     768            Assert(fEffective & PGM_PTATTRS_R_MASK);
     769
     770            RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
     771                               | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
     772            PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPde, true /*a_fFinal*/, GCPhysPde, pWalk);
     773            pWalk->GCPhys     = GCPhysPde;
     774            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys); /** @todo why do we apply it here and not below?!? */
     775            return rc;
     776        }
     777
     778        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
     779            return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 2);
     780# if PGM_GST_TYPE == PGM_TYPE_32BIT
     781        fEffective  = Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
     782# else
     783        fEffective &= Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
     784        fEffective |= Pde.u & X86_PDE_PAE_NX;
     785# endif
     786        PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPde->u, Pde.u, X86_PDE_A);
     787        pWalk->fEffective = fEffective;
     788
     789        /*
     790         * The PT.
     791         */
     792        RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
     793        PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPt, false /*a_fFinal*/, GCPhysPt, pWalk);
     794        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
     795        if (RT_SUCCESS(rc)) { /* probable */ }
     796        else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
     797    }
     798    {
     799        PGSTPTE pPte;
     800        pGstWalk->pPte  = pPte  = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
     801        GSTPTE  Pte;
     802# if PGM_GST_TYPE != PGM_TYPE_32BIT
     803        pGstWalk->Pte.u = Pte.u = ASMAtomicUoReadU64(&pPte->u);
     804# else
     805        pGstWalk->Pte.u = Pte.u = ASMAtomicUoReadU32(&pPte->u);
     806# endif
     807
     808        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
     809        else return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 1);
     810
     811        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
     812        else return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 1);
     813
     814        /*
     815         * We're done.
     816         */
     817        pWalk->fInfo = PGM_WALKINFO_SUCCEEDED;
     818
     819        fEffective &= Pte.u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
     820# if PGM_GST_TYPE != PGM_TYPE_32BIT
     821        fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G | X86_PTE_PAE_NX);
     822# else
     823        fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
     824# endif
     825
     826        rc = VINF_SUCCESS;
     827        if (a_fSetFlags)
     828        {
     829            /* We have to validate the access before setting any flags. */
     830            uint32_t fFailed = 0;
     831            if ((fFlags & PGMQPAGE_F_USER_MODE) && !(fEffective & X86_PTE_US))
     832                fFailed |= PGM_WALKFAIL_NOT_ACCESSIBLE_BY_MODE;
     833            if (fFlags & PGMQPAGE_F_WRITE)
     834            {
     835                if ((fEffective & X86_PTE_RW) || (fFlags & (PGMQPAGE_F_USER_MODE | PGMQPAGE_F_CR0_WP0)) == PGMQPAGE_F_CR0_WP0)
     836                { /* likely*/ }
     837                else fFailed |= PGM_WALKFAIL_NOT_WRITABLE;
     838            }
     839# if PGM_GST_TYPE != PGM_TYPE_32BIT
     840            else if (fFlags & PGMQPAGE_F_EXECUTE)
     841            {
     842                if (!(fEffective & X86_PTE_PAE_NX) || !pVCpu->pgm.s.fNoExecuteEnabled) { /* likely */ }
     843                else fFailed |= PGM_WALKFAIL_NOT_EXECUTABLE;
     844            }
     845# endif
     846            if (fFailed == 0)
     847            {
     848                if (!(fFlags & PGMQPAGE_F_WRITE))
     849                    PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPte->u, Pte.u, X86_PTE_A);
     850                else
     851                    PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPte->u, Pte.u, X86_PTE_A | X86_PTE_D);
     852            }
     853            else
     854            {
     855                pWalk->fFailed = fFailed | (1U << PGM_WALKFAIL_LEVEL_SHIFT);
     856                pWalk->fInfo   = 0;
     857                rc = VERR_ACCESS_DENIED;
     858            }
     859        }
     860
     861        pWalk->fEffective = fEffective;
     862        Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
     863        Assert(fEffective & PGM_PTATTRS_R_MASK);
     864
     865        RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
     866                           | (GCPtr & GUEST_PAGE_OFFSET_MASK);
     867        PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPte, true /*a_fFinal*/, GCPhysPte, pWalk);
     868        pWalk->GCPhys     = GCPhysPte;
     869        return rc;
     870    }
     871# undef PGM_GST_SLAT_WALK_FAST
     872# undef PGM_GST_ENSURE_ENTRY_FLAGS_SET
     873}
     874
     875#endif /* 32BIT, PAE, AMD64 */
     876
     877/**
     878 * Guest virtual to guest physical + info translation, the faster and better
     879 * version.
     880 *
     881 * @returns VBox status code.
     882 * @param   pVCpu       The cross context virtual CPU structure.
     883 * @param   GCPtr       Guest Context virtual address of the page.
     884 * @param   fFlags      PGMQPAGE_F_XXX
     885 * @param   pWalk       Where to store the page walk info.
     886 * @thread  EMT(pVCpu)
     887 */
      888 */
     890PGM_GST_DECL(int, QueryPageFast)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk)
     891{
     892#if PGM_GST_TYPE == PGM_TYPE_REAL \
     893 || PGM_GST_TYPE == PGM_TYPE_PROT
     894
     895# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     896    if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
     897    {
     898        /** @todo optimize this case as well.   */
     899        /** @todo pass fFlags along. */
     900        PGMPTWALK    WalkSlat;
     901        PGMPTWALKGST WalkGstSlat;
     902        int const rc = pgmGstSlatWalk(pVCpu, GCPtr, true /* fIsLinearAddrValid */, GCPtr, &WalkSlat, &WalkGstSlat);
     903        if (RT_SUCCESS(rc))
     904        {
     905            PGMPTWALKFAST_ZERO(pWalk);
     906            pWalk->GCPtr        = GCPtr;
     907            pWalk->GCPhys       = WalkSlat.GCPhys;
     908            pWalk->GCPhysNested = 0;
      909    pWalk->fInfo        = PGM_WALKINFO_SUCCEEDED;
      910    pWalk->fFailed      = PGM_WALKFAIL_SUCCESS;
     911            pWalk->fEffective   = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D;
     912        }
     913        else
      914            PGM_GST_NAME(ConvertPtWalkToFast)(&WalkSlat, pWalk);
     915        return rc;
     916    }
     917# endif
     918
     919    /*
     920     * Fake it.
     921     */
     922    pWalk->GCPtr        = GCPtr;
     923    pWalk->GCPhys       = GCPtr;
     924    pWalk->GCPhysNested = 0;
     925    pWalk->fInfo        = PGM_WALKINFO_SUCCEEDED;
     926    pWalk->fFailed      = PGM_WALKFAIL_SUCCESS;
     927    pWalk->fEffective   = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D;
     928    RT_NOREF(pVCpu, fFlags);
     929    return VINF_SUCCESS;
     930
     931#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
     932   || PGM_GST_TYPE == PGM_TYPE_PAE \
     933   || PGM_GST_TYPE == PGM_TYPE_AMD64
     934
     935    GSTPTWALK GstWalk;
     936    int rc;
     937# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX)
     938    switch (pVCpu->pgm.s.enmGuestSlatMode)
     939    {
     940        case PGMSLAT_DIRECT:
     941# endif
     942            if (fFlags)
     943                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_DIRECT, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
     944            else
     945                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_DIRECT, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
     946# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX)
     947            break;
     948#  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     949        case PGMSLAT_EPT:
     950            if (fFlags)
     951                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_EPT, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
     952            else
     953                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_EPT, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
     954            break;
     955#  endif
     956#  ifdef VBOX_WITH_NESTED_HWVIRT_SVM_XXX
     957        case PGMSLAT_32BIT:
     958            if (fFlags)
     959                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_32BIT, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
     960            else
     961                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_32BIT, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
     962            break;
     963        case PGMSLAT_PAE:
     964            if (fFlags)
     965                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_PAE, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
     966            else
     967                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_PAE, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
     968            break;
     969        case PGMSLAT_AMD64:
     970            if (fFlags)
     971                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_AMD64, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
     972            else
     973                rc = PGM_GST_NAME(WalkFast)<PGMSLAT_AMD64, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
     974            break;
     975#  endif
     976        default:
     977            AssertFailedReturn(VERR_INTERNAL_ERROR_4);
     978    }
     979# endif
     980    if (RT_SUCCESS(rc))
     981    {
     982        Assert(pWalk->fInfo & PGM_WALKINFO_SUCCEEDED);
     983        Assert(pWalk->GCPtr == GCPtr);
     984        Assert((pWalk->GCPhys & GUEST_PAGE_OFFSET_MASK) == (GCPtr & GUEST_PAGE_OFFSET_MASK));
     985        return VINF_SUCCESS;
     986    }
     987    return rc;
     988
     989#else
     990# error "shouldn't be here!"
     991    /* something else... */
     992    return VERR_NOT_SUPPORTED;
     993#endif
     994}
     995
    445998
    446999/**
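
The accessed/dirty handling in WalkFast above reduces to one pattern: skip the write when the bits are already set, otherwise OR them in atomically and mirror them into the effective attributes. A stand-alone sketch of that pattern for 64-bit entries (function name hypothetical; mirrors the PGM_GST_ENSURE_ENTRY_FLAGS_SET macro):

    #include <iprt/asm.h>

    /* Only touch the guest page-table entry when fFlags are not already all set,
       avoiding needless dirtying of the guest page and the host cache line. */
    static void exampleEnsureEntryFlagsSet(uint64_t volatile *pEntryU, uint64_t uOrgEntryU,
                                           uint64_t fFlags, uint64_t *pfEffective)
    {
        if ((uOrgEntryU & fFlags) == fFlags)
        { /* likely - nothing to do */ }
        else
        {
            ASMAtomicOrU64(pEntryU, fFlags); /* atomic wrt the guest and other EMTs */
            *pfEffective |= fFlags;
        }
    }
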
  • trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h

    r103583 r104932  
    102102    pWalk->fNotPresent = true;
    103103    pWalk->uLevel      = uLevel;
    104     pWalk->fFailed     = s_afEptViolations[idxViolationType];
     104    pWalk->fFailed     = s_afEptViolations[idxViolationType] | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    105105    return VERR_PAGE_TABLE_NOT_PRESENT;
    106106}
     
    121121    pWalk->fBadPhysAddr = true;
    122122    pWalk->uLevel       = uLevel;
    123     pWalk->fFailed      = PGM_WALKFAIL_EPT_VIOLATION;
     123    pWalk->fFailed      = PGM_WALKFAIL_EPT_VIOLATION         | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    124124    return VERR_PAGE_TABLE_NOT_PRESENT;
    125125}
     
    139139    pWalk->fRsvdError = true;
    140140    pWalk->uLevel     = uLevel;
    141     pWalk->fFailed    = PGM_WALKFAIL_EPT_MISCONFIG;
     141    pWalk->fFailed    = PGM_WALKFAIL_EPT_MISCONFIG           | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    142142    return VERR_PAGE_TABLE_NOT_PRESENT;
    143143}
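
With the failing level now folded into fFailed for the EPT walk as well, a consumer can recover both pieces with plain mask/shift arithmetic. A hypothetical decoding helper:

    /* Hypothetical: split a PGMWALKFAIL value into reason flags and paging level. */
    static void exampleDecodeWalkFail(PGMWALKFAIL fFailed, uint32_t *pfReason, uint8_t *puLevel)
    {
        *puLevel  = (uint8_t)((fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT);
        *pfReason = fFailed & ~PGM_WALKFAIL_LEVEL_MASK;
    }
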
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r104858 r104932  
    504504#define IEMTLBE_F_PHYS_REV          UINT64_C(0xfffffffffffffc00) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
    505505/** @} */
     506AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
     507AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
     508AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
     509AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED   == IEMTLBE_F_PG_UNASSIGNED);
     510AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE    == IEMTLBE_F_PG_CODE_PAGE);
     511/** The bits set by PGMPhysIemGCPhys2PtrNoLock. */
     512#define IEMTLBE_GCPHYS2PTR_MASK     (  PGMIEMGCPHYS2PTR_F_NO_WRITE \
     513                                     | PGMIEMGCPHYS2PTR_F_NO_READ \
     514                                     | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 \
     515                                     | PGMIEMGCPHYS2PTR_F_UNASSIGNED \
     516                                     | PGMIEMGCPHYS2PTR_F_CODE_PAGE \
     517                                     | IEMTLBE_F_PHYS_REV )
    506518
    507519
     
    61006112VBOXSTRICTRC    iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr)  RT_NOEXCEPT;
    61016113VBOXSTRICTRC    iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6102 VBOXSTRICTRC    iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
     6114VBOXSTRICTRC    iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
    61036115VBOXSTRICTRC    iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
    61046116VBOXSTRICTRC    iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
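
The AssertCompile block documents why IEM can take the flags returned by PGMPhysIemGCPhys2PtrNoLock straight into a TLB entry: the PGMIEMGCPHYS2PTR_F_XXX values are bit-identical to their IEMTLBE_F_XXX counterparts. A sketch of such a merge (assuming the IEMTLBENTRY::fFlagsAndPhysRev field and PIEMTLBENTRY typedef from IEMInternal.h; illustrative only):

    /* Replace the physical-page bits (and physical revision) of a TLB entry
       with freshly queried flags; no bit translation is needed thanks to the
       compile-time identities asserted above. */
    static void exampleTlbeSetPhysFlags(PIEMTLBENTRY pTlbe, uint64_t fPgmFlags)
    {
        pTlbe->fFlagsAndPhysRev = (pTlbe->fFlagsAndPhysRev & ~IEMTLBE_GCPHYS2PTR_MASK)
                                | fPgmFlags;
    }
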
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r104886 r104932  
    28102810    uint32_t                        uType;
    28112811    DECLCALLBACKMEMBER(int, pfnGetPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk));
     2812    DECLCALLBACKMEMBER(int, pfnQueryPageFast,(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk));
    28122813    DECLCALLBACKMEMBER(int, pfnModifyPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    28132814    DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));