
Changeset 99788 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: May 15, 2023 6:01:35 AM (19 months ago)
Author:    vboxsync
Message:
VMM/PGM: Nested VMX: bugref:10318 Don't need to get the physical page unless the guest is already using large pages. Other nits and assertions.
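
In outline, the change moves the pgmPhysGetPageEx() lookup inside the EPT_E_LEAF check, so the guest physical page is only fetched when the guest PDE really maps a 2M page. A simplified sketch of the restructuring (identifiers taken from the diff below; the statistics, 2M backing-page handling, 4K split and error paths of the real function are omitted here):

    /* r99133 (before, simplified): the page was always looked up. */
    if (BTH_IS_NP_ACTIVE(pVM))
    {
        PPGMPAGE pPage;
        rc = pgmPhysGetPageEx(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK, &pPage);
        AssertRCReturn(rc, rc);
        if (pGstWalkAll->u.Ept.Pde.u & EPT_E_LEAF)
        {
            /* ... 2M page handling using pPage ... */
        }
    }

    /* r99788 (after, simplified): the lookup only happens for a 2M guest mapping. */
    Assert(BTH_IS_NP_ACTIVE(pVM));
    if (pGstWalkAll->u.Ept.Pde.u & EPT_E_LEAF)
    {
        PPGMPAGE pPage;
        rc = pgmPhysGetPageEx(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK, &pPage);
        AssertRCReturn(rc, rc);
        /* ... 2M page handling using pPage ... */
    }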

File: 1 edited

  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r99133)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r99788)
@@ -2547,4 +2547,5 @@
     Assert(!pShwPage->fDirty);
     Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
+    AssertMsg(!(pGstWalkAll->u.Ept.Pte.u & EPT_E_LEAF), ("Large page unexpected: %RX64\n", pGstWalkAll->u.Ept.Pte.u));
     AssertMsg((pGstWalkAll->u.Ept.Pte.u & EPT_PTE_PG_MASK) == GCPhysPage,
               ("PTE address mismatch. GCPhysPage=%RGp Pte=%RX64\n", GCPhysPage, pGstWalkAll->u.Ept.Pte.u & EPT_PTE_PG_MASK));
@@ -2880,153 +2881,156 @@
 
 # ifdef PGM_WITH_LARGE_PAGES
-    if (BTH_IS_NP_ACTIVE(pVM))
-    {
-        /*
-         * Check if the guest is mapping a 2M page here.
-         */
+    Assert(BTH_IS_NP_ACTIVE(pVM));
+
+    /*
+     * Check if the guest is mapping a 2M page.
+     */
+    if (pGstWalkAll->u.Ept.Pde.u & EPT_E_LEAF)
+    {
         PPGMPAGE pPage;
         rc = pgmPhysGetPageEx(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK, &pPage);
         AssertRCReturn(rc, rc);
-        if (pGstWalkAll->u.Ept.Pde.u & EPT_E_LEAF)
-        {
-            /* A20 is always enabled in VMX root and non-root operation. */
-            Assert(PGM_A20_IS_ENABLED(pVCpu));
-
-            RTHCPHYS HCPhys = NIL_RTHCPHYS;
-            if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
+
+        /* A20 is always enabled in VMX root and non-root operation. */
+        Assert(PGM_A20_IS_ENABLED(pVCpu));
+
+        /*
+         * Check if we have or can get a 2M backing page here.
+         */
+        RTHCPHYS HCPhys = NIL_RTHCPHYS;
+        if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
+        {
+            STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageReused);
+            AssertRelease(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+            HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
+        }
+        else if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
+        {
+            /* Recheck the entire 2 MB range to see if we can use it again as a large page. */
+            rc = pgmPhysRecheckLargePage(pVM, GCPhysPage, pPage);
+            if (RT_SUCCESS(rc))
             {
-                STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageReused);
-                AssertRelease(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+                Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+                Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE);
                 HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
             }
-            else if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
+        }
+        else if (PGMIsUsingLargePages(pVM))
+        {
+            rc = pgmPhysAllocLargePage(pVM, GCPhysPage);
+            if (RT_SUCCESS(rc))
             {
-                /* Recheck the entire 2 MB range to see if we can use it again as a large page. */
-                rc = pgmPhysRecheckLargePage(pVM, GCPhysPage, pPage);
-                if (RT_SUCCESS(rc))
-                {
-                    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
-                    Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE);
-                    HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
-                }
+                Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+                Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE);
+                HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
             }
-            else if (PGMIsUsingLargePages(pVM))
+        }
+
+        /*
+         * If we have a 2M backing page, we can map the guest's 2M page right away.
+         */
+        uint64_t const fShwBigPdeFlags = pGstWalkAll->u.Ept.Pde.u & pVCpu->pgm.s.fGstEptShadowedBigPdeMask;
+        if (HCPhys != NIL_RTHCPHYS)
+        {
+            Pde.u = HCPhys | fShwBigPdeFlags;
+            Assert(!(Pde.u & pVCpu->pgm.s.fGstEptMbzBigPdeMask));
+            Assert(Pde.u & EPT_E_LEAF);
+            SHW_PDE_ATOMIC_SET2(*pPde, Pde);
+
+            /* Add a reference to the first page only. */
+            PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPde, PGM_PAGE_GET_TRACKING(pPage), pPage, iPde);
+
+            Assert(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED);
+
+            STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a);
+            Log7Func(("GstPde=%RGp ShwPde=%RX64 [2M]\n", pGstWalkAll->u.Ept.Pde.u, Pde.u));
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * We didn't get a perfect 2M fit. Split the 2M page into 4K pages.
+         * The page ought not to be marked as a big (2M) page at this point.
+         */
+        Assert(PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE);
+
+        /* Determine the right kind of large page to avoid incorrect cached entry reuse. */
+        PGMPOOLACCESS enmAccess;
+        {
+            Assert(!(pGstWalkAll->u.Ept.Pde.u & EPT_E_USER_EXECUTE));  /* Mode-based execute control for EPT not supported. */
+            bool const fNoExecute = !(pGstWalkAll->u.Ept.Pde.u & EPT_E_EXECUTE);
+            if (pGstWalkAll->u.Ept.Pde.u & EPT_E_WRITE)
+                enmAccess = fNoExecute ? PGMPOOLACCESS_SUPERVISOR_RW_NX : PGMPOOLACCESS_SUPERVISOR_RW;
+            else
+                enmAccess = fNoExecute ? PGMPOOLACCESS_SUPERVISOR_R_NX  : PGMPOOLACCESS_SUPERVISOR_R;
+        }
+
+        /*
+         * Allocate & map a 4K shadow table to cover the 2M guest page.
+         */
+        PPGMPOOLPAGE   pShwPage;
+        RTGCPHYS const GCPhysPt = pGstWalkAll->u.Ept.Pde.u & EPT_PDE2M_PG_MASK;
+        rc = pgmPoolAlloc(pVM, GCPhysPt, PGMPOOLKIND_EPT_PT_FOR_EPT_2MB, enmAccess, PGM_A20_IS_ENABLED(pVCpu),
+                          pShwPde->idx, iPde, false /*fLockPage*/, &pShwPage);
+        if (   rc == VINF_SUCCESS
+            || rc == VINF_PGM_CACHED_PAGE)
+        { /* likely */ }
+        else
+        {
+           STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a);
+           AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
+        }
+
+        PSHWPT pPt = (PSHWPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
+        Assert(pPt);
+        Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
+        if (rc == VINF_SUCCESS)
+        {
+            /* The 4K PTEs shall inherit the flags of the 2M PDE page sans the leaf bit. */
+            uint64_t const fShwPteFlags = fShwBigPdeFlags & ~EPT_E_LEAF;
+
+            /* Sync each 4K pages in the 2M range. */
+            for (unsigned iPte = 0; iPte < RT_ELEMENTS(pPt->a); iPte++)
             {
-                rc = pgmPhysAllocLargePage(pVM, GCPhysPage);
-                if (RT_SUCCESS(rc))
-                {
-                    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
-                    Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE);
-                    HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
-                }
+                RTGCPHYS const GCPhysSubPage = GCPhysPt | (iPte << GUEST_PAGE_SHIFT);
+                pGstWalkAll->u.Ept.Pte.u = GCPhysSubPage | fShwPteFlags;
+                Assert(!(pGstWalkAll->u.Ept.Pte.u & pVCpu->pgm.s.fGstEptMbzPteMask));
+                PGM_BTH_NAME(NestedSyncPageWorker)(pVCpu, &pPt->a[iPte], GCPhysSubPage, pShwPage, iPte, pGstWalkAll);
+                Log7Func(("GstPte=%RGp ShwPte=%RX64 iPte=%u [2M->4K]\n", pGstWalkAll->u.Ept.Pte, pPt->a[iPte].u, iPte));
+                if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)))
+                    break;
             }
 
-            /*
-             * If we have a 2M large page, we can map the guest's 2M large page right away.
-             */
-            uint64_t const fShwBigPdeFlags = pGstWalkAll->u.Ept.Pde.u & pVCpu->pgm.s.fGstEptShadowedBigPdeMask;
-            if (HCPhys != NIL_RTHCPHYS)
+            /* Restore modifications did to the guest-walk result above in case callers might inspect them later. */
+            pGstWalkAll->u.Ept.Pte.u = 0;
+        }
+        else
+        {
+            Assert(rc == VINF_PGM_CACHED_PAGE);
+#  if defined(VBOX_STRICT) && defined(DEBUG_ramshankar)
+            /* Paranoia - Verify address of each of the subpages are what they should be. */
+            RTGCPHYS GCPhysSubPage = GCPhysPt;
+            for (unsigned iPte = 0; iPte < RT_ELEMENTS(pPt->a); iPte++, GCPhysSubPage += GUEST_PAGE_SIZE)
             {
-                Pde.u = HCPhys | fShwBigPdeFlags;
-                Assert(!(Pde.u & pVCpu->pgm.s.fGstEptMbzBigPdeMask));
-                Assert(Pde.u & EPT_E_LEAF);
-                SHW_PDE_ATOMIC_SET2(*pPde, Pde);
-
-                /* Add a reference to the first page only. */
-                PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPde, PGM_PAGE_GET_TRACKING(pPage), pPage, iPde);
-
-                Assert(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED);
-
-                STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a);
-                Log7Func(("GstPde=%RGp ShwPde=%RX64 [2M]\n", pGstWalkAll->u.Ept.Pde.u, Pde.u));
-                return VINF_SUCCESS;
+                PPGMPAGE pSubPage;
+                rc = pgmPhysGetPageEx(pVM, GCPhysSubPage, &pSubPage);
+                AssertRC(rc);
+                AssertMsg(   PGM_PAGE_GET_HCPHYS(pSubPage) == SHW_PTE_GET_HCPHYS(pPt->a[iPte])
+                          || !SHW_PTE_IS_P(pPt->a[iPte]),
+                          ("PGM 2M page and shadow PTE conflict. GCPhysSubPage=%RGp Page=%RHp Shw=%RHp\n",
+                           GCPhysSubPage, PGM_PAGE_GET_HCPHYS(pSubPage), SHW_PTE_GET_HCPHYS(pPt->a[iPte])));
             }
-
-            /*
-             * We didn't get a perfect 2M fit. Split the 2M page into 4K pages.
-             * The page ought not to be marked as a big (2M) page at this point.
-             */
-            Assert(PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE);
-
-            /* Determine the right kind of large page to avoid incorrect cached entry reuse. */
-            PGMPOOLACCESS enmAccess;
-            {
-                Assert(!(pGstWalkAll->u.Ept.Pde.u & EPT_E_USER_EXECUTE));  /* Mode-based execute control for EPT not supported. */
-                bool const fNoExecute = !(pGstWalkAll->u.Ept.Pde.u & EPT_E_EXECUTE);
-                if (pGstWalkAll->u.Ept.Pde.u & EPT_E_WRITE)
-                    enmAccess = fNoExecute ? PGMPOOLACCESS_SUPERVISOR_RW_NX : PGMPOOLACCESS_SUPERVISOR_RW;
-                else
-                    enmAccess = fNoExecute ? PGMPOOLACCESS_SUPERVISOR_R_NX  : PGMPOOLACCESS_SUPERVISOR_R;
-            }
-
-            /*
-             * Allocate & map a 4K shadow table to cover the 2M guest page.
-             */
-            PPGMPOOLPAGE   pShwPage;
-            RTGCPHYS const GCPhysPt = pGstWalkAll->u.Ept.Pde.u & EPT_PDE2M_PG_MASK;
-            rc = pgmPoolAlloc(pVM, GCPhysPt, PGMPOOLKIND_EPT_PT_FOR_EPT_2MB, enmAccess, PGM_A20_IS_ENABLED(pVCpu),
-                              pShwPde->idx, iPde, false /*fLockPage*/, &pShwPage);
-            if (   rc == VINF_SUCCESS
-                || rc == VINF_PGM_CACHED_PAGE)
-            { /* likely */ }
-            else
-            {
-               STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a);
-               AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
-            }
-
-            PSHWPT pPt = (PSHWPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
-            Assert(pPt);
-            Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
-            if (rc == VINF_SUCCESS)
-            {
-                /* The 4K PTEs shall inherit the flags of the 2M PDE page sans the leaf bit. */
-                uint64_t const fShwPteFlags = fShwBigPdeFlags & ~EPT_E_LEAF;
-
-                /* Sync each 4K pages in the 2M range. */
-                for (unsigned iPte = 0; iPte < RT_ELEMENTS(pPt->a); iPte++)
-                {
-                    RTGCPHYS const GCPhysSubPage = GCPhysPt | (iPte << GUEST_PAGE_SHIFT);
-                    pGstWalkAll->u.Ept.Pte.u = GCPhysSubPage | fShwPteFlags;
-                    Assert(!(pGstWalkAll->u.Ept.Pte.u & pVCpu->pgm.s.fGstEptMbzPteMask));
-                    PGM_BTH_NAME(NestedSyncPageWorker)(pVCpu, &pPt->a[iPte], GCPhysSubPage, pShwPage, iPte, pGstWalkAll);
-                    Log7Func(("GstPte=%RGp ShwPte=%RX64 iPte=%u [2M->4K]\n", pGstWalkAll->u.Ept.Pte, pPt->a[iPte].u, iPte));
-                    if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)))
-                        break;
-                }
-
-                /* Restore modifications did to the guest-walk result above in case callers might inspect them later. */
-                pGstWalkAll->u.Ept.Pte.u = 0;
-            }
-            else
-            {
-                Assert(rc == VINF_PGM_CACHED_PAGE);
-#  if defined(VBOX_STRICT) && defined(DEBUG_ramshankar)
-                /* Paranoia - Verify address of each of the subpages are what they should be. */
-                RTGCPHYS GCPhysSubPage = GCPhysPt;
-                for (unsigned iPte = 0; iPte < RT_ELEMENTS(pPt->a); iPte++, GCPhysSubPage += GUEST_PAGE_SIZE)
-                {
-                    PPGMPAGE pSubPage;
-                    rc = pgmPhysGetPageEx(pVM, GCPhysSubPage, &pSubPage);
-                    AssertRC(rc);
-                    AssertMsg(   PGM_PAGE_GET_HCPHYS(pSubPage) == SHW_PTE_GET_HCPHYS(pPt->a[iPte])
-                              || !SHW_PTE_IS_P(pPt->a[iPte]),
-                              ("PGM 2M page and shadow PTE conflict. GCPhysSubPage=%RGp Page=%RHp Shw=%RHp\n",
-                               GCPhysSubPage, PGM_PAGE_GET_HCPHYS(pSubPage), SHW_PTE_GET_HCPHYS(pPt->a[iPte])));
-                }
-#  endif
-                rc = VINF_SUCCESS; /* Cached entry; assume it's still fully valid. */
-            }
-
-            /* Save the new PDE. */
-            uint64_t const fShwPdeFlags = pGstWalkAll->u.Ept.Pde.u & pVCpu->pgm.s.fGstEptShadowedPdeMask;
-            Pde.u = pShwPage->Core.Key | fShwPdeFlags;
-            Assert(!(Pde.u & EPT_E_LEAF));
-            Assert(!(Pde.u & pVCpu->pgm.s.fGstEptMbzPdeMask));
-            SHW_PDE_ATOMIC_SET2(*pPde, Pde);
-            STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a);
-            Log7Func(("GstPde=%RGp ShwPde=%RX64 iPde=%u\n", pGstWalkAll->u.Ept.Pde.u, pPde->u, iPde));
-            return rc;
-        }
+#  endif
+            rc = VINF_SUCCESS; /* Cached entry; assume it's still fully valid. */
+        }
+
+        /* Save the new PDE. */
+        uint64_t const fShwPdeFlags = pGstWalkAll->u.Ept.Pde.u & pVCpu->pgm.s.fGstEptShadowedPdeMask;
+        Pde.u = pShwPage->Core.Key | fShwPdeFlags;
+        Assert(!(Pde.u & EPT_E_LEAF));
+        Assert(!(Pde.u & pVCpu->pgm.s.fGstEptMbzPdeMask));
+        SHW_PDE_ATOMIC_SET2(*pPde, Pde);
+        STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a);
+        Log7Func(("GstPde=%RGp ShwPde=%RX64 iPde=%u\n", pGstWalkAll->u.Ept.Pde.u, pPde->u, iPde));
+        return rc;
     }
 # endif /* PGM_WITH_LARGE_PAGES */