VirtualBox Changeset r91580

Timestamp: Oct 6, 2021 7:22:04 AM
Author: vboxsync
Message:

VMM: Nested VMX: bugref:10092 Made changes to PGM++ to handle invalid PAE PDPEs being loaded.
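For context: with PAE paging, CR3 points at a 32-byte page-directory-pointer table whose four PDPEs the CPU loads into internal registers when CR3 is written; a present PDPE with reserved (must-be-zero) bits set causes that load to fault on real hardware. A nested-VMX guest can legitimately hand the VMM such an entry, so PGM has to treat it as a guest error to reflect back rather than asserting host-side, which is what the fPdpesMapped plumbing in the diff below enables. The following is a minimal sketch of the kind of validity check involved, assuming a 52-bit MAXPHYADDR; the mask value and helper name are illustrative inventions, not VirtualBox definitions (the real constants such as X86_PDPE_P and X86_PDPE_PG_MASK used in the diff live in VirtualBox's x86.h).

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins only; the real X86_PDPE_* constants live in
       VirtualBox's x86.h, and the must-be-zero mask depends on the guest
       CPU's MAXPHYADDR. */
    #define DEMO_PDPE_P         UINT64_C(0x0000000000000001) /* bit 0: present */
    #define DEMO_PDPE_MBZ_MASK  UINT64_C(0xfff00000000001e6) /* bits 1-2, 5-8 and 52-63
                                                                must be zero (52-bit
                                                                MAXPHYADDR assumed) */

    /* True if a PAE PDPE may legally be loaded: not-present entries are
       ignored by the CPU; present entries must keep all must-be-zero bits
       clear, otherwise the CR3 load faults on real hardware. */
    static bool demoPaePdpeIsValid(uint64_t uPdpe)
    {
        if (!(uPdpe & DEMO_PDPE_P))
            return true;
        return (uPdpe & DEMO_PDPE_MBZ_MASK) == 0;
    }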

File: 1 edited

Legend: unmodified ( ), added (+), removed (-)
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r91271)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r91580)
@@ -53,5 +53,5 @@
 PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
 #endif
-PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
+PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped);
 PGM_BTH_DECL(int, UnmapCR3)(PVMCPUCC pVCpu);
 
@@ -4295,10 +4295,12 @@
  *
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   GCPhysCR3       The physical address in the CR3 register.  (A20
- *                          mask already applied.)
+ * @param   GCPhysCR3       The physical address in the CR3 register. (A20 mask
+ *                          already applied.)
+ * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
  */
-PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
+PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped)
 {
     PVMCC pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);
+    int rc = VINF_SUCCESS;
 
     /* Update guest paging info. */
@@ -4310,87 +4312,63 @@
     PGM_A20_ASSERT_MASKED(pVCpu, GCPhysCR3);
 
-    /*
-     * Map the page CR3 points at.
-     */
-    RTHCPTR     HCPtrGuestCR3;
-    PGM_LOCK_VOID(pVM);
-    PPGMPAGE    pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3);
-    AssertReturn(pPageCR3, VERR_PGM_INVALID_CR3_ADDR);
-    /** @todo this needs some reworking wrt. locking?  */
-    int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
-    PGM_UNLOCK(pVM);
-    if (RT_SUCCESS(rc))
-    {
+# if PGM_GST_TYPE == PGM_TYPE_PAE
+    if (!fPdpesMapped)
+# else
+    NOREF(fPdpesMapped);
+# endif
+    {
+        /*
+         * Map the page CR3 points at.
+         */
+        RTHCPTR     HCPtrGuestCR3;
+        PGM_LOCK_VOID(pVM);
+        PPGMPAGE    pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3);
+        AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
+        /** @todo this needs some reworking wrt. locking?  */
+        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
+        PGM_UNLOCK(pVM);
+        if (RT_SUCCESS(rc))
+        {
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
 #  ifdef IN_RING3
-        pVCpu->pgm.s.pGst32BitPdR3 = (PX86PD)HCPtrGuestCR3;
-        pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
+            pVCpu->pgm.s.pGst32BitPdR3 = (PX86PD)HCPtrGuestCR3;
+            pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
 #  else
-        pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR3PTR;
-        pVCpu->pgm.s.pGst32BitPdR0 = (PX86PD)HCPtrGuestCR3;
+            pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR3PTR;
+            pVCpu->pgm.s.pGst32BitPdR0 = (PX86PD)HCPtrGuestCR3;
 #  endif
 
 # elif PGM_GST_TYPE == PGM_TYPE_PAE
 #  ifdef IN_RING3
-        pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCR3;
-        pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
+            pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCR3;
+            pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
 #  else
-        pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
-        pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCR3;
-#  endif
-
-        /*
-         * Map the 4 PDs too.
-         */
-        X86PDPE aGstPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
-        memcpy(&aGstPaePdpes, HCPtrGuestCR3, sizeof(aGstPaePdpes));
-        CPUMSetGuestPaePdpes(pVCpu, &aGstPaePdpes[0]);
-        for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
-        {
-            X86PDPE PaePdpe = aGstPaePdpes[i];
-            if (PaePdpe.u & X86_PDPE_P)
-            {
-                RTHCPTR     HCPtr;
-                RTGCPHYS    GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
-                PGM_LOCK_VOID(pVM);
-                PPGMPAGE    pPage  = pgmPhysGetPage(pVM, GCPhys);
-                AssertReturn(pPage, VERR_PGM_INVALID_PDPE_ADDR);
-                int rc2 = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
-                PGM_UNLOCK(pVM);
-                if (RT_SUCCESS(rc2))
-                {
-#  ifdef IN_RING3
-                    pVCpu->pgm.s.apGstPaePDsR3[i]     = (PX86PDPAE)HCPtr;
-                    pVCpu->pgm.s.apGstPaePDsR0[i]     = NIL_RTR0PTR;
-#  else
-                    pVCpu->pgm.s.apGstPaePDsR3[i]     = NIL_RTR3PTR;
-                    pVCpu->pgm.s.apGstPaePDsR0[i]     = (PX86PDPAE)HCPtr;
-#  endif
-                    pVCpu->pgm.s.aGCPhysGstPaePDs[i]  = GCPhys;
-                    continue;
-                }
-                AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
-            }
-
-            pVCpu->pgm.s.apGstPaePDsR3[i]     = 0;
-            pVCpu->pgm.s.apGstPaePDsR0[i]     = 0;
-            pVCpu->pgm.s.aGCPhysGstPaePDs[i]  = NIL_RTGCPHYS;
-        }
+            pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
+            pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCR3;
+#  endif
+
+            /*
+             * Update CPUM and map the 4 PDs too.
+             */
+            X86PDPE aGstPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
+            memcpy(&aGstPaePdpes, HCPtrGuestCR3, sizeof(aGstPaePdpes));
+            CPUMSetGuestPaePdpes(pVCpu, &aGstPaePdpes[0]);
+            PGMGstMapPaePdpes(pVCpu, &aGstPaePdpes[0]);
 
 # elif PGM_GST_TYPE == PGM_TYPE_AMD64
 #  ifdef IN_RING3
-        pVCpu->pgm.s.pGstAmd64Pml4R3 = (PX86PML4)HCPtrGuestCR3;
-        pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
+            pVCpu->pgm.s.pGstAmd64Pml4R3 = (PX86PML4)HCPtrGuestCR3;
+            pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
 #  else
-        pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
-        pVCpu->pgm.s.pGstAmd64Pml4R0 = (PX86PML4)HCPtrGuestCR3;
-#  endif
-# endif
-    }
-    else
-        AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
-
+            pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
+            pVCpu->pgm.s.pGstAmd64Pml4R0 = (PX86PML4)HCPtrGuestCR3;
+#  endif
+# endif
+        }
+        else
+            AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
+    }
 #else /* prot/real stub */
-    int rc = VINF_SUCCESS;
+    NOREF(fPdpesMapped);
 #endif
 
@@ -4422,9 +4400,7 @@
 
     Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
-    rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
-                      NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
-                      &pNewShwPageCR3);
-    AssertFatalRC(rc);
-    rc = VINF_SUCCESS;
+    int const rc2 = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, PGMPOOLACCESS_DONTCARE,
+                                 PGM_A20_IS_ENABLED(pVCpu), NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/, &pNewShwPageCR3);
+    AssertFatalRC(rc2);
 
     pVCpu->pgm.s.CTX_SUFF(pShwPageCR3) = pNewShwPageCR3;
@@ -4444,6 +4420,6 @@
     Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 #   endif
-    rc = pgmMapActivateCR3(pVM, pNewShwPageCR3);
-    AssertRCReturn(rc, rc);
+    int const rc3 = pgmMapActivateCR3(pVM, pNewShwPageCR3);
+    AssertRCReturn(rc3, rc3);
 #  endif
 
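Two details of the new shape are worth noting. First, AssertReturn(pPageCR3, ...) became AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), ...), so the failure path now releases the PGM lock taken just above instead of leaking it. Second, the per-entry PDPE mapping loop is gone from the PAE branch: the PDPEs are handed to CPUM and mapped through PGMGstMapPaePdpes, and a caller that has already mapped and validated them can pass fPdpesMapped = true to skip the CR3 page mapping entirely. Below is a hypothetical caller-side sketch of that pattern, not an actual VBox call site; it assumes the VMM-internal types and the bth template's PGM_BTH_NAME naming, whereas real callers go through PGM's mode function tables.

    /* Hypothetical sketch of the calling pattern implied by the new
     * parameter.  PVMCPUCC, RTGCPHYS and PGM_BTH_NAME() come from the VMM
     * headers PGMAllBth.h is compiled with; pgmGstDemoSwitchCr3 is an
     * invented name for illustration. */
    static int pgmGstDemoSwitchCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fNestedVmEntry)
    {
        /* In the ordinary case MapCR3 maps the CR3 page itself (and, for
         * PAE guests, loads and maps the PDPEs). */
        if (!fNestedVmEntry)
            return PGM_BTH_NAME(MapCR3)(pVCpu, GCPhysCR3, false /* fPdpesMapped */);

        /* During nested VMX VM-entry emulation the PDPEs were already
         * fetched, checked for invalid (reserved) bits, and mapped; an
         * invalid entry fails the VM-entry instead of tripping an
         * assertion deep inside PGM. */
        return PGM_BTH_NAME(MapCR3)(pVCpu, GCPhysCR3, true /* fPdpesMapped */);
    }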