VirtualBox

Changeset 91854 in vbox for trunk/src/VBox/VMM/VMMR3


Timestamp: Oct 20, 2021 12:50:11 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 147656
Message: VMM: Removed PGM_WITHOUT_MAPPINGS and associated mapping code. bugref:9517
Location: trunk/src/VBox/VMM/VMMR3
Files: 10 edited

Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added; other lines are unchanged context. Each hunk header gives the starting line in the old (-) and new (+) revision of the file.
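
The edit is mechanical across all ten files: every region guarded by #ifndef PGM_WITHOUT_MAPPINGS is deleted, and where an #else branch exists it becomes unconditional. A minimal, self-contained C sketch of the pattern being retired (illustrative names only, not VirtualBox code):

    /* demo.c - sketch of the conditional-compilation pattern this changeset
     * removes. Before r91854 both branches existed in the sources; afterwards
     * only what used to be the #else branch remains. Build either way:
     *   cc demo.c                          (the branch the changeset deletes)
     *   cc -DPGM_WITHOUT_MAPPINGS demo.c   (the branch the changeset keeps)
     */
    #include <stdio.h>

    static int setupMappings(void)
    {
    #ifndef PGM_WITHOUT_MAPPINGS
        /* legacy path: build hypervisor-area mappings (PGMMap() and friends) */
        printf("mapping code compiled in\n");
    #else
        /* surviving path: nothing to do, the mapping machinery is gone */
        printf("mapping code compiled out\n");
    #endif
        return 0;
    }

    int main(void)
    {
        return setupMappings();
    }
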
  • trunk/src/VBox/VMM/VMMR3/MMHyper.cpp (r91266 → r91854)

@@ -39 +39 @@
 *   Internal Functions                                                        *
 *******************************************************************************/
-#ifndef PGM_WITHOUT_MAPPINGS
-static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode,
-                                                    void *pvUser);
-#endif
 static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
 static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
 static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
 static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+static int MMR3HyperReserveFence(PVM pVM);
+static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,
+                             const char *pszDesc, PRTGCPTR pGCPtr);


@@ -222 +221 @@

 /**
- * Finalizes the HMA mapping.
+ * Finalizes the HMA mapping (obsolete).
  *
  * This is called later during init, most (all) HMA allocations should be done

@@ -239 +238 @@
     AssertRC(rc);

-#ifndef PGM_WITHOUT_MAPPINGS
-    /*
-     * Adjust and create the HMA mapping.
-     */
-    while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
-        pVM->mm.s.cbHyperArea -= _4M;
-    rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,
-                    mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
-    if (RT_FAILURE(rc))
-        return rc;
-#endif
     pVM->mm.s.fPGMInitialized = true;
-
-#ifndef PGM_WITHOUT_MAPPINGS
-    /*
-     * Do all the delayed mappings.
-     */
-    PMMLOOKUPHYPER  pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
-    for (;;)
-    {
-        RTGCPTR     GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;
-        uint32_t    cPages = pLookup->cb >> PAGE_SHIFT;
-        switch (pLookup->enmType)
-        {
-            case MMLOOKUPHYPERTYPE_LOCKED:
-            {
-                PCRTHCPHYS paHCPhysPages = pLookup->u.Locked.paHCPhysPages;
-                for (uint32_t i = 0; i < cPages; i++)
-                {
-                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
-                    AssertRCReturn(rc, rc);
-                }
-                break;
-            }
-
-            case MMLOOKUPHYPERTYPE_HCPHYS:
-                rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
-                break;
-
-            case MMLOOKUPHYPERTYPE_GCPHYS:
-            {
-                const RTGCPHYS  GCPhys = pLookup->u.GCPhys.GCPhys;
-                const uint32_t  cb = pLookup->cb;
-                for (uint32_t off = 0; off < cb; off += PAGE_SIZE)
-                {
-                    RTHCPHYS HCPhys;
-                    rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
-                    if (RT_FAILURE(rc))
-                        break;
-                    rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
-                    if (RT_FAILURE(rc))
-                        break;
-                }
-                break;
-            }
-
-            case MMLOOKUPHYPERTYPE_MMIO2:
-            {
-                const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
-                for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
-                {
-                    RTHCPHYS HCPhys;
-                    rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iSubDev,
-                                                 pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
-                    if (RT_FAILURE(rc))
-                        break;
-                    rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
-                    if (RT_FAILURE(rc))
-                        break;
-                }
-                break;
-            }
-
-            case MMLOOKUPHYPERTYPE_DYNAMIC:
-                /* do nothing here since these are either fences or managed by someone else using PGM. */
-                break;
-
-            default:
-                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
-                break;
-        }
-
-        if (RT_FAILURE(rc))
-        {
-            AssertMsgFailed(("rc=%Rrc cb=%d off=%#RX32 enmType=%d pszDesc=%s\n",
-                             rc, pLookup->cb, pLookup->off, pLookup->enmType, pLookup->pszDesc));
-            return rc;
-        }
-
-        /* next */
-        if (pLookup->offNext == (int32_t)NIL_OFFSET)
-            break;
-        pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
-    }
-#endif /* !PGM_WITHOUT_MAPPINGS */

     LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));

@@ -339 +244 @@
 }

-
-#ifndef PGM_WITHOUT_MAPPINGS
-/**
- * Callback function which will be called when PGM is trying to find a new
- * location for the mapping.
- *
- * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
- * In 1) the callback should say if it objects to a suggested new location. If it
- * accepts the new location, it is called again for doing it's relocation.
- *
- *
- * @returns true if the location is ok.
- * @returns false if another location should be found.
- * @param   pVM         The cross context VM structure.
- * @param   GCPtrOld    The old virtual address.
- * @param   GCPtrNew    The new virtual address.
- * @param   enmMode     Used to indicate the callback mode.
- * @param   pvUser      User argument. Ignored.
- * @remark  The return value is no a failure indicator, it's an acceptance
- *          indicator. Relocation can not fail!
- */
-static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
-                                                    PGMRELOCATECALL enmMode, void *pvUser)
-{
-    NOREF(pvUser);
-    switch (enmMode)
-    {
-        /*
-         * Verify location - all locations are good for us.
-         */
-        case PGMRELOCATECALL_SUGGEST:
-            return true;
-
-        /*
-         * Execute the relocation.
-         */
-        case PGMRELOCATECALL_RELOCATE:
-        {
-            /*
-             * Accepted!
-             */
-            AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC,
-                      ("GCPtrOld=%RGv pVM->mm.s.pvHyperAreaGC=%RGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
-            Log(("Relocating the hypervisor from %RGv to %RGv\n", GCPtrOld, GCPtrNew));
-
-            /*
-             * Relocate the VM structure and ourselves.
-             */
-            RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
-            pVM->pVMRC                          += offDelta;
-            for (VMCPUID i = 0; i < pVM->cCpus; i++)
-                pVM->aCpus[i].pVMRC              = pVM->pVMRC;
-
-            pVM->mm.s.pvHyperAreaGC             += offDelta;
-            Assert(pVM->mm.s.pvHyperAreaGC < _4G);
-            pVM->mm.s.pHyperHeapRC              += offDelta;
-            pVM->mm.s.pHyperHeapR3->pbHeapRC    += offDelta;
-            pVM->mm.s.pHyperHeapR3->pVMRC        = pVM->pVMRC;
-
-            /*
-             * Relocate the rest.
-             */
-            VMR3Relocate(pVM, offDelta);
-            return true;
-        }
-
-        default:
-            AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
-    }
-
-    return false;
-}
-
-
-/**
- * Maps contiguous HC physical memory into the hypervisor region in the GC.
- *
- * @return VBox status code.
- *
- * @param   pVM         The cross context VM structure.
- * @param   pvR3        Ring-3 address of the memory. Must be page aligned!
- * @param   pvR0        Optional ring-0 address of the memory.
- * @param   HCPhys      Host context physical address of the memory to be
- *                      mapped. Must be page aligned!
- * @param   cb          Size of the memory. Will be rounded up to nearest page.
- * @param   pszDesc     Description.
- * @param   pGCPtr      Where to store the GC address.
- */
-VMMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys, size_t cb,
-                                  const char *pszDesc, PRTGCPTR pGCPtr)
-{
-    LogFlow(("MMR3HyperMapHCPhys: pvR3=%p pvR0=%p HCPhys=%RHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n",
-             pvR3, pvR0, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
-
-    /*
-     * Validate input.
-     */
-    AssertReturn(RT_ALIGN_P(pvR3, PAGE_SIZE) == pvR3, VERR_INVALID_PARAMETER);
-    AssertReturn(RT_ALIGN_T(pvR0, PAGE_SIZE, RTR0PTR) == pvR0, VERR_INVALID_PARAMETER);
-    AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
-    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
-
-    /*
-     * Add the memory to the hypervisor area.
-     */
-    uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
-    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
-    RTGCPTR         GCPtr;
-    PMMLOOKUPHYPER  pLookup;
-    int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
-    if (RT_SUCCESS(rc))
-    {
-        pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
-        pLookup->u.HCPhys.pvR3   = pvR3;
-        pLookup->u.HCPhys.pvR0   = pvR0;
-        pLookup->u.HCPhys.HCPhys = HCPhys;
-
-        /*
-         * Update the page table.
-         */
-        if (pVM->mm.s.fPGMInitialized)
-            rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
-        if (RT_SUCCESS(rc))
-            *pGCPtr = GCPtr;
-    }
-    return rc;
-}
-
-
-/**
- * Maps contiguous GC physical memory into the hypervisor region in the GC.
- *
- * @return VBox status code.
- *
- * @param   pVM         The cross context VM structure.
- * @param   GCPhys      Guest context physical address of the memory to be mapped. Must be page aligned!
- * @param   cb          Size of the memory. Will be rounded up to nearest page.
- * @param   pszDesc     Mapping description.
- * @param   pGCPtr      Where to store the GC address.
- */
-VMMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
-{
-    LogFlow(("MMR3HyperMapGCPhys: GCPhys=%RGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
-
-    /*
-     * Validate input.
-     */
-    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
-    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
-
-    /*
-     * Add the memory to the hypervisor area.
-     */
-    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
-    RTGCPTR         GCPtr;
-    PMMLOOKUPHYPER  pLookup;
-    int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
-    if (RT_SUCCESS(rc))
-    {
-        pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
-        pLookup->u.GCPhys.GCPhys = GCPhys;
-
-        /*
-         * Update the page table.
-         */
-        for (unsigned off = 0; off < cb; off += PAGE_SIZE)
-        {
-            RTHCPHYS HCPhys;
-            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
-            AssertRC(rc);
-            if (RT_FAILURE(rc))
-            {
-                AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
-                break;
-            }
-            if (pVM->mm.s.fPGMInitialized)
-            {
-                rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
-                AssertRC(rc);
-                if (RT_FAILURE(rc))
-                {
-                    AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
-                    break;
-                }
-            }
-        }
-
-        if (RT_SUCCESS(rc) && pGCPtr)
-            *pGCPtr = GCPtr;
-    }
-    return rc;
-}
-
-
-/**
- * Maps a portion of an MMIO2 region into the hypervisor region.
- *
- * Callers of this API must never deregister the MMIO2 region before the
- * VM is powered off.  If this becomes a requirement MMR3HyperUnmapMMIO2
- * API will be needed to perform cleanups.
- *
- * @return VBox status code.
- *
- * @param   pVM         The cross context VM structure.
- * @param   pDevIns     The device owning the MMIO2 memory.
- * @param   iSubDev     The sub-device number.
- * @param   iRegion     The region.
- * @param   off         The offset into the region. Will be rounded down to closest page boundary.
- * @param   cb          The number of bytes to map. Will be rounded up to the closest page boundary.
- * @param   pszDesc     Mapping description.
- * @param   pRCPtr      Where to store the RC address.
- */
-VMMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
-                                const char *pszDesc, PRTRCPTR pRCPtr)
-{
-    LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iSubDev=%#x iRegion=%#x off=%RGp cb=%RGp pszDesc=%p:{%s} pRCPtr=%p\n",
-             pDevIns, iSubDev, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));
-    int rc;
-
-    /*
-     * Validate input.
-     */
-    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
-    AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
-    uint32_t const offPage = off & PAGE_OFFSET_MASK;
-    off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
-    cb += offPage;
-    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
-    const RTGCPHYS offEnd = off + cb;
-    AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
-    for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
-    {
-        RTHCPHYS HCPhys;
-        rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iSubDev, iRegion, offCur, &HCPhys);
-        AssertMsgRCReturn(rc, ("rc=%Rrc - iSubDev=%#x iRegion=%#x off=%RGp\n", rc, iSubDev, iRegion, off), rc);
-    }
-
-    /*
-     * Add the memory to the hypervisor area.
-     */
-    RTGCPTR         GCPtr;
-    PMMLOOKUPHYPER  pLookup;
-    rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
-    if (RT_SUCCESS(rc))
-    {
-        pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;
-        pLookup->u.MMIO2.pDevIns = pDevIns;
-        pLookup->u.MMIO2.iSubDev = iSubDev;
-        pLookup->u.MMIO2.iRegion = iRegion;
-        pLookup->u.MMIO2.off = off;
-
-        /*
-         * Update the page table.
-         */
-        if (pVM->mm.s.fPGMInitialized)
-        {
-            for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
-            {
-                RTHCPHYS HCPhys;
-                rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iSubDev, iRegion, offCur, &HCPhys);
-                AssertRCReturn(rc, rc);
-                rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
-                if (RT_FAILURE(rc))
-                {
-                    AssertMsgFailed(("rc=%Rrc offCur=%RGp %s\n", rc, offCur, pszDesc));
-                    break;
-                }
-            }
-        }
-
-        if (RT_SUCCESS(rc))
-        {
-            GCPtr |= offPage;
-            *pRCPtr = GCPtr;
-            AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);
-        }
-    }
-    return rc;
-}
-
-#endif /* !PGM_WITHOUT_MAPPINGS */

 /**

@@ -634 +258 @@
  * @param   pGCPtr      Where to store the GC address corresponding to pvR3.
  */
-VMMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,
-                                 const char *pszDesc, PRTGCPTR pGCPtr)
+static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,
+                             const char *pszDesc, PRTGCPTR pGCPtr)
 {
     LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",

@@ -675 +299 @@
             }

-#ifndef PGM_WITHOUT_MAPPINGS
-            if (pVM->mm.s.fPGMInitialized)
-            {
-                for (size_t i = 0; i < cPages; i++)
-                {
-                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
-                    AssertRCBreak(rc);
-                }
-            }
-#endif
-            if (RT_SUCCESS(rc))
-            {
-                pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
-                pLookup->u.Locked.pvR3          = pvR3;
-                pLookup->u.Locked.pvR0          = pvR0;
-                pLookup->u.Locked.paHCPhysPages = paHCPhysPages;
-
-                /* done. */
-                *pGCPtr   = GCPtr;
-                return rc;
-            }
-            /* Don't care about failure clean, we're screwed if this fails anyway. */
-        }
+            pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
+            pLookup->u.Locked.pvR3          = pvR3;
+            pLookup->u.Locked.pvR0          = pvR0;
+            pLookup->u.Locked.paHCPhysPages = paHCPhysPages;
+
+            /* done. */
+            *pGCPtr   = GCPtr;
+            return rc;
+        }
+        /* Don't care about failure clean, we're screwed if this fails anyway. */
     }

     return rc;
 }
-
-
-#ifndef PGM_WITHOUT_MAPPINGS
-/**
- * Reserves a hypervisor memory area.
- * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPT.
- *
- * @return VBox status code.
- *
- * @param   pVM         The cross context VM structure.
- * @param   cb          Size of the memory. Will be rounded up to nearest page.
- * @param   pszDesc     Mapping description.
- * @param   pGCPtr      Where to store the assigned GC address. Optional.
- */
-VMMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
-{
-    LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
-
-    /*
-     * Validate input.
-     */
-    if (    cb <= 0
-        ||  !pszDesc
-        ||  !*pszDesc)
-    {
-        AssertMsgFailed(("Invalid parameter\n"));
-        return VERR_INVALID_PARAMETER;
-    }
-
-    /*
-     * Add the memory to the hypervisor area.
-     */
-    RTGCPTR         GCPtr;
-    PMMLOOKUPHYPER  pLookup;
-    int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
-    if (RT_SUCCESS(rc))
-    {
-        pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
-        if (pGCPtr)
-            *pGCPtr = GCPtr;
-        return VINF_SUCCESS;
-    }
-    return rc;
-}
-#endif /* !PGM_WITHOUT_MAPPINGS */


@@ -755 +321 @@
  * @param   pVM         The cross context VM structure.
  */
-VMMR3DECL(int) MMR3HyperReserveFence(PVM pVM)
-{
-#ifndef PGM_WITHOUT_MAPPINGS
-    return MMR3HyperReserve(pVM, cb, "fence", NULL);
-#else
+static int MMR3HyperReserveFence(PVM pVM)
+{
     RT_NOREF(pVM);
     return VINF_SUCCESS;
-#endif
 }


@@ -1139 +701 @@

 /**
- * Set / unset guard status on one or more hyper heap pages.
- *
- * @returns VBox status code (first failure).
- * @param   pVM                 The cross context VM structure.
- * @param   pvStart             The hyper heap page address. Must be page
- *                              aligned.
- * @param   cb                  The number of bytes. Must be page aligned.
- * @param   fSet                Whether to set or unset guard page status.
- */
-VMMR3DECL(int) MMR3HyperSetGuard(PVM pVM, void *pvStart, size_t cb, bool fSet)
-{
-    /*
-     * Validate input.
-     */
-    AssertReturn(!((uintptr_t)pvStart & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
-    AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
-    AssertReturn(cb <= UINT32_MAX, VERR_INVALID_PARAMETER);
-    PMMLOOKUPHYPER pLookup = mmR3HyperLookupR3(pVM, pvStart);
-    AssertReturn(pLookup, VERR_INVALID_PARAMETER);
-    AssertReturn(pLookup->enmType == MMLOOKUPHYPERTYPE_LOCKED, VERR_INVALID_PARAMETER);
-
-    /*
-     * Get down to business.
-     * Note! We quietly ignore errors from the support library since the
-     *       protection stuff isn't possible to implement on all platforms.
-     */
-    uint8_t    *pbR3  = (uint8_t *)pLookup->u.Locked.pvR3;
-    RTR0PTR     R0Ptr = pLookup->u.Locked.pvR0 != (uintptr_t)pLookup->u.Locked.pvR3
-                      ? pLookup->u.Locked.pvR0
-                      : NIL_RTR0PTR;
-    uint32_t    off   = (uint32_t)((uint8_t *)pvStart - pbR3);
-    int         rc;
-    if (fSet)
-    {
-#ifndef PGM_WITHOUT_MAPPINGS
-        rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pvStart), cb, 0);
-#else
-        rc = VINF_SUCCESS;
-#endif
-        SUPR3PageProtect(pbR3, R0Ptr, off, (uint32_t)cb, RTMEM_PROT_NONE);
-    }
-    else
-    {
-#ifndef PGM_WITHOUT_MAPPINGS
-        rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pvStart), cb, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
-#else
-        rc = VINF_SUCCESS;
-#endif
-        SUPR3PageProtect(pbR3, R0Ptr, off, (uint32_t)cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
-    }
-    return rc;
-}
-
-
-/**
  * Convert hypervisor HC virtual address to HC physical address.
  *

@@ -1244 +751 @@
 }

-#ifndef PGM_WITHOUT_MAPPINGS
-
-/**
- * Implements the hcphys-not-found return case of MMR3HyperQueryInfoFromHCPhys.
- *
- * @returns VINF_SUCCESS, VINF_BUFFER_OVERFLOW.
- * @param   pVM                 The cross context VM structure.
- * @param   HCPhys              The host physical address to look for.
- * @param   pLookup             The HMA lookup entry corresponding to HCPhys.
- * @param   pszWhat             Where to return the description.
- * @param   cbWhat              Size of the return buffer.
- * @param   pcbAlloc            Where to return the size of whatever it is.
- */
-static int mmR3HyperQueryInfoFromHCPhysFound(PVM pVM, RTHCPHYS HCPhys, PMMLOOKUPHYPER pLookup,
-                                             char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc)
-{
-    NOREF(pVM); NOREF(HCPhys);
-    *pcbAlloc = pLookup->cb;
-    int rc = RTStrCopy(pszWhat, cbWhat, pLookup->pszDesc);
-    return rc == VERR_BUFFER_OVERFLOW ? VINF_BUFFER_OVERFLOW : rc;
-}
-
-
-/**
- * Scans the HMA for the physical page and reports back a description if found.
- *
- * @returns VINF_SUCCESS, VINF_BUFFER_OVERFLOW, VERR_NOT_FOUND.
- * @param   pVM                 The cross context VM structure.
- * @param   HCPhys              The host physical address to look for.
- * @param   pszWhat             Where to return the description.
- * @param   cbWhat              Size of the return buffer.
- * @param   pcbAlloc            Where to return the size of whatever it is.
- */
-VMMR3_INT_DECL(int) MMR3HyperQueryInfoFromHCPhys(PVM pVM, RTHCPHYS HCPhys, char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc)
-{
-    RTHCPHYS        HCPhysPage = HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
-    PMMLOOKUPHYPER  pLookup    = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
-    for (;;)
-    {
-        switch (pLookup->enmType)
-        {
-            case MMLOOKUPHYPERTYPE_LOCKED:
-            {
-                uint32_t i = pLookup->cb >> PAGE_SHIFT;
-                while (i-- > 0)
-                    if (pLookup->u.Locked.paHCPhysPages[i] == HCPhysPage)
-                        return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
-                break;
-            }
-
-            case MMLOOKUPHYPERTYPE_HCPHYS:
-            {
-                if (pLookup->u.HCPhys.HCPhys - HCPhysPage < pLookup->cb)
-                    return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
-                break;
-            }
-
-            case MMLOOKUPHYPERTYPE_MMIO2:
-            case MMLOOKUPHYPERTYPE_GCPHYS:
-            case MMLOOKUPHYPERTYPE_DYNAMIC:
-            {
-                /* brute force. */
-                uint32_t i = pLookup->cb >> PAGE_SHIFT;
-                while (i-- > 0)
-                {
-                    RTGCPTR     GCPtr = pLookup->off + pVM->mm.s.pvHyperAreaGC;
-                    RTHCPHYS    HCPhysCur;
-                    int rc = PGMMapGetPage(pVM, GCPtr, NULL, &HCPhysCur);
-                    if (RT_SUCCESS(rc) && HCPhysCur == HCPhysPage)
-                        return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
-                }
-                break;
-            }
-            default:
-                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
-                break;
-        }
-
-        /* next */
-        if ((unsigned)pLookup->offNext == NIL_OFFSET)
-            break;
-        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
-    }
-    return VERR_NOT_FOUND;
-}
-
-
-/**
- * Read hypervisor memory from GC virtual address.
- *
- * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
- * @param   pvDst       Destination address (HC of course).
- * @param   GCPtr       GC virtual address.
- * @param   cb          Number of bytes to read.
- *
- * @remarks For DBGF only.
- */
-VMMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
-{
-    if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
-        return VERR_INVALID_POINTER;
-    return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
-}
-
-#endif /* !PGM_WITHOUT_MAPPINGS */

 /**
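
Several of the surviving MM functions are now no-op stubs; MMR3HyperReserveFence above is the clearest case. A compilable sketch of what it reduces to, using simplified stand-ins for the VBox/IPRT definitions (the real PVM, RT_NOREF and VINF_SUCCESS come from the VBox/vmm and iprt headers):

    /* fence_stub.c - what MMR3HyperReserveFence amounts to after r91854.
     * The definitions below are stand-ins so the sketch compiles on its own. */
    typedef struct VM *PVM;           /* stand-in for the cross context VM structure */
    #define RT_NOREF(var)  ((void)(var))
    #define VINF_SUCCESS   0

    /* Previously this reserved a guard ("fence") page in the hypervisor memory
     * area via MMR3HyperReserve(); now it succeeds without doing anything. */
    static int MMR3HyperReserveFence(PVM pVM)
    {
        RT_NOREF(pVM);
        return VINF_SUCCESS;
    }

    int main(void)
    {
        return MMR3HyperReserveFence((PVM)0);   /* always VINF_SUCCESS */
    }
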
  • trunk/src/VBox/VMM/VMMR3/PDMLdr.cpp (r86510 → r91854)

@@ -565 +565 @@
                 {
                     RTGCPTR GCPtr;
-                    rc = MMR3HyperMapPages(pVM, pModule->pvBits, NIL_RTR0PTR,
-                                           cPages, paPages, pModule->szName, &GCPtr);
+                    rc = VERR_NOT_IMPLEMENTED; //MMR3HyperMapPages(pVM, pModule->pvBits, NIL_RTR0PTR, cPages, paPages, pModule->szName, &GCPtr);
                     if (RT_SUCCESS(rc))
                     {
-                        MMR3HyperReserveFence(pVM);
+                        //MMR3HyperReserveFence(pVM);

                         /*
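
At the call sites the treatment is blunter still: the module-loading path above now fails outright with VERR_NOT_IMPLEMENTED and keeps the old MMR3HyperMapPages call only as a comment. A self-contained sketch of that call-site pattern (VERR_NOT_IMPLEMENTED matches IPRT's value; the function name and the RT_SUCCESS stand-in are simplified):

    /* ldr_stub.c - sketch of the PDMLdr.cpp call-site change. */
    #include <stdio.h>

    #define VERR_NOT_IMPLEMENTED  (-12)       /* IPRT status code */
    #define RT_SUCCESS(rc)        ((rc) >= 0) /* simplified stand-in */

    static int loadModuleSketch(void)   /* hypothetical name */
    {
        int rc = VERR_NOT_IMPLEMENTED; /* was: rc = MMR3HyperMapPages(...); */
        if (RT_SUCCESS(rc))
        {
            /* was: MMR3HyperReserveFence(pVM); -- never reached now */
        }
        return rc;
    }

    int main(void)
    {
        printf("rc=%d\n", loadModuleSketch());
        return 0;
    }
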
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp (r91848 → r91854)

@@ -824 +824 @@
     pVM->pgm.s.enmHostMode      = SUPPAGINGMODE_INVALID;
     pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */
-#ifndef PGM_WITHOUT_MAPPINGS
-    pVM->pgm.s.GCPtrPrevRamRangeMapping = MM_HYPER_AREA_ADDRESS;
-#endif

     rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,

@@ -1005 +1002 @@
                                    "Add 'nost' if the statistics are unwanted, use together with 'all' or explicit selection.",
                                    pgmR3InfoHandlers);
-#ifndef PGM_WITHOUT_MAPPINGS
-        DBGFR3InfoRegisterInternal(pVM, "mappings",
-                                   "Dumps guest mappings.",
-                                   pgmR3MapInfo);
-#endif

         pgmR3InitStats(pVM);

@@ -1079 +1071 @@

     pVM->pgm.s.enmHostMode   = SUPPAGINGMODE_INVALID;
-
-#ifndef PGM_WITHOUT_MAPPINGS
-    /*
-     * Allocate static mapping space for whatever the cr3 register
-     * points to and in the case of PAE mode to the 4 PDs.
-     */
-    int rc = MMR3HyperReserve(pVM, PAGE_SIZE * 5, "CR3 mapping", &pVM->pgm.s.GCPtrCR3Mapping);
-    if (RT_FAILURE(rc))
-    {
-        AssertMsgFailed(("Failed to reserve two pages for cr mapping in HMA, rc=%Rrc\n", rc));
-        return rc;
-    }
-    MMR3HyperReserveFence(pVM);
-#endif
-
-#if 0
-    /*
-     * Allocate pages for the three possible intermediate contexts
-     * (AMD64, PAE and plain 32-Bit). We maintain all three contexts
-     * for the sake of simplicity. The AMD64 uses the PAE for the
-     * lower levels, making the total number of pages 11 (3 + 7 + 1).
-     *
-     * We assume that two page tables will be enought for the core code
-     * mappings (HC virtual and identity).
-     */
-    pVM->pgm.s.pInterPD         = (PX86PD)MMR3PageAllocLow(pVM);    AssertReturn(pVM->pgm.s.pInterPD,         VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.apInterPTs[0]    = (PX86PT)MMR3PageAllocLow(pVM);    AssertReturn(pVM->pgm.s.apInterPTs[0],    VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.apInterPTs[1]    = (PX86PT)MMR3PageAllocLow(pVM);    AssertReturn(pVM->pgm.s.apInterPTs[1],    VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.apInterPaePTs[0] = (PX86PTPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePTs[0], VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.apInterPaePTs[1] = (PX86PTPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePTs[1], VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.apInterPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePDs[0], VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.apInterPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePDs[1], VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.apInterPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePDs[2], VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.apInterPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePDs[3], VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.pInterPaePDPT    = (PX86PDPT)MMR3PageAllocLow(pVM);  AssertReturn(pVM->pgm.s.pInterPaePDPT,    VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.pInterPaePDPT64  = (PX86PDPT)MMR3PageAllocLow(pVM);  AssertReturn(pVM->pgm.s.pInterPaePDPT64,  VERR_NO_PAGE_MEMORY);
-    pVM->pgm.s.pInterPaePML4    = (PX86PML4)MMR3PageAllocLow(pVM);  AssertReturn(pVM->pgm.s.pInterPaePML4,    VERR_NO_PAGE_MEMORY);
-
-    pVM->pgm.s.HCPhysInterPD = MMPage2Phys(pVM, pVM->pgm.s.pInterPD);
-    AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
-    pVM->pgm.s.HCPhysInterPaePDPT = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT);
-    AssertRelease(pVM->pgm.s.HCPhysInterPaePDPT != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPT & PAGE_OFFSET_MASK));
-    pVM->pgm.s.HCPhysInterPaePML4 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePML4);
-    AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) && pVM->pgm.s.HCPhysInterPaePML4 < 0xffffffff);
-
-    /*
-     * Initialize the pages, setting up the PML4 and PDPT for repetitive 4GB action.
-     */
-    ASMMemZeroPage(pVM->pgm.s.pInterPD);
-    ASMMemZeroPage(pVM->pgm.s.apInterPTs[0]);
-    ASMMemZeroPage(pVM->pgm.s.apInterPTs[1]);
-
-    ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[0]);
-    ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[1]);
-
-    ASMMemZeroPage(pVM->pgm.s.pInterPaePDPT);
-    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apInterPaePDs); i++)
-    {
-        ASMMemZeroPage(pVM->pgm.s.apInterPaePDs[i]);
-        pVM->pgm.s.pInterPaePDPT->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT
-                                          | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[i]);
-    }
-
-    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.pInterPaePDPT64->a); i++)
-    {
-        const unsigned iPD = i % RT_ELEMENTS(pVM->pgm.s.apInterPaePDs);
-        pVM->pgm.s.pInterPaePDPT64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
-                                            | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[iPD]);
-    }
-
-    RTHCPHYS HCPhysInterPaePDPT64 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT64);
-    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.pInterPaePML4->a); i++)
-        pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
-                                         | HCPhysInterPaePDPT64;
-#endif

     /*

@@ -1428 +1345 @@
         PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2InvalidPhys,      "/PGM/CPU%u/RZ/Trap0e/Time2/InvalidPhys",       "Profiling of the Trap0eHandler body when the cause is access to an invalid physical guest address.");
         PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2MakeWritable,     "/PGM/CPU%u/RZ/Trap0e/Time2/MakeWritable",      "Profiling of the Trap0eHandler body when the cause is that a page needed to be made writeable.");
-        PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Mapping,          "/PGM/CPU%u/RZ/Trap0e/Time2/Mapping",           "Profiling of the Trap0eHandler body when the cause is related to the guest mappings.");
         PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Misc,             "/PGM/CPU%u/RZ/Trap0e/Time2/Misc",              "Profiling of the Trap0eHandler body when the cause is not known.");
         PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSync,        "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSync",         "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");

@@ -1438 +1354 @@
         PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Wp0RoUsUnhack,    "/PGM/CPU%u/RZ/Trap0e/Time2/WP0R0USUnhack",     "Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be disabled.");
         PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eConflicts,             "/PGM/CPU%u/RZ/Trap0e/Conflicts",               "The number of times #PF was caused by an undetected conflict.");
-        PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersMapping,       "/PGM/CPU%u/RZ/Trap0e/Handlers/Mapping",        "Number of traps due to access handlers in mappings.");
         PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersOutOfSync,     "/PGM/CPU%u/RZ/Trap0e/Handlers/OutOfSync",      "Number of traps due to out-of-sync handled pages.");
         PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersPhysAll,       "/PGM/CPU%u/RZ/Trap0e/Handlers/PhysAll",        "Number of traps due to physical all-access handlers.");

@@ -1457 +1372 @@
         PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSNXE,                  "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NXE",      "Number of supervisor mode NXE page faults.");
         PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eGuestPF,               "/PGM/CPU%u/RZ/Trap0e/GuestPF",                 "Number of real guest page faults.");
-        PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eGuestPFMapping,        "/PGM/CPU%u/RZ/Trap0e/GuestPF/InMapping",       "Number of real guest page faults in a mapping.");
         PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eWPEmulInRZ,            "/PGM/CPU%u/RZ/Trap0e/WP/InRZ",                 "Number of guest page faults due to X86_CR0_WP emulation.");
         PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eWPEmulToR3,            "/PGM/CPU%u/RZ/Trap0e/WP/ToR3",                 "Number of guest page faults due to X86_CR0_WP emulation (forward to R3 for emulation).");

@@ -1541 +1455 @@
         PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4MBPages,      "/PGM/CPU%u/RZ/InvalidatePage/4MBPages",    "The number of times PGMInvalidatePage() was called for a 4MB page.");
         PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4MBPagesSkip,  "/PGM/CPU%u/RZ/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
-        PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDMappings,    "/PGM/CPU%u/RZ/InvalidatePage/PDMappings",  "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
         PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNAs,         "/PGM/CPU%u/RZ/InvalidatePage/PDNAs",       "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
         PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNPs,         "/PGM/CPU%u/RZ/InvalidatePage/PDNPs",       "The number of times PGMInvalidatePage() was called for a not present page directory.");

@@ -1589 +1502 @@
         PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4MBPages,      "/PGM/CPU%u/R3/InvalidatePage/4MBPages",    "The number of times PGMInvalidatePage() was called for a 4MB page.");
         PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4MBPagesSkip,  "/PGM/CPU%u/R3/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
-        PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDMappings,    "/PGM/CPU%u/R3/InvalidatePage/PDMappings",  "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
         PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNAs,         "/PGM/CPU%u/R3/InvalidatePage/PDNAs",       "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
         PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNPs,         "/PGM/CPU%u/R3/InvalidatePage/PDNPs",       "The number of times PGMInvalidatePage() was called for a not present page directory.");

@@ -1617 +1529 @@

 /**
- * Init the PGM bits that rely on VMMR0 and MM to be fully initialized.
- *
- * The dynamic mapping area will also be allocated and initialized at this
- * time. We could allocate it during PGMR3Init of course, but the mapping
- * wouldn't be allocated at that time preventing us from setting up the
- * page table entries with the dummy page.
- *
- * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
- */
-VMMR3DECL(int) PGMR3InitDynMap(PVM pVM)
-{
-#ifndef PGM_WITHOUT_MAPPINGS
-    RTGCPTR GCPtr;
-    int     rc;
-
-    /*
-     * Reserve space for the dynamic mappings.
-     */
-    rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping", &GCPtr);
-    if (RT_SUCCESS(rc))
-        pVM->pgm.s.pbDynPageMapBaseGC = GCPtr;
-
-    if (    RT_SUCCESS(rc)
-        &&  (pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT))
-    {
-        rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping not crossing", &GCPtr);
-        if (RT_SUCCESS(rc))
-            pVM->pgm.s.pbDynPageMapBaseGC = GCPtr;
-    }
-    if (RT_SUCCESS(rc))
-    {
-        AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT));
-        MMR3HyperReserveFence(pVM);
-    }
-    return rc;
-#else
-    RT_NOREF(pVM);
-    return VINF_SUCCESS;
-#endif
-}
-
-
-/**
  * Ring-3 init finalizing.
  *

@@ -1668 +1536 @@
 VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
 {
-#ifndef PGM_WITHOUT_MAPPINGS
-    int rc = VERR_IPE_UNINITIALIZED_STATUS; /* (MSC incorrectly thinks it can be used uninitialized) */
-
-    /*
-     * Reserve space for the dynamic mappings.
-     * Initialize the dynamic mapping pages with dummy pages to simply the cache.
-     */
-    /* get the pointer to the page table entries. */
-    PPGMMAPPING pMapping = pgmGetMapping(pVM, pVM->pgm.s.pbDynPageMapBaseGC);
-    AssertRelease(pMapping);
-    const uintptr_t off = pVM->pgm.s.pbDynPageMapBaseGC - pMapping->GCPtr;
-    const unsigned iPT =  off >> X86_PD_SHIFT;
-    const unsigned iPG = (off >> X86_PT_SHIFT) & X86_PT_MASK;
-    pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTRC      + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);
-    pVM->pgm.s.paDynPageMapPaePTEsGC   = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
-
-    /* init cache area */
-    RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);
-    for (uint32_t offDynMap = 0; offDynMap < MM_HYPER_DYNAMIC_SIZE; offDynMap += PAGE_SIZE)
-    {
-        rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + offDynMap, HCPhysDummy, PAGE_SIZE, 0);
-        AssertRCReturn(rc, rc);
-    }
-#endif
-
     /*
      * Determine the max physical address width (MAXPHYADDR) and apply it to

@@ -1772 +1615 @@
      * Allocate memory if we're supposed to do that.
      */
-#ifdef PGM_WITHOUT_MAPPINGS
     int rc = VINF_SUCCESS;
-#endif
     if (pVM->pgm.s.fRamPreAlloc)
         rc = pgmR3PhysRamPreAllocate(pVM);

@@ -1870 +1711 @@
         pgmR3PhysRelinkRamRanges(pVM);

-#ifndef PGM_WITHOUT_MAPPINGS
-
-    /*
-     * Update the two page directories with all page table mappings.
-     * (One or more of them have changed, that's why we're here.)
-     */
-    pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pMappingsR3);
-    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur->pNextR3; pCur = pCur->pNextR3)
-        pCur->pNextRC = MMHyperR3ToRC(pVM, pCur->pNextR3);
-
-    /* Relocate GC addresses of Page Tables. */
-    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
-    {
-        for (RTHCUINT i = 0; i < pCur->cPTs; i++)
-        {
-            pCur->aPTs[i].pPTRC = MMHyperR3ToRC(pVM, pCur->aPTs[i].pPTR3);
-            pCur->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pCur->aPTs[i].paPaePTsR3);
-        }
-    }
-
-    /*
-     * Dynamic page mapping area.
-     */
-    pVM->pgm.s.paDynPageMap32BitPTEsGC += offDelta;
-    pVM->pgm.s.paDynPageMapPaePTEsGC   += offDelta;
-    pVM->pgm.s.pbDynPageMapBaseGC      += offDelta;
-
-    if (pVM->pgm.s.pRCDynMap)
-    {
-        pVM->pgm.s.pRCDynMap += offDelta;
-        PPGMRCDYNMAP pDynMap = (PPGMRCDYNMAP)MMHyperRCToCC(pVM, pVM->pgm.s.pRCDynMap);
-
-        pDynMap->paPages     += offDelta;
-        PPGMRCDYNMAPENTRY paPages = (PPGMRCDYNMAPENTRY)MMHyperRCToCC(pVM, pDynMap->paPages);
-
-        for (uint32_t iPage = 0; iPage < pDynMap->cPages; iPage++)
-        {
-            paPages[iPage].pvPage       += offDelta;
-            paPages[iPage].uPte.pLegacy += offDelta;
-            paPages[iPage].uPte.pPae    += offDelta;
-        }
-    }
-
-#endif /* PGM_WITHOUT_MAPPINGS */
-
     /*
      * The Zero page.

@@ -1980 +1776 @@

     PGM_LOCK_VOID(pVM);
-
-    /*
-     * Unfix any fixed mappings and disable CR3 monitoring.
-     */
-    pVM->pgm.s.fMappingsFixed         = false;
-    pVM->pgm.s.fMappingsFixedRestored = false;
-    pVM->pgm.s.GCPtrMappingFixed      = NIL_RTGCPTR;
-    pVM->pgm.s.cbMappingFixed         = 0;

     /*
  • trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp (r91016 → r91854)

@@ -1104 +1104 @@
  * @param   HCPhys              The physical address of the shadow page.
  * @param   pszDesc             The description.
- * @param   fIsMapping          Set if it's a mapping.
  * @param   ppv                 Where to return the pointer.
  */
-static int pgmR3DumpHierarchyShwMapPage(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, const char *pszDesc,
-                                        bool fIsMapping, void const **ppv)
-{
-    void *pvPage;
-    if (!fIsMapping)
-    {
-        PPGMPOOLPAGE pPoolPage = pgmPoolQueryPageForDbg(pState->pVM->pgm.s.pPoolR3, HCPhys);
-        if (pPoolPage)
-        {
-            pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! %s at HCPhys=%RHp was not found in the page pool!\n",
-                                    pState->cchAddress, pState->u64Address, pszDesc, HCPhys);
-            return VERR_PGM_POOL_GET_PAGE_FAILED;
-        }
-        pvPage = (uint8_t *)pPoolPage->pvPageR3 + (HCPhys & PAGE_OFFSET_MASK);
-    }
-    else
-    {
-        pvPage = NULL;
-#ifndef PGM_WITHOUT_MAPPINGS
-        for (PPGMMAPPING pMap = pState->pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)
-        {
-            uint64_t off = pState->u64Address - pMap->GCPtr;
-            if (off < pMap->cb)
-            {
-                const int iPDE = (uint32_t)(off >> X86_PD_SHIFT);
-                const int iSub = (int)((off >> X86_PD_PAE_SHIFT) & 1); /* MSC is a pain sometimes */
-                if ((iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0) != HCPhys)
-                    pState->pHlp->pfnPrintf(pState->pHlp,
-                                            "%0*llx error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
-                                            pState->cchAddress, pState->u64Address, iPDE,
-                                            iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0, HCPhys);
-                pvPage = &pMap->aPTs[iPDE].paPaePTsR3[iSub];
-                break;
-            }
-        }
-#endif /* !PGM_WITHOUT_MAPPINGS */
-        if (!pvPage)
-        {
-            pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! PT mapping %s at HCPhys=%RHp was not found in the page pool!\n",
-                                    pState->cchAddress, pState->u64Address, pszDesc, HCPhys);
-            return VERR_INVALID_PARAMETER;
-        }
-    }
-    *ppv = pvPage;
+static int pgmR3DumpHierarchyShwMapPage(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, const char *pszDesc, void const **ppv)
+{
+    PPGMPOOLPAGE pPoolPage = pgmPoolQueryPageForDbg(pState->pVM->pgm.s.pPoolR3, HCPhys);
+    if (pPoolPage)
+    {
+        pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! %s at HCPhys=%RHp was not found in the page pool!\n",
+                                pState->cchAddress, pState->u64Address, pszDesc, HCPhys);
+        return VERR_PGM_POOL_GET_PAGE_FAILED;
+    }
+    *ppv = (uint8_t *)pPoolPage->pvPageR3 + (HCPhys & PAGE_OFFSET_MASK);
     return VINF_SUCCESS;
 }

@@ -1169 +1134 @@
         RTStrPrintf(szPage, sizeof(szPage), " idx=0i%u", pPage->idx);
     else
-    {
-        /* probably a mapping */
         strcpy(szPage, " not found");
-#ifndef PGM_WITHOUT_MAPPINGS
-        for (PPGMMAPPING pMap = pState->pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)
-        {
-            uint64_t off = pState->u64Address - pMap->GCPtr;
-            if (off < pMap->cb)
-            {
-                const int iPDE = (uint32_t)(off >> X86_PD_SHIFT);
-                if (pMap->aPTs[iPDE].HCPhysPT == HCPhys)
-                    RTStrPrintf(szPage, sizeof(szPage), " #%u: %s", iPDE, pMap->pszDesc);
-                else if (pMap->aPTs[iPDE].HCPhysPaePT0 == HCPhys)
-                    RTStrPrintf(szPage, sizeof(szPage), " #%u/0: %s", iPDE, pMap->pszDesc);
-                else if (pMap->aPTs[iPDE].HCPhysPaePT1 == HCPhys)
-                    RTStrPrintf(szPage, sizeof(szPage), " #%u/1: %s", iPDE, pMap->pszDesc);
-                else
-                    continue;
-                break;
-            }
-        }
-#endif /* !PGM_WITHOUT_MAPPINGS */
-    }
     PGM_UNLOCK(pState->pVM);
     pState->pHlp->pfnPrintf(pState->pHlp, "%s", szPage);

@@ -1221 +1164 @@
     }
     else
-    {
-#ifndef PGM_WITHOUT_MAPPINGS
-        /* check the heap */
-        uint32_t cbAlloc;
-        rc = MMR3HyperQueryInfoFromHCPhys(pState->pVM, HCPhys, szPage, sizeof(szPage), &cbAlloc);
-        if (RT_SUCCESS(rc))
-            pState->pHlp->pfnPrintf(pState->pHlp, " %s %#x bytes", szPage, cbAlloc);
-        else
-#endif
-            pState->pHlp->pfnPrintf(pState->pHlp, " not found");
-    }
+        pState->pHlp->pfnPrintf(pState->pHlp, " not found");
     NOREF(cbPage);
 }

@@ -1242 +1175 @@
  * @param   pState              The dumper state.
  * @param   HCPhys              The page table address.
- * @param   fIsMapping          Whether it is a mapping.
- */
-static int pgmR3DumpHierarchyShwPaePT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, bool fIsMapping)
+ */
+static int pgmR3DumpHierarchyShwPaePT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys)
 {
     PCPGMSHWPTPAE pPT;
-    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", fIsMapping, (void const **)&pPT);
+    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", (void const **)&pPT);
     if (RT_FAILURE(rc))
         return rc;

@@ -1315 +1247 @@
 {
     PCX86PDPAE pPD;
-    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", false, (void const **)&pPD);
+    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", (void const **)&pPD);
     if (RT_FAILURE(rc))
         return rc;

@@ -1347 +1279 @@
                                         Pde.b.u1NoExecute   ? "NX" : "--",
                                         Pde.u & PGM_PDFLAGS_BIG_PAGE    ? 'b' : '-',
-#ifndef PGM_WITHOUT_MAPPINGS
-                                        Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
-#else
                                         '-',
-#endif
                                         Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                         Pde.u & X86_PDE2M_PAE_PG_MASK);

@@ -1380 +1308 @@
                                         Pde.n.u1NoExecute   ? "NX" : "--",
                                         Pde.u & PGM_PDFLAGS_BIG_PAGE    ? 'b' : '-',
-#ifndef PGM_WITHOUT_MAPPINGS
-                                        Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
-#else
                                         '-',
-#endif
                                         Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                         Pde.u & X86_PDE_PAE_PG_MASK);

@@ -1395 +1319 @@
                 if (cMaxDepth)
                 {
-                    int rc2 = pgmR3DumpHierarchyShwPaePT(pState, Pde.u & X86_PDE_PAE_PG_MASK,
-#ifndef PGM_WITHOUT_MAPPINGS
-                                                         RT_BOOL(Pde.u & PGM_PDFLAGS_MAPPING)
-#else
-                                                         false /*fIsMapping*/
-#endif
-                                                         );
+                    int rc2 = pgmR3DumpHierarchyShwPaePT(pState, Pde.u & X86_PDE_PAE_PG_MASK);
                     if (rc2 < rc && RT_SUCCESS(rc))
                         rc = rc2;

@@ -1429 +1347 @@

     PCX86PDPT pPDPT;
-    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory pointer table", false, (void const **)&pPDPT);
+    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory pointer table", (void const **)&pPDPT);
     if (RT_FAILURE(rc))
         return rc;

@@ -1519 +1437 @@
 {
     PCX86PML4 pPML4;
-    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page map level 4", false, (void const **)&pPML4);
+    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page map level 4", (void const **)&pPML4);
     if (RT_FAILURE(rc))
         return rc;

@@ -1591 +1509 @@
  * @param   pState      The dumper state.
  * @param   HCPhys      The physical address of the table.
- * @param   fMapping    Set if it's a guest mapping.
- */
-static int pgmR3DumpHierarchyShw32BitPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, bool fMapping)
+ */
+static int pgmR3DumpHierarchyShw32BitPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys)
 {
     PCX86PT pPT;
-    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", fMapping, (void const **)&pPT);
+    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", (void const **)&pPT);
     if (RT_FAILURE(rc))
         return rc;

@@ -1646 +1563 @@

     PCX86PD pPD;
-    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", false, (void const **)&pPD);
+    int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", (void const **)&pPD);
     if (RT_FAILURE(rc))
         return rc;

@@ -1677 +1594 @@
                                         Pde.b.u1PAT         ? "AT" : "--",
                                         Pde.u & PGM_PDFLAGS_BIG_PAGE    ? 'b' : '-',
-#ifndef PGM_WITHOUT_MAPPINGS
-                                        Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
-#else
                                         '-',
-#endif
                                         Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                         u64Phys);

@@ -1702 +1615 @@
                                         Pde.n.u1CacheDisable? "CD" : "--",
                                         Pde.u & PGM_PDFLAGS_BIG_PAGE    ? 'b' : '-',
-#ifndef PGM_WITHOUT_MAPPINGS
-                                        Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
-#else
                                         '-',
    1708 #endif
    17091618                                        Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
    17101619                                        Pde.u & X86_PDE_PG_MASK);
     
    17151624                if (cMaxDepth)
    17161625                {
    1717                     int rc2 = pgmR3DumpHierarchyShw32BitPT(pState, Pde.u & X86_PDE_PG_MASK,
    1718 #ifndef PGM_WITHOUT_MAPPINGS
    1719                                                            !!(Pde.u & PGM_PDFLAGS_MAPPING)
    1720 #else
    1721                                                            false /*fIsMapping*/
    1722 #endif
    1723                                                            );
     1626                    int rc2 = pgmR3DumpHierarchyShw32BitPT(pState, Pde.u & X86_PDE_PG_MASK);
    17241627                    if (rc2 < rc && RT_SUCCESS(rc))
    17251628                        rc = rc2;
  • trunk/src/VBox/VMM/VMMR3/PGMMap.cpp

    r91247 r91854  
    2121*********************************************************************************************************************************/
    2222#define LOG_GROUP LOG_GROUP_PGM
    23 #include <VBox/vmm/dbgf.h>
    2423#include <VBox/vmm/pgm.h>
    2524#include "PGMInternal.h"
    2625#include <VBox/vmm/vm.h>
    27 #include "PGMInline.h"
    2826
    2927#include <VBox/log.h>
    30 #include <VBox/err.h>
    31 #include <iprt/asm.h>
    32 #include <iprt/assert.h>
    33 #include <iprt/string.h>
    34 
    35 
    36 /*********************************************************************************************************************************
    37 *   Internal Functions                                                                                                           *
    38 *********************************************************************************************************************************/
    39 #ifndef PGM_WITHOUT_MAPPINGS
    40 static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
    41 static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
    42 static int  pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
    43 static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
    44 #endif
    45 
    46 
    47 #ifndef PGM_WITHOUT_MAPPINGS
    48 
    49 /**
    50  * Creates a page table based mapping in GC.
    51  *
    52  * @returns VBox status code.
    53  * @param   pVM             The cross context VM structure.
    54  * @param   GCPtr           Virtual Address. (Page table aligned!)
    55  * @param   cb              Size of the range. Must be 4MB aligned!
    56  * @param   fFlags          PGMR3MAPPT_FLAGS_UNMAPPABLE or 0.
    57  * @param   pfnRelocate     Relocation callback function.
    58  * @param   pvUser          User argument to the callback.
    59  * @param   pszDesc         Pointer to description string. This must not be freed.
    60  */
    61 VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
    62 {
    63     LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d fFlags=%#x pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, fFlags, pfnRelocate, pvUser, pszDesc));
    64     AssertMsg(pVM->pgm.s.pInterPD, ("Paging isn't initialized, init order problems!\n"));
    65 
    66     /*
    67      * Validate input.
    68      * Note! The lower limit (1 MB) matches how pgmR3PhysMMIOExCreate works.
    69      */
    70     Assert(!fFlags || fFlags == PGMR3MAPPT_FLAGS_UNMAPPABLE);
    71     AssertMsgReturn(cb >= _1M && cb <= _64M, ("Seriously? cb=%d (%#x)\n", cb, cb), VERR_OUT_OF_RANGE);
    72 
    73     cb = RT_ALIGN_32(cb, _4M);
    74     RTGCPTR GCPtrLast = GCPtr + cb - 1;
    75 
    76     AssertMsgReturn(GCPtrLast >= GCPtr, ("Range wraps! GCPtr=%x GCPtrLast=%x\n", GCPtr, GCPtrLast),
    77                     VERR_INVALID_PARAMETER);
    78     AssertMsgReturn(!pVM->pgm.s.fMappingsFixed, ("Mappings are fixed! It's not possible to add new mappings at this time!\n"),
    79                     VERR_PGM_MAPPINGS_FIXED);
    80     AssertPtrReturn(pfnRelocate, VERR_INVALID_PARAMETER);
    81 
    82     /*
    83      * Find list location.
    84      */
    85     PPGMMAPPING pPrev = NULL;
    86     PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
    87     while (pCur)
    88     {
    89         if (pCur->GCPtrLast >= GCPtr && pCur->GCPtr <= GCPtrLast)
    90         {
    91             AssertMsgFailed(("Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
    92                              pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
    93             LogRel(("VERR_PGM_MAPPING_CONFLICT: Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
    94                     pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
    95             return VERR_PGM_MAPPING_CONFLICT;
    96         }
    97         if (pCur->GCPtr > GCPtr)
    98             break;
    99         pPrev = pCur;
    100         pCur = pCur->pNextR3;
    101     }
    102 
    103     /*
    104      * Check for conflicts with intermediate mappings.
    105      */
    106     const unsigned iPageDir = GCPtr >> X86_PD_SHIFT;
    107     const unsigned cPTs     = cb >> X86_PD_SHIFT;
    108     if (pVM->pgm.s.fFinalizedMappings)
    109     {
    110         for (unsigned i = 0; i < cPTs; i++)
    111             if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
    112             {
    113                 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << X86_PD_SHIFT)));
    114                 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << X86_PD_SHIFT)));
    115                 return VERR_PGM_MAPPING_CONFLICT;
    116             }
    117         /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
    118     }
    119 
    120     /*
    121      * Allocate and initialize the new list node.
    122      */
    123     PPGMMAPPING pNew;
    124     int rc;
    125     if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE)
    126         rc = MMHyperAlloc(           pVM, RT_UOFFSETOF_DYN(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew);
    127     else
    128         rc = MMR3HyperAllocOnceNoRel(pVM, RT_UOFFSETOF_DYN(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew);
    129     if (RT_FAILURE(rc))
    130         return rc;
    131     pNew->GCPtr         = GCPtr;
    132     pNew->GCPtrLast     = GCPtrLast;
    133     pNew->cb            = cb;
    134     pNew->pfnRelocate   = pfnRelocate;
    135     pNew->pvUser        = pvUser;
    136     pNew->pszDesc       = pszDesc;
    137     pNew->cPTs          = cPTs;
    138 
    139     /*
    140      * Allocate page tables and insert them into the page directories.
    141      * (One 32-bit PT and two PAE PTs.)
    142      */
    143     uint8_t *pbPTs;
    144     if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE)
    145         rc = MMHyperAlloc(           pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs);
    146     else
    147         rc = MMR3HyperAllocOnceNoRel(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs);
    148     if (RT_FAILURE(rc))
    149     {
    150         MMHyperFree(pVM, pNew);
    151         return VERR_NO_MEMORY;
    152     }
    153 
    154     /*
    155      * Init the page tables and insert them into the page directories.
    156      */
    157     Log4(("PGMR3MapPT: GCPtr=%RGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs));
    158     for (unsigned i = 0; i < cPTs; i++)
    159     {
    160         /*
    161          * 32-bit.
    162          */
    163         pNew->aPTs[i].pPTR3    = (PX86PT)pbPTs;
    164         pNew->aPTs[i].pPTRC    = MMHyperR3ToRC(pVM, pNew->aPTs[i].pPTR3);
    165         pNew->aPTs[i].pPTR0    = MMHyperR3ToR0(pVM, pNew->aPTs[i].pPTR3);
    166         pNew->aPTs[i].HCPhysPT = MMR3HyperHCVirt2HCPhys(pVM, pNew->aPTs[i].pPTR3);
    167         pbPTs += PAGE_SIZE;
    168         Log4(("PGMR3MapPT: i=%d: pPTR3=%RHv pPTRC=%RRv pPRTR0=%RHv HCPhysPT=%RHp\n",
    169               i, pNew->aPTs[i].pPTR3, pNew->aPTs[i].pPTRC, pNew->aPTs[i].pPTR0, pNew->aPTs[i].HCPhysPT));
    170 
    171         /*
    172          * PAE.
    173          */
    174         pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs);
    175         pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE);
    176         pNew->aPTs[i].paPaePTsR3 = (PPGMSHWPTPAE)pbPTs;
    177         pNew->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pbPTs);
    178         pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs);
    179         pbPTs += PAGE_SIZE * 2;
    180         Log4(("PGMR3MapPT: i=%d: paPaePTsR#=%RHv paPaePTsRC=%RRv paPaePTsR#=%RHv HCPhysPaePT0=%RHp HCPhysPaePT1=%RHp\n",
    181               i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsRC, pNew->aPTs[i].paPaePTsR0, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1));
    182     }
    183     if (pVM->pgm.s.fFinalizedMappings)
    184         pgmR3MapSetPDEs(pVM, pNew, iPageDir);
    185     /* else PGMR3FinalizeMappings() */
    186 
    187     /*
    188      * Insert the new mapping.
    189      */
    190     pNew->pNextR3 = pCur;
    191     pNew->pNextRC = pCur ? MMHyperR3ToRC(pVM, pCur) : NIL_RTRCPTR;
    192     pNew->pNextR0 = pCur ? MMHyperR3ToR0(pVM, pCur) : NIL_RTR0PTR;
    193     if (pPrev)
    194     {
    195         pPrev->pNextR3 = pNew;
    196         pPrev->pNextRC = MMHyperR3ToRC(pVM, pNew);
    197         pPrev->pNextR0 = MMHyperR3ToR0(pVM, pNew);
    198     }
    199     else
    200     {
    201         pVM->pgm.s.pMappingsR3 = pNew;
    202         pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pNew);
    203         pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pNew);
    204     }
    205 
    206     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    207     {
    208         PVMCPU pVCpu = pVM->apCpusR3[i];
    209         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    210     }
    211     return VINF_SUCCESS;
    212 }
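For reference, a minimal caller of the API removed above could look like this sketch (exampleRelocateCb and the description string are hypothetical; the callback contract follows the pfnRelocate uses in this file, where PGMRELOCATECALL_SUGGEST asks whether a proposed address is acceptable and PGMRELOCATECALL_RELOCATE announces the actual move):

static DECLCALLBACK(bool) exampleRelocateCb(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
                                            PGMRELOCATECALL enmMode, void *pvUser)
{
    RT_NOREF(pVM, GCPtrOld, pvUser);
    switch (enmMode)
    {
        case PGMRELOCATECALL_SUGGEST:
            /* Accept any 4MB (page directory) aligned suggestion. */
            return !(GCPtrNew & X86_PAGE_4M_OFFSET_MASK);
        case PGMRELOCATECALL_RELOCATE:
            /* Update cached pointers into the mapped range here, then ack. */
            return true;
        default:
            return false;
    }
}

/* Register a relocatable 4MB page table based mapping; the final address is
   decided by PGMR3FinalizeMappings() or a later fixation. */
int rc = PGMR3MapPT(pVM, GCPtr, _4M, 0 /*fFlags*/, exampleRelocateCb,
                    NULL /*pvUser*/, "Example mapping");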
    213 
    214 #ifdef VBOX_WITH_UNUSED_CODE
    215 
    216 /**
    217  * Removes a page table based mapping.
    218  *
    219  * @returns VBox status code.
    220  * @param   pVM     The cross context VM structure.
    221  * @param   GCPtr   Virtual Address. (Page table aligned!)
    222  *
    223  * @remarks Don't call this without passing PGMR3MAPPT_FLAGS_UNMAPPABLE to
    224  *          PGMR3MapPT or you'll burn in the heap.
    225  */
    226 VMMR3DECL(int)  PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
    227 {
    228     LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr));
    229     AssertReturn(pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
    230 
    231     /*
    232      * Find it.
    233      */
    234     PPGMMAPPING pPrev = NULL;
    235     PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
    236     while (pCur)
    237     {
    238         if (pCur->GCPtr == GCPtr)
    239         {
    240             /*
    241              * Unlink it.
    242              */
    243             if (pPrev)
    244             {
    245                 pPrev->pNextR3 = pCur->pNextR3;
    246                 pPrev->pNextRC = pCur->pNextRC;
    247                 pPrev->pNextR0 = pCur->pNextR0;
    248             }
    249             else
    250             {
    251                 pVM->pgm.s.pMappingsR3 = pCur->pNextR3;
    252                 pVM->pgm.s.pMappingsRC = pCur->pNextRC;
    253                 pVM->pgm.s.pMappingsR0 = pCur->pNextR0;
    254             }
    255 
    256             /*
    257              * Free the page table memory, clear page directory entries
    258              * and free the page tables and node memory.
    259              */
    260             MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
    261             if (pCur->GCPtr != NIL_RTGCPTR)
    262                 pgmR3MapClearPDEs(pVM, pCur, pCur->GCPtr >> X86_PD_SHIFT);
    263             MMHyperFree(pVM, pCur);
    264 
    265             for (VMCPUID i = 0; i < pVM->cCpus; i++)
    266             {
    267                 PVMCPU pVCpu = pVM->apCpusR3[i];
    268                 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    269             }
    270             return VINF_SUCCESS;
    271         }
    272 
    273         /* done? */
    274         if (pCur->GCPtr > GCPtr)
    275             break;
    276 
    277         /* next */
    278         pPrev = pCur;
    279         pCur = pCur->pNextR3;
    280     }
    281 
    282     AssertMsgFailed(("No mapping for %#x found!\n", GCPtr));
    283     return VERR_INVALID_PARAMETER;
    284 }
    285 
    286 #endif /* unused */
    287 
    288 
    289 /**
    290  * Checks whether a range of PDEs in the intermediate
    291  * memory context are unused.
    292  *
    293  * We're talking 32-bit PDEs here.
    294  *
    295  * @returns true/false.
    296  * @param   pVM         The cross context VM structure.
    297  * @param   iPD         The first PDE in the range.
    298  * @param   cPTs        The number of PDEs in the range.
    299  */
    300 DECLINLINE(bool) pgmR3AreIntermediatePDEsUnused(PVM pVM, unsigned iPD, unsigned cPTs)
    301 {
    302     if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
    303         return false;
    304     while (cPTs > 1)
    305     {
    306         iPD++;
    307         if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
    308             return false;
    309         cPTs--;
    310     }
    311     return true;
    312 }
    313 
    314 
    315 /**
    316  * Unlinks the mapping.
    317  *
    318  * The mapping *must* be in the list.
    319  *
    320  * @param   pVM             The cross context VM structure.
    321  * @param   pMapping        The mapping to unlink.
    322  */
    323 static void pgmR3MapUnlink(PVM pVM, PPGMMAPPING pMapping)
    324 {
    325     PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
    326     if (pAfterThis == pMapping)
    327     {
    328         /* head */
    329         pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
    330         pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
    331         pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
    332     }
    333     else
    334     {
    335         /* in the list */
    336         while (pAfterThis->pNextR3 != pMapping)
    337         {
    338             pAfterThis = pAfterThis->pNextR3;
    339             AssertReleaseReturnVoid(pAfterThis);
    340         }
    341 
    342         pAfterThis->pNextR3 = pMapping->pNextR3;
    343         pAfterThis->pNextRC = pMapping->pNextRC;
    344         pAfterThis->pNextR0 = pMapping->pNextR0;
    345     }
    346 }
    347 
    348 
    349 /**
    350  * Links the mapping.
    351  *
    352  * @param   pVM             The cross context VM structure.
    353  * @param   pMapping        The mapping to link.
    354  */
    355 static void pgmR3MapLink(PVM pVM, PPGMMAPPING pMapping)
    356 {
    357     /*
    358      * Find the list location (it's sorted by GCPtr) and link it in.
    359      */
    360     if (    !pVM->pgm.s.pMappingsR3
    361         ||  pVM->pgm.s.pMappingsR3->GCPtr > pMapping->GCPtr)
    362     {
    363         /* head */
    364         pMapping->pNextR3 = pVM->pgm.s.pMappingsR3;
    365         pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
    366         pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
    367         pVM->pgm.s.pMappingsR3 = pMapping;
    368         pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
    369         pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
    370     }
    371     else
    372     {
    373         /* in the list */
    374         PPGMMAPPING pAfterThis  = pVM->pgm.s.pMappingsR3;
    375         PPGMMAPPING pBeforeThis = pAfterThis->pNextR3;
    376         while (pBeforeThis && pBeforeThis->GCPtr <= pMapping->GCPtr)
    377         {
    378             pAfterThis = pBeforeThis;
    379             pBeforeThis = pBeforeThis->pNextR3;
    380         }
    381 
    382         pMapping->pNextR3 = pAfterThis->pNextR3;
    383         pMapping->pNextRC = pAfterThis->pNextRC;
    384         pMapping->pNextR0 = pAfterThis->pNextR0;
    385         pAfterThis->pNextR3 = pMapping;
    386         pAfterThis->pNextRC = MMHyperR3ToRC(pVM, pMapping);
    387         pAfterThis->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
    388     }
    389 }
    390 
    391 
    392 /**
    393  * Finalizes the intermediate context.
    394  *
    395  * This is called at the end of the ring-3 init and will construct the
    396  * intermediate paging structures, relocating all the mappings in the process.
    397  *
    398  * @returns VBox status code.
    399  * @param   pVM     The cross context VM structure.
    400  * @thread  EMT(0)
    401  */
    402 VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM)
    403 {
    404     AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
    405     pVM->pgm.s.fFinalizedMappings = true;
    406 
    407     /*
    408      * Loop until all mappings have been finalized.
    409      */
    410 #if 0
    411     unsigned    iPDNext = UINT32_C(0xc0000000) >> X86_PD_SHIFT; /* makes CSAM/PATM freak out booting linux. :-/ */
    412 #elif 0
    413     unsigned    iPDNext = MM_HYPER_AREA_ADDRESS >> X86_PD_SHIFT;
    414 #else
    415     unsigned    iPDNext = 1 << X86_PD_SHIFT; /* no hint, map them from the top. */
    416 #endif
    417 
    418     PPGMMAPPING pCur;
    419     do
    420     {
    421         pCur = pVM->pgm.s.pMappingsR3;
    422         while (pCur)
    423         {
    424             if (!pCur->fFinalized)
    425             {
    426                 /*
    427                  * Find a suitable location.
    428                  */
    429                 RTGCPTR const   GCPtrOld = pCur->GCPtr;
    430                 const unsigned  cPTs     = pCur->cPTs;
    431                 unsigned        iPDNew   = iPDNext;
    432                 if (    iPDNew + cPTs >= X86_PG_ENTRIES /* exclude the last PD */
    433                     ||  !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
    434                     ||  !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
    435                 {
    436                     /* No luck, just scan down from 4GB-4MB, giving up at 4MB. */
    437                     iPDNew = X86_PG_ENTRIES - cPTs - 1;
    438                     while (     iPDNew > 0
    439                            &&   (   !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
    440                                  || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
    441                            )
    442                         iPDNew--;
    443                     AssertLogRelReturn(iPDNew != 0, VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
    444                 }
    445 
    446                 /*
    447                  * Relocate it (something akin to pgmR3MapRelocate).
    448                  */
    449                 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
    450 
    451                 /* unlink the mapping, update the entry and relink it. */
    452                 pgmR3MapUnlink(pVM, pCur);
    453 
    454                 RTGCPTR const GCPtrNew = (RTGCPTR)iPDNew << X86_PD_SHIFT;
    455                 pCur->GCPtr      = GCPtrNew;
    456                 pCur->GCPtrLast  = GCPtrNew + pCur->cb - 1;
    457                 pCur->fFinalized = true;
    458 
    459                 pgmR3MapLink(pVM, pCur);
    460 
    461                 /* Finally work the callback. */
    462                 pCur->pfnRelocate(pVM, GCPtrOld, GCPtrNew, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
    463 
    464                 /*
    465                  * The list order might have changed, start from the beginning again.
    466                  */
    467                 iPDNext = iPDNew + cPTs;
    468                 break;
    469             }
    470 
    471             /* next */
    472             pCur = pCur->pNextR3;
    473         }
    474     } while (pCur);
    475 
    476     return VINF_SUCCESS;
    477 }
    478 
    479 #endif /* !PGM_WITHOUT_MAPPINGS */
     28#include <iprt/errcore.h>
    48029
    48130
     
    49039VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
    49140{
    492     RTGCPTR cb = 0;
    493 #ifndef PGM_WITHOUT_MAPPINGS
    494     for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    495         cb += pCur->cb;
    496 #else
    49741    RT_NOREF(pVM);
    498 #endif
    499 
    500     *pcb = cb;
    501     AssertReturn(*pcb == cb, VERR_NUMBER_TOO_BIG);
    502     Log(("PGMR3MappingsSize: return %d (%#x) bytes\n", cb, cb));
     42    *pcb = 0;
     43    Log(("PGMR3MappingsSize: returns zero\n"));
    50344    return VINF_SUCCESS;
    50445}
     
    51556VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
    51657{
    517     Log(("PGMR3MappingsFix: GCPtrBase=%RGv cb=%#x (fMappingsFixed=%RTbool MappingEnabled=%RTbool)\n",
    518          GCPtrBase, cb, pVM->pgm.s.fMappingsFixed, pgmMapAreMappingsEnabled(pVM)));
    519 
    520 #ifndef PGM_WITHOUT_MAPPINGS
    521     if (pgmMapAreMappingsEnabled(pVM))
    522     {
    523         /*
    524          * Only applies to VCPU 0 as we don't support SMP guests with raw mode.
    525          */
    526         Assert(pVM->cCpus == 1);
    527         PVMCPU pVCpu = pVM->apCpusR3[0];
    528 
    529         /*
    530          * Before we do anything we'll do a forced PD sync to try make sure any
    531          * pending relocations because of these mappings have been resolved.
    532          */
    533         PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
    534 
    535         return pgmR3MappingsFixInternal(pVM, GCPtrBase, cb);
    536     }
    537 
    538 #else  /* PGM_WITHOUT_MAPPINGS */
     58    Log(("PGMR3MappingsFix: GCPtrBase=%RGv cb=%#x\n", GCPtrBase, cb));
    53959    RT_NOREF(pVM, GCPtrBase, cb);
    540 #endif /* PGM_WITHOUT_MAPPINGS */
    541 
    542     Assert(!VM_IS_RAW_MODE_ENABLED(pVM));
    54360    return VINF_SUCCESS;
    54461}
    545 
    546 
    547 #ifndef PGM_WITHOUT_MAPPINGS
    548 /**
    549  * Internal worker for PGMR3MappingsFix and pgmR3Load.
    550  *
    551  * (Unlike PGMR3MappingsFix, this does not perform a SyncCR3 before the fixation.)
    552  *
    553  * @returns VBox status code.
    554  * @param   pVM         The cross context VM structure.
    555  * @param   GCPtrBase   The address of the reserved range of guest memory.
    556  * @param   cb          The size of the range starting at GCPtrBase.
    557  */
    558 int pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
    559 {
    560     /*
    561      * Check input arguments and pre-conditions.
    562      */
    563     AssertMsgReturn(!(GCPtrBase & X86_PAGE_4M_OFFSET_MASK), ("GCPtrBase (%#x) has to be aligned on a 4MB address!\n", GCPtrBase),
    564                     VERR_INVALID_PARAMETER);
    565     AssertMsgReturn(cb && !(cb & X86_PAGE_4M_OFFSET_MASK), ("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb),
    566                     VERR_INVALID_PARAMETER);
    567     AssertReturn(pgmMapAreMappingsEnabled(pVM), VERR_PGM_MAPPINGS_DISABLED);
    568     AssertReturn(pVM->cCpus == 1, VERR_PGM_MAPPINGS_SMP);
    569 
    570     /*
    571      * Check that it's not conflicting with a core code mapping in the intermediate page table.
    572      */
    573     unsigned    iPDNew = GCPtrBase >> X86_PD_SHIFT;
    574     unsigned    i = cb >> X86_PD_SHIFT;
    575     while (i-- > 0)
    576     {
    577         if (pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present)
    578         {
    579             /* Check that it's not one of our mappings. */
    580             PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
    581             while (pCur)
    582             {
    583                 if (iPDNew + i - (pCur->GCPtr >> X86_PD_SHIFT) < (pCur->cb >> X86_PD_SHIFT))
    584                     break;
    585                 pCur = pCur->pNextR3;
    586             }
    587             if (!pCur)
    588             {
    589                 LogRel(("PGMR3MappingsFix: Conflicts with intermediate PDE %#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
    590                         iPDNew + i, GCPtrBase, cb));
    591                 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
    592             }
    593         }
    594     }
    595 
    596     /*
    597      * In PAE / PAE mode, make sure we don't cross page directories.
    598      */
    599     PVMCPU pVCpu = pVM->apCpusR3[0];
    600     if (    (   pVCpu->pgm.s.enmGuestMode  == PGMMODE_PAE
    601              || pVCpu->pgm.s.enmGuestMode  == PGMMODE_PAE_NX)
    602         &&  (   pVCpu->pgm.s.enmShadowMode == PGMMODE_PAE
    603              || pVCpu->pgm.s.enmShadowMode == PGMMODE_PAE_NX))
    604     {
    605         unsigned iPdptBase = GCPtrBase >> X86_PDPT_SHIFT;
    606         unsigned iPdptLast = (GCPtrBase + cb - 1) >> X86_PDPT_SHIFT;
    607         if (iPdptBase != iPdptLast)
    608         {
    609             LogRel(("PGMR3MappingsFix: Crosses PD boundary; iPdptBase=%#x iPdptLast=%#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
    610                     iPdptBase, iPdptLast, GCPtrBase, cb));
    611             return VERR_PGM_MAPPINGS_FIX_CONFLICT;
    612         }
    613     }
    614 
    615     /*
    616      * Loop the mappings and check that they all agree on their new locations.
    617      */
    618     RTGCPTR     GCPtrCur = GCPtrBase;
    619     PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
    620     while (pCur)
    621     {
    622         if (!pCur->pfnRelocate(pVM, pCur->GCPtr, GCPtrCur, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
    623         {
    624             AssertMsgFailed(("The suggested fixed address %#x was rejected by '%s'!\n", GCPtrCur, pCur->pszDesc));
    625             return VERR_PGM_MAPPINGS_FIX_REJECTED;
    626         }
    627         /* next */
    628         GCPtrCur += pCur->cb;
    629         pCur = pCur->pNextR3;
    630     }
    631     if (GCPtrCur > GCPtrBase + cb)
    632     {
    633         AssertMsgFailed(("cb (%#x) is less than the required range %#x!\n", cb, GCPtrCur - GCPtrBase));
    634         return VERR_PGM_MAPPINGS_FIX_TOO_SMALL;
    635     }
    636 
    637     /*
    638      * Loop the table assigning the mappings to the passed in memory
    639      * and call their relocator callback.
    640      */
    641     GCPtrCur = GCPtrBase;
    642     pCur = pVM->pgm.s.pMappingsR3;
    643     while (pCur)
    644     {
    645         RTGCPTR const GCPtrOld = pCur->GCPtr;
    646 
    647         /*
    648          * Relocate the page table(s).
    649          */
    650         if (pCur->GCPtr != NIL_RTGCPTR)
    651             pgmR3MapClearPDEs(pVM, pCur, GCPtrOld >> X86_PD_SHIFT);
    652         pgmR3MapSetPDEs(pVM, pCur, GCPtrCur >> X86_PD_SHIFT);
    653 
    654         /*
    655          * Update the entry.
    656          */
    657         pCur->GCPtr = GCPtrCur;
    658         pCur->GCPtrLast = GCPtrCur + pCur->cb - 1;
    659 
    660         /*
    661          * Callback to execute the relocation.
    662          */
    663         pCur->pfnRelocate(pVM, GCPtrOld, GCPtrCur, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
    664 
    665         /*
    666          * Advance.
    667          */
    668         GCPtrCur += pCur->cb;
    669         pCur = pCur->pNextR3;
    670     }
    671 
    672     /*
    673      * Mark the mappings as fixed at this new location and return.
    674      */
    675     pVM->pgm.s.fMappingsFixed           = true;
    676     pVM->pgm.s.fMappingsFixedRestored   = false;
    677     pVM->pgm.s.GCPtrMappingFixed        = GCPtrBase;
    678     pVM->pgm.s.cbMappingFixed           = cb;
    679 
    680     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    681     {
    682         pVM->apCpusR3[idCpu]->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
    683         VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_PGM_SYNC_CR3);
    684     }
    685     return VINF_SUCCESS;
    686 }
    687 #endif /* !PGM_WITHOUT_MAPPINGS */
    68862
    68963
     
    70074VMMR3DECL(int) PGMR3MappingsUnfix(PVM pVM)
    70175{
    702     Log(("PGMR3MappingsUnfix: fMappingsFixed=%RTbool MappingsEnabled=%RTbool\n", pVM->pgm.s.fMappingsFixed, pgmMapAreMappingsEnabled(pVM)));
    703     if (   pgmMapAreMappingsEnabled(pVM)
    704         && (    pVM->pgm.s.fMappingsFixed
    705             ||  pVM->pgm.s.fMappingsFixedRestored)
    706        )
    707     {
    708         bool const fResyncCR3 = pVM->pgm.s.fMappingsFixed;
    709 
    710         pVM->pgm.s.fMappingsFixed           = false;
    711         pVM->pgm.s.fMappingsFixedRestored   = false;
    712         pVM->pgm.s.GCPtrMappingFixed        = 0;
    713         pVM->pgm.s.cbMappingFixed           = 0;
    714 
    715         if (fResyncCR3)
    716             for (VMCPUID i = 0; i < pVM->cCpus; i++)
    717                 VMCPU_FF_SET(pVM->apCpusR3[i], VMCPU_FF_PGM_SYNC_CR3);
    718     }
     76    Log(("PGMR3MappingsUnfix:\n"));
     77    RT_NOREF(pVM);
    71978    return VINF_SUCCESS;
    72079}
    72180
    722 #ifndef PGM_WITHOUT_MAPPINGS
    723 
    724 /**
    725  * Checks if the mappings needs re-fixing after a restore.
    726  *
    727  * @returns true if they need, false if not.
    728  * @param   pVM                 The cross context VM structure.
    729  */
    730 VMMR3DECL(bool) PGMR3MappingsNeedReFixing(PVM pVM)
    731 {
    732     VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    733     return pVM->pgm.s.fMappingsFixedRestored;
    734 }
    735 
    736 
    737 /**
    738  * Map pages into the intermediate context (switcher code).
    739  *
    740  * These pages are mapped both at the given virtual address and at the physical
    741  * address (for identity mapping).
    742  *
    743  * @returns VBox status code.
    744  * @param   pVM         The cross context VM structure.
    745  * @param   Addr        Intermediate context address of the mapping.
    746  * @param   HCPhys      Start of the range of physical pages. This must be entirely below 4GB!
    747  * @param   cbPages     Number of bytes to map.
    748  *
    749  * @remark  This API shall not be used for anything but mapping the switcher code.
    750  */
    751 VMMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)
    752 {
    753     LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%RHp cbPages=%#x\n", Addr, HCPhys, cbPages));
    754 
    755     /*
    756      * Adjust input.
    757      */
    758     cbPages += (uint32_t)HCPhys & PAGE_OFFSET_MASK;
    759     cbPages  = RT_ALIGN(cbPages, PAGE_SIZE);
    760     HCPhys  &= X86_PTE_PAE_PG_MASK;
    761     Addr    &= PAGE_BASE_MASK;
    762     /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */
    763     uint32_t uAddress = (uint32_t)Addr;
    764 
    765     /*
    766      * Assert input and state.
    767      */
    768     AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
    769     AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));
    770     AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
    771     AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages));
    772     AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
    773 
    774     /*
    775      * Check for internal conflicts between the virtual address and the physical address.
    776      * A 1:1 mapping is fine, but partial overlapping is a no-no.
    777      */
    778     if (    uAddress != HCPhys
    779         &&  (   uAddress < HCPhys
    780                 ? HCPhys - uAddress < cbPages
    781                 : uAddress - HCPhys < cbPages
    782             )
    783        )
    784         AssertLogRelMsgFailedReturn(("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages),
    785                                     VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
    786 
    787     const unsigned cPages = cbPages >> PAGE_SHIFT;
    788     int rc = pgmR3MapIntermediateCheckOne(pVM, uAddress, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
    789     if (RT_FAILURE(rc))
    790         return rc;
    791     rc = pgmR3MapIntermediateCheckOne(pVM, (uintptr_t)HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
    792     if (RT_FAILURE(rc))
    793         return rc;
    794 
    795     /*
    796      * Everything's fine, do the mapping.
    797      */
    798     pgmR3MapIntermediateDoOne(pVM, uAddress, HCPhys, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
    799     pgmR3MapIntermediateDoOne(pVM, (uintptr_t)HCPhys, HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
    800 
    801     return VINF_SUCCESS;
    802 }
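A usage sketch for the intermediate mapping API above (the address and HCPhysSwitcher are hypothetical; the constraints are the ones asserted in the function: the range must lie entirely below 4GB, must not partially overlap its 1:1 counterpart, and the call must precede PGMR3FinalizeMappings):

/* Map two pages of switcher code at the chosen intermediate address and,
   implicitly, at HCPhysSwitcher itself for the identity mapping. */
int rc = PGMR3MapIntermediate(pVM, 0x00c00000 /*Addr, hypothetical*/,
                              HCPhysSwitcher /*hypothetical, below 4GB*/,
                              2 * PAGE_SIZE);
AssertRCReturn(rc, rc);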
    803 
    804 
    805 /**
    806  * Validates that there are no conflicts for this mapping into the intermediate context.
    807  *
    808  * @returns VBox status code.
    809  * @param   pVM             The cross context VM structure.
    810  * @param   uAddress        Address of the mapping.
    811  * @param   cPages          Number of pages.
    812  * @param   pPTDefault      Pointer to the default page table for this mapping.
    813  * @param   pPTPaeDefault   Pointer to the default PAE page table for this mapping.
    814  */
    815 static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
    816 {
    817     AssertMsg((uAddress >> X86_PD_SHIFT) + cPages <= 1024, ("64-bit fixme uAddress=%RGv cPages=%u\n", uAddress, cPages));
    818 
    819     /*
    820      * Check that the ranges are available.
    821      * (This code doesn't have to be fast.)
    822      */
    823     while (cPages > 0)
    824     {
    825         /*
    826          * 32-Bit.
    827          */
    828         unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
    829         unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
    830         PX86PT pPT = pPTDefault;
    831         if (pVM->pgm.s.pInterPD->a[iPDE].u)
    832         {
    833             RTHCPHYS HCPhysPT = pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK;
    834             if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]))
    835                 pPT = pVM->pgm.s.apInterPTs[0];
    836             else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]))
    837                 pPT = pVM->pgm.s.apInterPTs[1];
    838             else
    839             {
    840                 /** @todo this must be handled with a relocation of the conflicting mapping!
    841                  * Which of course cannot be done because we're in the middle of the initialization. bad design! */
    842                 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
    843                                             VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
    844             }
    845         }
    846         if (pPT->a[iPTE].u)
    847             AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPT->a[iPTE].u=%RX32\n", iPTE, iPDE, uAddress, pPT->a[iPTE].u),
    848                                         VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
    849 
    850         /*
    851          * PAE.
    852          */
    853         const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    854         iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    855         iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
    856         Assert(iPDPE < 4);
    857         Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
    858         PX86PTPAE pPTPae = pPTPaeDefault;
    859         if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
    860         {
    861             RTHCPHYS HCPhysPT = pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK;
    862             if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))
    863                 pPTPae = pVM->pgm.s.apInterPaePTs[0];
    864             else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]))
    865                 pPTPae = pVM->pgm.s.apInterPaePTs[1];
    866             else
    867             {
    868                 /** @todo this must be handled with a relocation of the conflicting mapping!
    869                  * Which of course cannot be done because we're in the middle of the initialization. bad design! */
    870                 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
    871                                             VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
    872             }
    873         }
    874         if (pPTPae->a[iPTE].u)
    875             AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPTPae->a[iPTE].u=%#RX64\n", iPTE, iPDE, uAddress, pPTPae->a[iPTE].u),
    876                                         VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
    877 
    878         /* next */
    879         uAddress += PAGE_SIZE;
    880         cPages--;
    881     }
    882 
    883     return VINF_SUCCESS;
    884 }
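The per-page index arithmetic used by the checker (and by pgmR3MapIntermediateDoOne below), pulled out as a sketch with a made-up address; the shift and mask constants are the standard x86 ones already used above:

uintptr_t uAddress = UINT32_C(0xfeedc000);                               /* hypothetical */
unsigned  iPDE32   = (uAddress >> X86_PD_SHIFT)     & X86_PD_MASK;       /* 4MB slot, 10 bits */
unsigned  iPTE32   = (uAddress >> X86_PT_SHIFT)     & X86_PT_MASK;       /* 4KB page, 10 bits */
unsigned  iPDPE    = (uAddress >> X86_PDPT_SHIFT)   & X86_PDPT_MASK_PAE; /* 1GB PD,   2 bits  */
unsigned  iPDEPae  = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;   /* 2MB slot, 9 bits  */
unsigned  iPTEPae  = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;   /* 4KB page, 9 bits  */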
    885 
    886 
    887 
    888 /**
    889  * Sets up the intermediate page tables for a verified mapping.
    890  *
    891  * @param   pVM             The cross context VM structure.
    892  * @param   uAddress        Address of the mapping.
    893  * @param   HCPhys          The physical address of the page range.
    894  * @param   cPages          Number of pages.
    895  * @param   pPTDefault      Pointer to the default page table for this mapping.
    896  * @param   pPTPaeDefault   Pointer to the default PAE page table for this mapping.
    897  */
    898 static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
    899 {
    900     while (cPages > 0)
    901     {
    902         /*
    903          * 32-Bit.
    904          */
    905         unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
    906         unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
    907         PX86PT pPT;
    908         if (pVM->pgm.s.pInterPD->a[iPDE].u)
    909             pPT = (PX86PT)MMPagePhys2Page(pVM, pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK);
    910         else
    911         {
    912             pVM->pgm.s.pInterPD->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
    913                                            | (uint32_t)MMPage2Phys(pVM, pPTDefault);
    914             pPT = pPTDefault;
    915         }
    916         pPT->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | (uint32_t)HCPhys;
    917 
    918         /*
    919          * PAE
    920          */
    921         const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    922         iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    923         iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
    924         Assert(iPDPE < 4);
    925         Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
    926         PX86PTPAE pPTPae;
    927         if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
    928             pPTPae = (PX86PTPAE)MMPagePhys2Page(pVM, pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK);
    929         else
    930         {
    931             pPTPae = pPTPaeDefault;
    932             pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
    933                                                        | MMPage2Phys(pVM, pPTPaeDefault);
    934         }
    935         pPTPae->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | HCPhys;
    936 
    937         /* next */
    938         cPages--;
    939         HCPhys += PAGE_SIZE;
    940         uAddress += PAGE_SIZE;
    941     }
    942 }
    943 
    944 
    945 /**
    946  * Clears all PDEs involved with the mapping in the shadow and intermediate page tables.
    947  *
    948  * @param   pVM         The cross context VM structure.
    949  * @param   pMap        Pointer to the mapping in question.
    950  * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
    951  */
    952 static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
    953 {
    954     unsigned i     = pMap->cPTs;
    955     PVMCPU   pVCpu = VMMGetCpu(pVM);
    956     PGM_LOCK_VOID(pVM);                           /* to avoid assertions */
    957 
    958     pgmMapClearShadowPDEs(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE, false /*fDeactivateCR3*/);
    959 
    960     iOldPDE += i;
    961     while (i-- > 0)
    962     {
    963         iOldPDE--;
    964 
    965         /*
    966          * 32-bit.
    967          */
    968         pVM->pgm.s.pInterPD->a[iOldPDE].u        = 0;
    969 
    970         /*
    971          * PAE.
    972          */
    973         const unsigned iPD = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
    974         unsigned iPDE = iOldPDE * 2 % 512;
    975         pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
    976         iPDE++;
    977         AssertFatal(iPDE < 512);
    978         pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
    979     }
    980 
    981     PGM_UNLOCK(pVM);
    982 }
    983 
    984 
    985 /**
    986  * Sets all PDEs involved with the mapping in the shadow and intermediate page tables.
    987  *
    988  * @param   pVM         The cross context VM structure.
    989  * @param   pMap        Pointer to the mapping in question.
    990  * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
    991  */
    992 static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
    993 {
    994     PPGM   pPGM  = &pVM->pgm.s;
    995 # ifdef VBOX_STRICT
    996     PVMCPU pVCpu = VMMGetCpu(pVM);
    997 # endif
    998     PGM_LOCK_VOID(pVM);                           /* to avoid assertions */
    999 
    1000     Assert(!pgmMapAreMappingsEnabled(pVM) || PGMGetGuestMode(pVCpu) <= PGMMODE_PAE_NX);
    1001 
    1002     pgmMapSetShadowPDEs(pVM, pMap, iNewPDE);
    1003 
    1004     /*
    1005      * Init the page tables and insert them into the page directories.
    1006      */
    1007     unsigned i = pMap->cPTs;
    1008     iNewPDE += i;
    1009     while (i-- > 0)
    1010     {
    1011         iNewPDE--;
    1012 
    1013         /*
    1014          * 32-bit.
    1015          */
    1016         X86PDE Pde;
    1017         /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
    1018         Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
    1019         pPGM->pInterPD->a[iNewPDE]        = Pde;
    1020 
    1021         /*
    1022          * PAE.
    1023          */
    1024         const unsigned iPD = iNewPDE / 256;
    1025         unsigned iPDE = iNewPDE * 2 % 512;
    1026         X86PDEPAE PdePae0;
    1027         PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
    1028         pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
    1029         iPDE++;
    1030         AssertFatal(iPDE < 512);
    1031         X86PDEPAE PdePae1;
    1032         PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
    1033         pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
    1034     }
    1035 
    1036     PGM_UNLOCK(pVM);
    1037 }
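The iPD/iPDE arithmetic above in isolation: one 4MB slot (a single 32-bit PDE) maps to two consecutive 2MB PAE PDEs, and each of the four 1GB PAE page directories covers 256 such slots. A pure-arithmetic sketch with a made-up index:

unsigned iNewPDE = 513;               /* hypothetical 32-bit PD index (4MB units) */
unsigned iPD     = iNewPDE / 256;     /* which of the 4 PAE PDs (== 2 here)       */
unsigned iPDE0   = iNewPDE * 2 % 512; /* first PAE PDE of the slot (== 2 here)    */
unsigned iPDE1   = iPDE0 + 1;         /* second PAE PDE; iPDE0 is even, so < 512  */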
    1038 
    1039 
    1040 /**
    1041  * Relocates a mapping to a new address.
    1042  *
    1043  * @param   pVM                 The cross context VM structure.
    1044  * @param   pMapping            The mapping to relocate.
    1045  * @param   GCPtrOldMapping     The address of the start of the old mapping.
    1046  *                              NIL_RTGCPTR if not currently mapped.
    1047  * @param   GCPtrNewMapping     The address of the start of the new mapping.
    1048  */
    1049 static void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping)
    1050 {
    1051     Log(("PGM: Relocating %s from %RGv to %RGv\n", pMapping->pszDesc, GCPtrOldMapping, GCPtrNewMapping));
    1052     AssertMsg(GCPtrOldMapping == pMapping->GCPtr, ("%RGv vs %RGv\n", GCPtrOldMapping, pMapping->GCPtr));
    1053     AssertMsg((GCPtrOldMapping >> X86_PD_SHIFT) < X86_PG_ENTRIES, ("%RGv\n", GCPtrOldMapping));
    1054     AssertMsg((GCPtrNewMapping >> X86_PD_SHIFT) < X86_PG_ENTRIES, ("%RGv\n", GCPtrNewMapping));
    1055 
    1056     /*
    1057      * Relocate the page table(s).
    1058      */
    1059     if (GCPtrOldMapping != NIL_RTGCPTR)
    1060         pgmR3MapClearPDEs(pVM, pMapping, GCPtrOldMapping >> X86_PD_SHIFT);
    1061     pgmR3MapSetPDEs(pVM, pMapping, GCPtrNewMapping >> X86_PD_SHIFT);
    1062 
    1063     /*
    1064      * Update and resort the mapping list.
    1065      */
    1066 
    1067     /* Find previous mapping for pMapping, put result into pPrevMap. */
    1068     PPGMMAPPING pPrevMap = NULL;
    1069     PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
    1070     while (pCur && pCur != pMapping)
    1071     {
    1072         /* next */
    1073         pPrevMap = pCur;
    1074         pCur = pCur->pNextR3;
    1075     }
    1076     Assert(pCur);
    1077 
    1078     /* Find the first mapping with an address >= the new mapping address. */
    1079     RTGCPTR     GCPtrNew = GCPtrNewMapping;
    1080     PPGMMAPPING pPrev = NULL;
    1081     pCur = pVM->pgm.s.pMappingsR3;
    1082     while (pCur && pCur->GCPtr < GCPtrNew)
    1083     {
    1084         /* next */
    1085         pPrev = pCur;
    1086         pCur = pCur->pNextR3;
    1087     }
    1088 
    1089     if (pCur != pMapping && pPrev != pMapping)
    1090     {
    1091         /*
    1092          * Unlink.
    1093          */
    1094         if (pPrevMap)
    1095         {
    1096             pPrevMap->pNextR3 = pMapping->pNextR3;
    1097             pPrevMap->pNextRC = pMapping->pNextRC;
    1098             pPrevMap->pNextR0 = pMapping->pNextR0;
    1099         }
    1100         else
    1101         {
    1102             pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
    1103             pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
    1104             pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
    1105         }
    1106 
    1107         /*
    1108          * Link
    1109          */
    1110         pMapping->pNextR3 = pCur;
    1111         if (pPrev)
    1112         {
    1113             pMapping->pNextRC = pPrev->pNextRC;
    1114             pMapping->pNextR0 = pPrev->pNextR0;
    1115             pPrev->pNextR3 = pMapping;
    1116             pPrev->pNextRC = MMHyperR3ToRC(pVM, pMapping);
    1117             pPrev->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
    1118         }
    1119         else
    1120         {
    1121             pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
    1122             pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
    1123             pVM->pgm.s.pMappingsR3 = pMapping;
    1124             pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
    1125             pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
    1126         }
    1127     }
    1128 
    1129     /*
    1130      * Update the entry.
    1131      */
    1132     pMapping->GCPtr = GCPtrNew;
    1133     pMapping->GCPtrLast = GCPtrNew + pMapping->cb - 1;
    1134 
    1135     /*
    1136      * Callback to execute the relocation.
    1137      */
    1138     pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_RELOCATE, pMapping->pvUser);
    1139 }
    1140 
    1141 
    1142 /**
    1143  * Checks whether a proposed new mapping address has previously caused a clash with the guest's page tables.
    1144  *
    1145  * @returns true if the address is a known conflict address, false otherwise.
    1146  * @param   pMapping            The mapping which conflicts.
    1147  * @param   GCPtr               New mapping address to try.
    1148  */
    1149 bool pgmR3MapIsKnownConflictAddress(PPGMMAPPING pMapping, RTGCPTR GCPtr)
    1150 {
    1151     for (unsigned i = 0; i < RT_ELEMENTS(pMapping->aGCPtrConflicts); i++)
    1152     {
    1153         if (GCPtr == pMapping->aGCPtrConflicts[i])
    1154             return true;
    1155     }
    1156     return false;
    1157 }
    1158 
    1159 
    1160 /**
    1161  * Resolves a conflict between a page table based GC mapping and
    1162  * the Guest OS page tables. (32 bits version)
    1163  *
    1164  * @returns VBox status code.
    1165  * @param   pVM                 The cross context VM structure.
    1166  * @param   pMapping            The mapping which conflicts.
    1167  * @param   pPDSrc              The page directory of the guest OS.
    1168  * @param   GCPtrOldMapping     The address of the start of the current mapping.
    1169  */
    1170 int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping)
    1171 {
    1172     STAM_REL_COUNTER_INC(&pVM->pgm.s.cRelocations);
    1173     STAM_PROFILE_START(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);
    1174 
    1175     /* Raw mode only which implies one VCPU. */
    1176     Assert(pVM->cCpus == 1);
    1177 
    1178     pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
    1179     pMapping->cConflicts++;
    1180 
    1181     /*
    1182      * Scan for free page directory entries.
    1183      *
    1184      * Note that we do not support mappings at the very end of the
    1185      * address space since that will break our GCPtrEnd assumptions.
    1186      */
    1187     const unsigned  cPTs = pMapping->cPTs;
    1188     unsigned        iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
    1189     while (iPDNew-- > 0)
    1190     {
    1191         if (pPDSrc->a[iPDNew].n.u1Present)
    1192             continue;
    1193 
    1194         if (pgmR3MapIsKnownConflictAddress(pMapping, iPDNew << X86_PD_SHIFT))
    1195             continue;
    1196 
    1197         if (cPTs > 1)
    1198         {
    1199             bool fOk = true;
    1200             for (unsigned i = 1; fOk && i < cPTs; i++)
    1201                 if (pPDSrc->a[iPDNew + i].n.u1Present)
    1202                     fOk = false;
    1203             if (!fOk)
    1204                 continue;
    1205         }
    1206 
    1207         /*
    1208          * Check that it's not conflicting with an intermediate page table mapping.
    1209          */
    1210         bool        fOk = true;
    1211         unsigned    i   = cPTs;
    1212         while (fOk && i-- > 0)
    1213             fOk = !pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present;
    1214         if (!fOk)
    1215             continue;
    1216         /** @todo AMD64 should check the PAE directories and skip the 32bit stuff. */
    1217 
    1218         /*
    1219          * Ask for the mapping.
    1220          */
    1221         RTGCPTR GCPtrNewMapping = (RTGCPTR32)iPDNew << X86_PD_SHIFT;
    1222 
    1223         if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
    1224         {
    1225             pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
    1226             STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);
    1227             return VINF_SUCCESS;
    1228         }
    1229     }
    1230 
    1231     STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);
    1232 # ifdef DEBUG_bird
    1233     /*
    1234      * Ended up here frequently recently with an NT4.0 VM (using SMP kernel).
    1235      *
    1236      * The problem is when enabling large pages (i.e. updating CR4) using the
    1237      * _Ki386EnableCurrentLargePage@8 assembly routine (address 0x801c97ad-9).
    1238      * The routine loads a sparsely populated page table with identity mappings
    1239      * of its own code; most entries are whatever ExAllocatePool returned, which
    1240      * is documented as undefined but is all 0xffffffff in this case.  Once loaded,
    1241      * it jumps to the physical code address, disables paging, sets CR4.PSE=1,
    1242      * re-enables paging, restores the original page table and returns successfully.
    1243      *
    1244      * Theory: if CSAM/PATM patches the pushf;cli;mov eax,cr3 sequence at the
    1245      * start of that function, we're apparently in trouble; if CSAM/PATM doesn't,
    1246      * we're switching back to REM and disabling paging there instead.
    1247      *
    1248      * Normal PD: CR3=00030000; Problematic identity mapped PD: CR3=0x5fa000.
    1249      */
    1250     DBGFSTOP(pVM);
    1251 # endif
    1252     AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, cPTs));
    1253     return VERR_PGM_NO_HYPERVISOR_ADDRESS;
    1254 }
    1255 
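For readers of the deleted code above: the heart of pgmR3SyncPTResolveConflict is a downward scan for a run of cPTs free page-directory entries. A minimal standalone sketch of just that scan, with the directory modeled as a plain bool array and all names local to this sketch rather than VBox's (the real code additionally skips known-conflict addresses and the intermediate page tables):

    #include <stdbool.h>

    #define PD_ENTRIES 1024 /* 32-bit paging: 1024 PDEs of 4 MB each */

    /* Return the highest index where cPTs consecutive entries are free,
     * or -1 if no such run exists. */
    static int findFreePdeRun(const bool afPresent[PD_ENTRIES], unsigned cPTs)
    {
        int iPDNew = PD_ENTRIES - (int)cPTs; /* (+ 1 - 1), as the code notes */
        while (iPDNew-- > 0)
        {
            bool fOk = true;
            for (unsigned i = 0; fOk && i < cPTs; i++)
                if (afPresent[iPDNew + i])
                    fOk = false;
            if (fOk)
                return iPDNew;
        }
        return -1;
    }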
    1256 
    1257 /**
    1258  * Resolves a conflict between a page table based GC mapping and
    1259  * the Guest OS page tables. (PAE bits version)
    1260  *
    1261  * @returns VBox status code.
    1262  * @param   pVM                 The cross context VM structure.
    1263  * @param   pMapping            The mapping which conflicts.
    1264  * @param   GCPtrOldMapping     The address of the start of the current mapping.
    1265  */
    1266 int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping)
    1267 {
    1268     STAM_REL_COUNTER_INC(&pVM->pgm.s.cRelocations);
    1269     STAM_PROFILE_START(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);
    1270 
    1271     /* Raw mode only which implies one VCPU. */
    1272     Assert(pVM->cCpus == 1);
    1273     PVMCPU pVCpu = VMMGetCpu(pVM);
    1274 
    1275     pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
    1276     pMapping->cConflicts++;
    1277 
    1278     for (int iPDPTE = X86_PG_PAE_PDPE_ENTRIES - 1; iPDPTE >= 0; iPDPTE--)
    1279     {
    1280         unsigned  iPDSrc;
    1281         PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(pVCpu, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
    1282 
    1283         /*
    1284          * Scan for free page directory entries.
    1285          *
    1286          * Note that we do not support mappings at the very end of the
    1287          * address space since that will break our GCPtrEnd assumptions.
    1288          * Nor do we support mappings crossing page directories.
    1289          */
    1290         const unsigned  cPTs = pMapping->cb >> X86_PD_PAE_SHIFT;
    1291         unsigned        iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
    1292 
    1293         while (iPDNew-- > 0)
    1294         {
    1295             /* Ugly assumption that mappings start on a 4 MB boundary. */
    1296             if (iPDNew & 1)
    1297                 continue;
    1298 
    1299             if (pgmR3MapIsKnownConflictAddress(pMapping, ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT)))
    1300                 continue;
    1301 
    1302             if (pPDSrc)
    1303             {
    1304                 if (pPDSrc->a[iPDNew].n.u1Present)
    1305                     continue;
    1306                 if (cPTs > 1)
    1307                 {
    1308                     bool fOk = true;
    1309                     for (unsigned i = 1; fOk && i < cPTs; i++)
    1310                         if (pPDSrc->a[iPDNew + i].n.u1Present)
    1311                             fOk = false;
    1312                     if (!fOk)
    1313                         continue;
    1314                 }
    1315             }
    1316             /*
    1317              * Check that it's not conflicting with an intermediate page table mapping.
    1318              */
    1319             bool        fOk = true;
    1320             unsigned    i   = cPTs;
    1321             while (fOk && i-- > 0)
    1322                 fOk = !pVM->pgm.s.apInterPaePDs[iPDPTE]->a[iPDNew + i].n.u1Present;
    1323             if (!fOk)
    1324                 continue;
    1325 
    1326             /*
    1327              * Ask for the mapping.
    1328              */
    1329             RTGCPTR GCPtrNewMapping = ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + ((RTGCPTR32)iPDNew << X86_PD_PAE_SHIFT);
    1330 
    1331             if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
    1332             {
    1333                 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
    1334                 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);
    1335                 return VINF_SUCCESS;
    1336             }
    1337         }
    1338     }
    1339     STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);
    1340     AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, pMapping->cb >> X86_PD_PAE_SHIFT));
    1341     return VERR_PGM_NO_HYPERVISOR_ADDRESS;
    1342 }
    1343 
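The GCPtrNewMapping arithmetic above combines a PDPT index (1 GB granularity, shift 30) with a PAE page-directory index (2 MB granularity, shift 21). A worked example under those standard x86 shift values; the macro names here are local stand-ins for the X86_* constants:

    #include <assert.h>
    #include <stdint.h>

    #define PDPT_SHIFT   30 /* X86_PDPT_SHIFT: one PDPTE covers 1 GB     */
    #define PD_PAE_SHIFT 21 /* X86_PD_PAE_SHIFT: one PAE PDE covers 2 MB */

    int main(void)
    {
        /* iPDNew is even, matching the 4 MB-boundary assumption above. */
        uint32_t iPDPTE = 3, iPDNew = 510;
        uint32_t GCPtr  = (iPDPTE << PDPT_SHIFT) + (iPDNew << PD_PAE_SHIFT);
        assert(GCPtr == 0xFFC00000u); /* 3 GB + 510 * 2 MB */
        return 0;
    }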
    1344 
    1345 /**
    1346  * Read memory from the guest mappings.
    1347  *
    1348  * This will use the page tables associated with the mappings to
    1349  * read the memory. This means that not all kinds of memory are readable
    1350  * since we don't necessarily know how to convert that physical address
    1351  * to a HC virtual one.
    1352  *
    1353  * @returns VBox status code.
    1354  * @param   pVM         The cross context VM structure.
    1355  * @param   pvDst       The destination address (HC of course).
    1356  * @param   GCPtrSrc    The source address (GC virtual address).
    1357  * @param   cb          Number of bytes to read.
    1358  *
    1359  * @remarks This is indirectly for DBGF only.
    1360  * @todo    Consider renaming it to indicate it's special usage, or just
    1361  *          reimplement it in MMR3HyperReadGCVirt.
    1362  */
    1363 VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
    1364 {
    1365     /*
    1366      * Simplicity over speed... Chop the request up into chunks
    1367      * which don't cross pages.
    1368      */
    1369     if (cb + (GCPtrSrc & PAGE_OFFSET_MASK) > PAGE_SIZE)
    1370     {
    1371         for (;;)
    1372         {
    1373             size_t cbRead = RT_MIN(cb, PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK));
    1374             int rc = PGMR3MapRead(pVM, pvDst, GCPtrSrc, cbRead);
    1375             if (RT_FAILURE(rc))
    1376                 return rc;
    1377             cb -= cbRead;
    1378             if (!cb)
    1379                 break;
    1380             pvDst = (char *)pvDst + cbRead;
    1381             GCPtrSrc += cbRead;
    1382         }
    1383         return VINF_SUCCESS;
    1384     }
    1385 
    1386     /*
    1387      * Find the mapping.
    1388      */
    1389     PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    1390     while (pCur)
    1391     {
    1392         RTGCPTR off = GCPtrSrc - pCur->GCPtr;
    1393         if (off < pCur->cb)
    1394         {
    1395             if (off + cb > pCur->cb)
    1396             {
    1397                 AssertMsgFailed(("Invalid page range %RGv LB%#x. mapping '%s' %RGv to %RGv\n",
    1398                                  GCPtrSrc, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
    1399                 return VERR_INVALID_PARAMETER;
    1400             }
    1401 
    1402             unsigned iPT  = off >> X86_PD_SHIFT;
    1403             unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
    1404             while (cb > 0 && iPTE < RT_ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))
    1405             {
    1406                 PCPGMSHWPTEPAE pPte = &pCur->aPTs[iPT].CTXALLSUFF(paPaePTs)[iPTE / 512].a[iPTE % 512];
    1407                 if (!PGMSHWPTEPAE_IS_P(*pPte))
    1408                     return VERR_PAGE_NOT_PRESENT;
    1409                 RTHCPHYS HCPhys = PGMSHWPTEPAE_GET_HCPHYS(*pPte);
    1410 
    1411                 /*
    1412                  * Get the virtual page from the physical one.
    1413                  */
    1414                 void *pvPage;
    1415                 int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pvPage);
    1416                 if (RT_FAILURE(rc))
    1417                     return rc;
    1418 
    1419                 memcpy(pvDst, (char *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
    1420                 return VINF_SUCCESS;
    1421             }
    1422         }
    1423 
    1424         /* next */
    1425         pCur = CTXALLSUFF(pCur->pNext);
    1426     }
    1427 
    1428     return VERR_INVALID_POINTER;
    1429 }
    1430 
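PGMR3MapRead above trades speed for simplicity by recursing on chunks that never cross a page boundary. The same splitting written iteratively as a self-contained model; readPage() is a stand-in for the per-page translate-and-memcpy, and the MY_* names are illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define MY_PAGE_SIZE        4096u
    #define MY_PAGE_OFFSET_MASK (MY_PAGE_SIZE - 1)

    /* Stand-in for the single-page case: translate GCPtrSrc and copy. */
    static int readPage(void *pvDst, uint64_t GCPtrSrc, size_t cb)
    {
        (void)GCPtrSrc;
        memset(pvDst, 0, cb); /* the real code resolves the PTE and memcpy's */
        return 0;
    }

    static int readChunked(void *pvDst, uint64_t GCPtrSrc, size_t cb)
    {
        while (cb > 0)
        {
            /* Never cross a page boundary in one step. */
            size_t cbRead = MY_PAGE_SIZE - (size_t)(GCPtrSrc & MY_PAGE_OFFSET_MASK);
            if (cbRead > cb)
                cbRead = cb;
            int rc = readPage(pvDst, GCPtrSrc, cbRead);
            if (rc != 0)
                return rc;
            pvDst     = (char *)pvDst + cbRead;
            GCPtrSrc += cbRead;
            cb       -= cbRead;
        }
        return 0;
    }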
    1431 
    1432 /**
    1433  * Info callback for 'pgmhandlers'.
    1434  *
    1435  * @param   pVM         The cross context VM structure.
    1436  * @param   pHlp        The output helpers.
    1437  * @param   pszArgs     The arguments. phys or virt.
    1438  */
    1439 DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
    1440 {
    1441     NOREF(pszArgs);
    1442     if (!pgmMapAreMappingsEnabled(pVM))
    1443         pHlp->pfnPrintf(pHlp, "\nThe mappings are DISABLED.\n");
    1444     else if (pVM->pgm.s.fMappingsFixed)
    1445         pHlp->pfnPrintf(pHlp, "\nThe mappings are FIXED: %RGv-%RGv\n",
    1446                         pVM->pgm.s.GCPtrMappingFixed, pVM->pgm.s.GCPtrMappingFixed + pVM->pgm.s.cbMappingFixed - 1);
    1447     else if (pVM->pgm.s.fMappingsFixedRestored)
    1448         pHlp->pfnPrintf(pHlp, "\nThe mappings are FLOATING-RESTORED-FIXED: %RGv-%RGv\n",
    1449                         pVM->pgm.s.GCPtrMappingFixed, pVM->pgm.s.GCPtrMappingFixed + pVM->pgm.s.cbMappingFixed - 1);
    1450     else
    1451         pHlp->pfnPrintf(pHlp, "\nThe mappings are FLOATING.\n");
    1452 
    1453     PPGMMAPPING pCur;
    1454     for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    1455     {
    1456         pHlp->pfnPrintf(pHlp, "%RGv - %RGv  %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
    1457         if (pCur->cConflicts > 0)
    1458         {
    1459             pHlp->pfnPrintf(pHlp, "  %u conflict%s: ", pCur->cConflicts, pCur->cConflicts == 1 ? "" : "s");
    1460             uint32_t cLeft = RT_MIN(pCur->cConflicts, RT_ELEMENTS(pCur->aGCPtrConflicts));
    1461             uint32_t i     = pCur->cConflicts;
    1462             while (cLeft-- > 0)
    1463             {
    1464                 i = (i - 1) & (PGMMAPPING_CONFLICT_MAX - 1);
    1465                 pHlp->pfnPrintf(pHlp, cLeft ? "%RGv, " : "%RGv\n", pCur->aGCPtrConflicts[i]);
    1466             }
    1467         }
    1468     }
    1469 }
    1470 
    1471 #endif /* !PGM_WITHOUT_MAPPINGS */
    1472 
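The conflict bookkeeping in this file is a small ring buffer: cConflicts counts every conflict ever seen, aGCPtrConflicts keeps only the last PGMMAPPING_CONFLICT_MAX (a power of two), and pgmR3MapInfo walks it newest-first. The same pattern in isolation, with an illustrative structure in place of PGMMAPPING:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CONFLICT_MAX 8 /* must be a power of two for the & mask to work */

    typedef struct CONFLICTLOG
    {
        uint32_t cConflicts;                    /* total ever recorded     */
        uint32_t aGCPtrConflicts[CONFLICT_MAX]; /* last CONFLICT_MAX only  */
    } CONFLICTLOG;

    static void logConflict(CONFLICTLOG *pLog, uint32_t GCPtr)
    {
        pLog->aGCPtrConflicts[pLog->cConflicts & (CONFLICT_MAX - 1)] = GCPtr;
        pLog->cConflicts++;
    }

    /* Newest-first dump, same index walk as pgmR3MapInfo. */
    static void dumpConflicts(const CONFLICTLOG *pLog)
    {
        uint32_t cLeft = pLog->cConflicts < CONFLICT_MAX ? pLog->cConflicts : CONFLICT_MAX;
        uint32_t i     = pLog->cConflicts;
        while (cLeft-- > 0)
        {
            i = (i - 1) & (CONFLICT_MAX - 1); /* step back with wrap-around */
            printf("%#" PRIx32 "%s", pLog->aGCPtrConflicts[i], cLeft ? ", " : "\n");
        }
    }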
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r91853 r91854  
    16991699 * @param   GCPhys          The address of the RAM range.
    17001700 * @param   GCPhysLast      The last address of the RAM range.
    1701  * @param   RCPtrNew        The RC address if the range is floating. NIL_RTRCPTR
    1702  *                          if in HMA.
    17031701 * @param   R0PtrNew        Ditto for R0.
    17041702 * @param   pszDesc         The description.
     
    17061704 */
    17071705static int pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
    1708                                         RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
     1706                                        RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
    17091707{
    17101708    /*
     
    17191717    pNew->paLSPages     = NULL;
    17201718    pNew->fFlags        = 0;
    1721 #ifndef PGM_WITHOUT_MAPPINGS
    1722     if (RCPtrNew != NIL_RTRCPTR)
    1723         pNew->fFlags   |= PGM_RAM_RANGE_FLAGS_FLOATING;
    1724 #else
    1725     NOREF(RCPtrNew);
    1726 #endif
    17271719
    17281720    uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
     
    17761768
    17771769
    1778 #ifndef PGM_WITHOUT_MAPPINGS
    1779 /**
    1780  * @callback_method_impl{FNPGMRELOCATE, Relocate a floating RAM range.}
    1781  * @sa pgmR3PhysMMIO2ExRangeRelocate
    1782  */
    1783 static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
    1784                                                     PGMRELOCATECALL enmMode, void *pvUser)
    1785 {
    1786     PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
    1787     Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
    1788     Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE); RT_NOREF_PV(GCPtrOld);
    1789 
    1790     switch (enmMode)
    1791     {
    1792         case PGMRELOCATECALL_SUGGEST:
    1793             return true;
    1794 
    1795         case PGMRELOCATECALL_RELOCATE:
    1796         {
    1797             /*
    1798              * Update myself, then relink all the ranges and flush the RC TLB.
    1799              */
    1800             PGM_LOCK_VOID(pVM);
    1801 
    1802             pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
    1803 
    1804             pgmR3PhysRelinkRamRanges(pVM);
    1805             for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
    1806                 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
    1807 
    1808             PGM_UNLOCK(pVM);
    1809             return true;
    1810         }
    1811 
    1812         default:
    1813             AssertFailedReturn(false);
    1814     }
    1815 }
    1816 #endif /* !PGM_WITHOUT_MAPPINGS */
    1817 
    1818 
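Both relocation callbacks deleted in this file follow the same two-phase FNPGMRELOCATE contract: PGM first asks with PGMRELOCATECALL_SUGGEST whether the proposed address is acceptable, and only on a true reply calls back with PGMRELOCATECALL_RELOCATE to commit. A stripped-down skeleton of that contract, using simplified stand-in types rather than the VBox declarations:

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum { RELOCATECALL_SUGGEST, RELOCATECALL_RELOCATE } RELOCATECALL;

    static bool exampleRelocate(uint64_t GCPtrOld, uint64_t GCPtrNew,
                                RELOCATECALL enmMode, void *pvUser)
    {
        (void)GCPtrOld; (void)GCPtrNew; (void)pvUser;
        switch (enmMode)
        {
            case RELOCATECALL_SUGGEST:
                /* Phase 1: veto or accept the proposed address. */
                return true;
            case RELOCATECALL_RELOCATE:
                /* Phase 2: update self pointers, relink ranges, flush TLBs. */
                return true;
            default:
                return false;
        }
    }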
    18191770/**
    18201771 * PGMR3PhysRegisterRam worker that registers a high chunk.
     
    18241775 * @param   GCPhys          The address of the RAM.
    18251776 * @param   cRamPages       The number of RAM pages to register.
    1826  * @param   cbChunk         The size of the PGMRAMRANGE guest mapping.
    18271777 * @param   iChunk          The chunk number.
    18281778 * @param   pszDesc         The RAM range description.
    18291779 * @param   ppPrev          Previous RAM range pointer. In/Out.
    18301780 */
    1831 static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
    1832                                          uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
    1833                                          PPGMRAMRANGE *ppPrev)
     1781static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages, uint32_t iChunk,
     1782                                         const char *pszDesc, PPGMRAMRANGE *ppPrev)
    18341783{
    18351784    const char *pszDescChunk = iChunk == 0
     
    18551804
    18561805        /*
    1857          * Create a mapping and map the pages into it.
    1858          * We push these in below the HMA.
     1806         * Ok, init and link the range.
    18591807         */
    1860 #ifdef PGM_WITHOUT_MAPPINGS
    1861         RTGCPTR const GCPtrChunk = NIL_RTGCPTR;
    1862         RT_NOREF(cbChunk);
    1863 #else
    1864         RTGCPTR const GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
    1865         rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
     1808        rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
     1809                                          R0PtrChunk, pszDescChunk, *ppPrev);
    18661810        if (RT_SUCCESS(rc))
    1867         {
    1868             pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
    1869 
    1870             RTGCPTR const   GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
    1871             RTGCPTR         GCPtrPage  = GCPtrChunk;
    1872             for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
    1873                 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
    1874             if (RT_SUCCESS(rc))
    1875 #endif /* !PGM_WITHOUT_MAPPINGS */
    1876             {
    1877                 /*
    1878                  * Ok, init and link the range.
    1879                  */
    1880                 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
    1881                                                   (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
    1882                 if (RT_SUCCESS(rc))
    1883                     *ppPrev = pNew;
    1884             }
    1885 #ifndef PGM_WITHOUT_MAPPINGS
    1886         }
    1887 #endif
     1811            *ppPrev = pNew;
    18881812
    18891813        if (RT_FAILURE(rc))
     
    19851909                cPagesInChunk = cPagesPerChunk;
    19861910
    1987             rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
     1911            rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, iChunk, pszDesc, &pPrev);
    19881912            AssertRCReturn(rc, rc);
    19891913
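The call above sits in a loop that carves a large RAM registration into fixed-size chunks, the last one possibly short. The loop modeled standalone, with an illustrative page shift and a stand-in for pgmR3PhysRegisterHighRamChunk:

    #include <stdint.h>

    #define MY_PAGE_SHIFT 12

    /* Stand-in for pgmR3PhysRegisterHighRamChunk. */
    static int registerChunk(uint64_t GCPhysChunk, uint64_t cPagesInChunk, uint32_t iChunk)
    {
        (void)GCPhysChunk; (void)cPagesInChunk; (void)iChunk;
        return 0;
    }

    static int registerHighRam(uint64_t GCPhys, uint64_t cRamPages, uint64_t cPagesPerChunk)
    {
        uint32_t iChunk = 0;
        while (cRamPages > 0)
        {
            uint64_t cPagesInChunk = cRamPages < cPagesPerChunk ? cRamPages : cPagesPerChunk;
            int rc = registerChunk(GCPhys, cPagesInChunk, iChunk);
            if (rc != 0)
                return rc;
            GCPhys    += cPagesInChunk << MY_PAGE_SHIFT;
            cRamPages -= cPagesInChunk;
            iChunk++;
        }
        return 0;
    }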
     
    20041928        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc);
    20051929
    2006         rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, MMHyperCCToR0(pVM, pNew), pszDesc, pPrev);
     1930        rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, MMHyperCCToR0(pVM, pNew), pszDesc, pPrev);
    20071931        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc);
    20081932    }
     
    28062730
    28072731
    2808 #ifndef PGM_WITHOUT_MAPPINGS
    2809 /**
    2810  * @callback_method_impl{FNPGMRELOCATE, Relocate a floating MMIO/MMIO2 range.}
    2811  * @sa pgmR3PhysRamRangeRelocate
    2812  */
    2813 static DECLCALLBACK(bool) pgmR3PhysMmio2RangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
    2814                                                       PGMRELOCATECALL enmMode, void *pvUser)
    2815 {
    2816     PPGMREGMMIO2RANGE pMmio = (PPGMREGMMIO2RANGE)pvUser;
    2817     Assert(pMmio->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
    2818     Assert(pMmio->RamRange.pSelfRC == GCPtrOld + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange)); RT_NOREF_PV(GCPtrOld);
    2819 
    2820     switch (enmMode)
    2821     {
    2822         case PGMRELOCATECALL_SUGGEST:
    2823             return true;
    2824 
    2825         case PGMRELOCATECALL_RELOCATE:
    2826         {
    2827             /*
    2828              * Update myself, then relink all the ranges and flush the RC TLB.
    2829              */
    2830             PGM_LOCK_VOID(pVM);
    2831 
    2832             pMmio->RamRange.pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange));
    2833 
    2834             pgmR3PhysRelinkRamRanges(pVM);
    2835             for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
    2836                 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
    2837 
    2838             PGM_UNLOCK(pVM);
    2839             return true;
    2840         }
    2841 
    2842         default:
    2843             AssertFailedReturn(false);
    2844     }
    2845 }
    2846 #endif /* !PGM_WITHOUT_MAPPINGS */
    2847 
    2848 
    28492732/**
    28502733 * Calculates the number of chunks
     
    40243907    return VINF_SUCCESS;
    40253908}
    4026 
    4027 
    4028 #ifndef PGM_WITHOUT_MAPPINGS
    4029 /**
    4030  * Gets the HC physical address of a page in the MMIO2 region.
    4031  *
    4032  * This API is intended for MMHyper and shouldn't be called
    4033  * by anyone else...
    4034  *
    4035  * @returns VBox status code.
    4036  * @param   pVM             The cross context VM structure.
    4037  * @param   pDevIns         The owner of the memory, optional.
    4038  * @param   iSubDev         Sub-device number.
    4039  * @param   iRegion         The region.
    4040  * @param   off             The page expressed as an offset into the MMIO2 region.
    4041  * @param   pHCPhys         Where to store the result.
    4042  */
    4043 VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
    4044                                             RTGCPHYS off, PRTHCPHYS pHCPhys)
    4045 {
    4046     /*
    4047      * Validate input
    4048      */
    4049     VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    4050     AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    4051     AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
    4052     AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
    4053 
    4054     PGM_LOCK_VOID(pVM);
    4055     PPGMREGMMIO2RANGE pCurMmio = pgmR3PhysMmio2Find(pVM, pDevIns, iSubDev, iRegion, NIL_PGMMMIO2HANDLE);
    4056     AssertReturn(pCurMmio, VERR_NOT_FOUND);
    4057     AssertReturn(pCurMmio->fFlags & (PGMREGMMIO2RANGE_F_MMIO2 | PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_WRONG_TYPE);
    4058 
    4059     while (   off >= pCurMmio->RamRange.cb
    4060            && !(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK))
    4061     {
    4062         off -= pCurMmio->RamRange.cb;
    4063         pCurMmio = pCurMmio->pNextR3;
    4064     }
    4065     AssertReturn(off < pCurMmio->RamRange.cb, VERR_INVALID_PARAMETER);
    4066 
    4067     PCPGMPAGE pPage = &pCurMmio->RamRange.aPages[off >> PAGE_SHIFT];
    4068     *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    4069     PGM_UNLOCK(pVM);
    4070     return VINF_SUCCESS;
    4071 }
    4072 #endif /* !PGM_WITHOUT_MAPPINGS */
    40733909
    40743910
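The removed PGMR3PhysMMIO2GetHCPhys walks the chunked MMIO2 range by subtracting each chunk's size from the offset until the offset lands inside a chunk. That walk in isolation, over a simplified list node rather than PGMREGMMIO2RANGE:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct CHUNK
    {
        uint64_t      cb;    /* size of this chunk       */
        struct CHUNK *pNext; /* next chunk in the region */
        int           fLast; /* set on the final chunk   */
    } CHUNK;

    /* Find the chunk containing *poff and reduce *poff to a chunk-relative
     * offset; NULL if the offset is past the end of the range. */
    static CHUNK *findChunk(CHUNK *pCur, uint64_t *poff)
    {
        while (*poff >= pCur->cb && !pCur->fLast)
        {
            *poff -= pCur->cb;
            pCur = pCur->pNext;
        }
        return *poff < pCur->cb ? pCur : NULL;
    }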
  • trunk/src/VBox/VMM/VMMR3/PGMPool.cpp

    r90439 r91854  
    544544                            if ((pShwPD->a[i].u & (X86_PDE_P | X86_PDE_PS)) == (X86_PDE_P | X86_PDE_PS))
    545545                            {
    546 # ifndef PGM_WITHOUT_MAPPINGS
    547                                 Assert(!(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING));
    548 # endif
    549546                                pShwPD->a[i].u = 0;
    550547                                Assert(pPage->cPresent);
     
    565562                            if ((pShwPD->a[i].u & (EPT_E_READ | EPT_E_LEAF)) == (EPT_E_READ | EPT_E_LEAF))
    566563                            {
    567 # ifndef PGM_WITHOUT_MAPPINGS
    568                                 Assert(!(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING));
    569 # endif
    570564                                pShwPD->a[i].u = 0;
    571565                                Assert(pPage->cPresent);
  • trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp

    r91848 r91854  
    157157static const SSMFIELD s_aPGMFields[] =
    158158{
    159     SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    160     SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    161     SSMFIELD_ENTRY(         PGM, cbMappingFixed),
     159    SSMFIELD_ENTRY_OLD(          fMappingsFixed, sizeof(bool)),
     160    SSMFIELD_ENTRY_OLD_GCPTR(    GCPtrMappingFixed),
     161    SSMFIELD_ENTRY_OLD(          cbMappingFixed, sizeof(uint32_t)),
    162162    SSMFIELD_ENTRY(         PGM, cBalloonedPages),
    163163    SSMFIELD_ENTRY_TERM()
     
    166166static const SSMFIELD s_aPGMFieldsPreBalloon[] =
    167167{
    168     SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    169     SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    170     SSMFIELD_ENTRY(         PGM, cbMappingFixed),
     168    SSMFIELD_ENTRY_OLD(          fMappingsFixed, sizeof(bool)),
     169    SSMFIELD_ENTRY_OLD_GCPTR(    GCPtrMappingFixed),
     170    SSMFIELD_ENTRY_OLD(          cbMappingFixed, sizeof(uint32_t)),
    171171    SSMFIELD_ENTRY_TERM()
    172172};
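The SSMFIELD_ENTRY_OLD switch above is the table-driven way to retire fields: old saved states still carry the bytes, so the loader must consume them without storing anything (the manual SSMR3Skip changes further down do the same for the pre-struct load path). A minimal model of the idea over a plain byte stream; the stream API here is a made-up stand-in for SSM and the field widths are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Consume cb bytes of a retired field without storing them. */
    static int streamSkip(FILE *pFile, long cb)
    {
        return fseek(pFile, cb, SEEK_CUR) == 0 ? 0 : -1;
    }

    static int loadPgmState(FILE *pFile, uint64_t *pcBalloonedPages)
    {
        if (   streamSkip(pFile, 1)                 /* fMappingsFixed (bool) */
            || streamSkip(pFile, sizeof(uint64_t))  /* GCPtrMappingFixed     */
            || streamSkip(pFile, sizeof(uint32_t))) /* cbMappingFixed        */
            return -1;
        return fread(pcBalloonedPages, sizeof(*pcBalloonedPages), 1, pFile) == 1 ? 0 : -1;
    }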
     
    20502050     * Save basic data (required / unaffected by relocation).
    20512051     */
    2052     bool const fMappingsFixed  = pVM->pgm.s.fMappingsFixed;
    2053     pVM->pgm.s.fMappingsFixed |= pVM->pgm.s.fMappingsFixedRestored;
    20542052    SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
    2055     pVM->pgm.s.fMappingsFixed  = fMappingsFixed;
    20562053
    20572054    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     
    29982995        AssertLogRelRCReturn(rc, rc);
    29992996
    3000         pPGM->fMappingsFixed    = pgmOld.fMappingsFixed;
    3001         pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
    3002         pPGM->cbMappingFixed    = pgmOld.cbMappingFixed;
    3003 
    30042997        PVMCPU pVCpu0 = pVM->apCpusR3[0];
    30052998        pVCpu0->pgm.s.fA20Enabled   = pgmOld.fA20Enabled;
     
    30113004        AssertRelease(pVM->cCpus == 1);
    30123005
    3013         SSMR3GetBool(pSSM,      &pPGM->fMappingsFixed);
    3014         SSMR3GetGCPtr(pSSM,     &pPGM->GCPtrMappingFixed);
    3015         SSMR3GetU32(pSSM,       &pPGM->cbMappingFixed);
     3006        SSMR3Skip(pSSM,         sizeof(bool));
     3007        RTGCPTR GCPtrIgn;
     3008        SSMR3GetGCPtr(pSSM,     &GCPtrIgn);
     3009        SSMR3Skip(pSSM,         sizeof(uint32_t));
    30163010
    30173011        uint32_t cbRamSizeIgnored;
     
    32143208                PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
    32153209            }
    3216 
    3217             /*
    3218              * Try re-fixate the guest mappings.
    3219              */
    3220             pVM->pgm.s.fMappingsFixedRestored = false;
    3221             if (   pVM->pgm.s.fMappingsFixed
    3222                 && pgmMapAreMappingsEnabled(pVM))
    3223             {
    3224 #ifndef PGM_WITHOUT_MAPPINGS
    3225                 RTGCPTR     GCPtrFixed    = pVM->pgm.s.GCPtrMappingFixed;
    3226                 uint32_t    cbFixed       = pVM->pgm.s.cbMappingFixed;
    3227                 pVM->pgm.s.fMappingsFixed = false;
    3228 
    3229                 uint32_t    cbRequired;
    3230                 int rc2 = PGMR3MappingsSize(pVM, &cbRequired); AssertRC(rc2);
    3231                 if (   RT_SUCCESS(rc2)
    3232                     && cbRequired > cbFixed)
    3233                     rc2 = VERR_OUT_OF_RANGE;
    3234                 if (RT_SUCCESS(rc2))
    3235                     rc2 = pgmR3MappingsFixInternal(pVM, GCPtrFixed, cbFixed);
    3236                 if (RT_FAILURE(rc2))
    3237                 {
    3238                     LogRel(("PGM: Unable to re-fixate the guest mappings at %RGv-%RGv: rc=%Rrc (cbRequired=%#x)\n",
    3239                             GCPtrFixed, GCPtrFixed + cbFixed, rc2, cbRequired));
    3240                     pVM->pgm.s.fMappingsFixed         = false;
    3241                     pVM->pgm.s.fMappingsFixedRestored = true;
    3242                     pVM->pgm.s.GCPtrMappingFixed      = GCPtrFixed;
    3243                     pVM->pgm.s.cbMappingFixed         = cbFixed;
    3244                 }
    3245 #else
    3246                 AssertFailed();
    3247 #endif
    3248             }
    3249             else
    3250             {
    3251                 /* We used to set fixed + disabled while we only use disabled now,
    3252                    so wipe the state to avoid any confusion. */
    3253                 pVM->pgm.s.fMappingsFixed    = false;
    3254                 pVM->pgm.s.GCPtrMappingFixed = NIL_RTGCPTR;
    3255                 pVM->pgm.s.cbMappingFixed    = 0;
    3256             }
    3257 
    3258             /*
    3259              * If we have floating mappings, do a CR3 sync now to make sure the HMA
    3260              * doesn't conflict with guest code / data and thereby cause trouble
    3261              * when restoring other components like PATM.
    3262              */
    3263             if (pgmMapAreMappingsFloating(pVM))
    3264             {
    3265                 PVMCPU pVCpu = pVM->apCpusR3[0];
    3266                 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),  CPUMGetGuestCR4(pVCpu), true);
    3267                 if (RT_FAILURE(rc))
    3268                     return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
    3269                                              N_("PGMSyncCR3 failed unexpectedly with rc=%Rrc"), rc);
    3270 
    3271                 /* Make sure to re-sync before executing code. */
    3272                 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    3273                 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    3274             }
    32753210        }
    32763211    }
  • trunk/src/VBox/VMM/VMMR3/TM.cpp

    r90347 r91854  
    260260    rc = SUPR3GipGetPhys(&HCPhysGIP);
    261261    AssertMsgRCReturn(rc, ("Failed to get GIP physical address!\n"), rc);
    262 
    263 #ifndef PGM_WITHOUT_MAPPINGS
    264     RTGCPTR GCPtr;
    265 # ifdef SUP_WITH_LOTS_OF_CPUS
    266     rc = MMR3HyperMapHCPhys(pVM, pVM->tm.s.pvGIPR3, NIL_RTR0PTR, HCPhysGIP, (size_t)pGip->cPages * PAGE_SIZE,
    267                             "GIP", &GCPtr);
    268 # else
    269     rc = MMR3HyperMapHCPhys(pVM, pVM->tm.s.pvGIPR3, NIL_RTR0PTR, HCPhysGIP, PAGE_SIZE, "GIP", &GCPtr);
    270 # endif
    271     if (RT_FAILURE(rc))
    272     {
    273         AssertMsgFailed(("Failed to map GIP into GC, rc=%Rrc!\n", rc));
    274         return rc;
    275     }
    276     pVM->tm.s.pvGIPRC = GCPtr;
    277     LogFlow(("TMR3Init: HCPhysGIP=%RHp at %RRv\n", HCPhysGIP, pVM->tm.s.pvGIPRC));
    278     MMR3HyperReserveFence(pVM);
    279 #endif
    280 
    281262
    282263    /* Check assumptions made in TMAllVirtual.cpp about the GIP update interval. */
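The deleted TM.cpp block mapped the GIP into the hypervisor area and reserved a fence after it: the whole multi-page GIP when built with SUP_WITH_LOTS_OF_CPUS, otherwise a single page. The size selection in isolation, with an illustrative page size standing in for PAGE_SIZE:

    #include <stddef.h>

    #define MY_PAGE_SIZE 4096u

    /* cPages comes from the GIP header; fLotsOfCpus models the
     * SUP_WITH_LOTS_OF_CPUS build option. */
    static size_t gipMappingSize(unsigned cPages, int fLotsOfCpus)
    {
        return fLotsOfCpus ? (size_t)cPages * MY_PAGE_SIZE : MY_PAGE_SIZE;
    }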
  • trunk/src/VBox/VMM/VMMR3/VM.cpp

    r91807 r91854  
    632632                if (RT_SUCCESS(rc))
    633633                {
    634 #ifndef PGM_WITHOUT_MAPPINGS
    635                     rc = PGMR3FinalizeMappings(pVM);
     634                    LogFlow(("Ring-3 init succeeded\n"));
     635
     636                    /*
     637                     * Init the Ring-0 components.
     638                     */
     639                    rc = vmR3InitRing0(pVM);
    636640                    if (RT_SUCCESS(rc))
     641                    {
     642                        /* Relocate again, because some switcher fixups depends on R0 init results. */
     643                        VMR3Relocate(pVM, 0 /* offDelta */);
     644
     645#ifdef VBOX_WITH_DEBUGGER
     646                        /*
     647                         * Init the tcp debugger console if we're building
     648                         * with debugger support.
     649                         */
     650                        void *pvUser = NULL;
     651                        rc = DBGCIoCreate(pUVM, &pvUser);
     652                        if (    RT_SUCCESS(rc)
     653                            ||  rc == VERR_NET_ADDRESS_IN_USE)
     654                        {
     655                            pUVM->vm.s.pvDBGC = pvUser;
    637656#endif
    638                     {
    639 
    640                         LogFlow(("Ring-3 init succeeded\n"));
    641 
    642                         /*
    643                          * Init the Ring-0 components.
    644                          */
    645                         rc = vmR3InitRing0(pVM);
    646                         if (RT_SUCCESS(rc))
    647                         {
    648                             /* Relocate again, because some switcher fixups depends on R0 init results. */
    649                             VMR3Relocate(pVM, 0 /* offDelta */);
    650 
     657                            /*
     658                             * Now we can safely set the VM halt method to default.
     659                             */
     660                            rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
     661                            if (RT_SUCCESS(rc))
     662                            {
     663                                /*
     664                                 * Set the state and we're done.
     665                                 */
     666                                vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
     667                                return VINF_SUCCESS;
     668                            }
    651669#ifdef VBOX_WITH_DEBUGGER
    652                             /*
    653                              * Init the tcp debugger console if we're building
    654                              * with debugger support.
    655                              */
    656                             void *pvUser = NULL;
    657                             rc = DBGCIoCreate(pUVM, &pvUser);
    658                             if (    RT_SUCCESS(rc)
    659                                 ||  rc == VERR_NET_ADDRESS_IN_USE)
    660                             {
    661                                 pUVM->vm.s.pvDBGC = pvUser;
     670                            DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
     671                            pUVM->vm.s.pvDBGC = NULL;
     672                        }
    662673#endif
    663                                 /*
    664                                  * Now we can safely set the VM halt method to default.
    665                                  */
    666                                 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
    667                                 if (RT_SUCCESS(rc))
    668                                 {
    669                                     /*
    670                                      * Set the state and we're done.
    671                                      */
    672                                     vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
    673                                     return VINF_SUCCESS;
    674                                 }
    675 #ifdef VBOX_WITH_DEBUGGER
    676                                 DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
    677                                 pUVM->vm.s.pvDBGC = NULL;
    678                             }
    679 #endif
    680                             //..
    681                         }
     674                        //..
    682675                    }
    683676                    vmR3Destroy(pVM);
     
    913906                                                                if (RT_SUCCESS(rc))
    914907                                                                {
    915                                                                     rc = PGMR3InitDynMap(pVM);
    916                                                                     if (RT_SUCCESS(rc))
    917                                                                         rc = MMR3HyperInitFinalize(pVM);
     908                                                                    rc = MMR3HyperInitFinalize(pVM);
    918909                                                                    if (RT_SUCCESS(rc))
    919910                                                                        rc = PGMR3InitFinalize(pVM);
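With PGMR3FinalizeMappings gone, the ring-3 init tail in VM.cpp reduces to the plain success ladder visible in the re-indented code: ring-0 init, a second relocation, the optional debugger console, the halt method, then the CREATED state. Its control flow modeled standalone, every function a stand-in returning 0 on success:

    static int  initRing0(void)      { return 0; }
    static void relocateAgain(void)  { /* switcher fixups need R0 results */ }
    static int  dbgcIoCreate(void)   { return 0; }
    static int  setHaltMethod(void)  { return 0; }

    static int initTail(void)
    {
        int rc = initRing0();
        if (rc == 0)
        {
            relocateAgain();
            rc = dbgcIoCreate();   /* the real code also tolerates address-in-use */
            if (rc == 0)
            {
                rc = setHaltMethod();
                if (rc == 0)
                    return 0;      /* state -> VMSTATE_CREATED */
            }
        }
        return rc;
    }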