VirtualBox

Changeset 82591 in vbox


Ignore:
Timestamp:
Dec 16, 2019 5:55:40 PM (5 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
135476
Message:

VMM: Changing how we access guest RAM when in ring-0 (disabled). bugref:9627

Location:
trunk
Files:
14 edited

Legend:

Unmodified
Added
Removed
  • trunk/Config.kmk

    r82496 r82591  
    431431 VBOX_WITH_NATIVE_NEM = 1
    432432endif
     433# Enables mapping guest RAM into host kernel space.
     434#if1of ($(KBUILD_TARGET), linux win)
     435# VBOX_WITH_RAM_IN_KERNEL := 1
     436#endif
    433437## @}
    434438
  • trunk/include/VBox/vmm/gmm.h

    r80346 r82591  
    414414GMMR0DECL(int)  GMMR0MapUnmapChunk(PGVM pGVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
    415415GMMR0DECL(int)  GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3);
     416GMMR0DECL(int)  GMMR0PageIdToVirt(PGVM pGVM, uint32_t idPage, void **ppv);
    416417GMMR0DECL(int)  GMMR0RegisterSharedModule(PGVM pGVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName,
    417418                                          char *pszVersion, RTGCPTR GCBaseAddr,  uint32_t cbModule, uint32_t cRegions,
  • trunk/src/VBox/VMM/Config.kmk

    r81153 r82591  
    8181 endif
    8282endif
     83ifdef VBOX_WITH_RAM_IN_KERNEL
     84 VMM_COMMON_DEFS += VBOX_WITH_RAM_IN_KERNEL
     85 if1of ($(KBUILD_TARGET), linux solaris) # Hosts that implement SUPR0HCPhysToVirt.
     86  VMM_COMMON_DEFS += VBOX_WITH_LINEAR_HOST_PHYS_MEM
     87 endif
     88endif
    8389
    8490# VMM_COMMON_DEFS += VBOX_WITH_NS_ACCOUNTING_STATS
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r81153 r82591  
    21542154    if (RT_SUCCESS(rc))
    21552155    {
     2156# ifdef VBOX_WITH_RAM_IN_KERNEL
     2157        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
     2158        if (RT_SUCCESS(rc))
     2159        {
     2160#  ifdef IN_RING3
     2161            pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
     2162            pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
     2163#  else
     2164            pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
     2165            pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
     2166#  endif
     2167            pgmUnlock(pVM);
     2168            return VINF_SUCCESS;
     2169        }
     2170# else
    21562171        RTHCPTR HCPtrGuestCR3;
    21572172        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
     
    21592174        {
    21602175            pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
    2161 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     2176#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    21622177            pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
    2163 # endif
     2178#  endif
    21642179            *ppPd = (PX86PD)HCPtrGuestCR3;
    21652180
     
    21672182            return VINF_SUCCESS;
    21682183        }
    2169 
     2184# endif
    21702185        AssertRC(rc);
    21712186    }
     
    21962211    if (RT_SUCCESS(rc))
    21972212    {
     2213# ifdef VBOX_WITH_RAM_IN_KERNEL
     2214        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
     2215        if (RT_SUCCESS(rc))
     2216        {
     2217#  ifdef IN_RING3
     2218            pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
     2219            pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
     2220#  else
     2221            pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
     2222            pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
     2223#  endif
     2224            pgmUnlock(pVM);
     2225            return VINF_SUCCESS;
     2226        }
     2227# else
    21982228        RTHCPTR HCPtrGuestCR3;
    21992229        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
     
    22012231        {
    22022232            pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
    2203 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     2233#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    22042234            pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
    2205 # endif
     2235#  endif
    22062236            *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
    22072237
     
    22092239            return VINF_SUCCESS;
    22102240        }
    2211 
     2241# endif
    22122242        AssertRC(rc);
    22132243    }
     
    22442274    if (RT_SUCCESS(rc))
    22452275    {
     2276# ifdef VBOX_WITH_RAM_IN_KERNEL
     2277        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
     2278        AssertRC(rc);
     2279        if (RT_SUCCESS(rc))
     2280        {
     2281#  ifdef IN_RING3
     2282            pVCpu->pgm.s.apGstPaePDsR0[iPdpt]          = NIL_RTR0PTR;
     2283            pVCpu->pgm.s.apGstPaePDsR3[iPdpt]          = *ppPd;
     2284#  else
     2285            pVCpu->pgm.s.apGstPaePDsR3[iPdpt]          = NIL_RTR3PTR;
     2286            pVCpu->pgm.s.apGstPaePDsR0[iPdpt]          = *ppPd;
     2287#  endif
     2288            if (fChanged)
     2289                pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt]   = GCPhys;
     2290            pgmUnlock(pVM);
     2291            return VINF_SUCCESS;
     2292        }
     2293# else
    22462294        RTHCPTR     HCPtr       = NIL_RTHCPTR;
    2247 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     2295#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    22482296        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
    22492297        AssertRC(rc);
    2250 # endif
     2298#  endif
    22512299        if (RT_SUCCESS(rc))
    22522300        {
    22532301            pVCpu->pgm.s.apGstPaePDsR3[iPdpt]          = (R3PTRTYPE(PX86PDPAE))HCPtr;
    2254 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     2302#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    22552303            pVCpu->pgm.s.apGstPaePDsR0[iPdpt]          = (R0PTRTYPE(PX86PDPAE))HCPtr;
    2256 # endif
     2304#  endif
    22572305            if (fChanged)
    22582306                pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt]   = GCPhys;
     
    22622310            return VINF_SUCCESS;
    22632311        }
     2312# endif
    22642313    }
    22652314
    22662315    /* Invalid page or some failure, invalidate the entry. */
    22672316    pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt]   = NIL_RTGCPHYS;
    2268     pVCpu->pgm.s.apGstPaePDsR3[iPdpt]      = 0;
     2317    pVCpu->pgm.s.apGstPaePDsR3[iPdpt]      = NIL_RTR3PTR;
    22692318# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    2270     pVCpu->pgm.s.apGstPaePDsR0[iPdpt]      = 0;
     2319    pVCpu->pgm.s.apGstPaePDsR0[iPdpt]      = NIL_RTR0PTR;
    22712320# endif
    22722321
     
    22952344    if (RT_SUCCESS(rc))
    22962345    {
     2346# ifdef VBOX_WITH_RAM_IN_KERNEL
     2347        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
     2348        if (RT_SUCCESS(rc))
     2349        {
     2350#  ifdef IN_RING3
     2351            pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
     2352            pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
     2353#  else
     2354            pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
     2355            pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
     2356#  endif
     2357            pgmUnlock(pVM);
     2358            return VINF_SUCCESS;
     2359        }
     2360# else
    22972361        RTHCPTR HCPtrGuestCR3;
    22982362        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
     
    23002364        {
    23012365            pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
    2302 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     2366#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    23032367            pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
    2304 # endif
     2368#  endif
    23052369            *ppPml4 = (PX86PML4)HCPtrGuestCR3;
    23062370
     
    23082372            return VINF_SUCCESS;
    23092373        }
     2374# endif
    23102375    }
    23112376
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r80268 r82591  
    43414341    {
    43424342# if PGM_GST_TYPE == PGM_TYPE_32BIT
     4343#  ifdef VBOX_WITH_RAM_IN_KERNEL
     4344#   ifdef IN_RING3
     4345        pVCpu->pgm.s.pGst32BitPdR3 = (PX86PD)HCPtrGuestCR3;
     4346        pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
     4347#   else
     4348        pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR3PTR;
     4349        pVCpu->pgm.s.pGst32BitPdR0 = (PX86PD)HCPtrGuestCR3;
     4350#   endif
     4351#  else
    43434352        pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
    4344 ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     4353 ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    43454354        pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
     4355#   endif
    43464356#  endif
    43474357
    43484358# elif PGM_GST_TYPE == PGM_TYPE_PAE
     4359#  ifdef VBOX_WITH_RAM_IN_KERNEL
     4360#   ifdef IN_RING3
     4361        pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCR3;
     4362        pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
     4363#   else
     4364        pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
     4365        pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCR3;
     4366#   endif
     4367#  else
    43494368        pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
    4350 ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     4369 ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    43514370        pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
     4371#   endif
    43524372#  endif
    43534373
     
    43754395                if (RT_SUCCESS(rc2))
    43764396                {
     4397#  ifdef VBOX_WITH_RAM_IN_KERNEL
     4398#   ifdef IN_RING3
     4399                    pVCpu->pgm.s.apGstPaePDsR3[i]     = (PX86PDPAE)HCPtr;
     4400                    pVCpu->pgm.s.apGstPaePDsR0[i]     = NIL_RTR0PTR;
     4401#   else
     4402                    pVCpu->pgm.s.apGstPaePDsR3[i]     = NIL_RTR3PTR;
     4403                    pVCpu->pgm.s.apGstPaePDsR0[i]     = (PX86PDPAE)HCPtr;
     4404#   endif
     4405#  else
    43774406                    pVCpu->pgm.s.apGstPaePDsR3[i]     = (R3PTRTYPE(PX86PDPAE))HCPtr;
    4378 ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     4407 ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    43794408                    pVCpu->pgm.s.apGstPaePDsR0[i]     = (R0PTRTYPE(PX86PDPAE))HCPtr;
     4409#   endif
    43804410#  endif
    43814411                    pVCpu->pgm.s.aGCPhysGstPaePDs[i]  = GCPhys;
     
    43934423
    43944424# elif PGM_GST_TYPE == PGM_TYPE_AMD64
     4425#  ifdef VBOX_WITH_RAM_IN_KERNEL
     4426#   ifdef IN_RING3
     4427        pVCpu->pgm.s.pGstAmd64Pml4R3 = (PX86PML4)HCPtrGuestCR3;
     4428        pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
     4429#   else
     4430        pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
     4431        pVCpu->pgm.s.pGstAmd64Pml4R0 = (PX86PML4)HCPtrGuestCR3;
     4432#   endif
     4433#  else
    43954434        pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
    4396 ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     4435 ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    43974436        pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
     4437#   endif
    43984438#  endif
    43994439# endif
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r81624 r82591  
    573573        pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
    574574        pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
     575#ifndef VBOX_WITH_RAM_IN_KERNEL
    575576        pVM->pgm.s.PhysTlbR0.aEntries[i].pMap = 0;
     577#endif
    576578        pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
    577579    }
     
    605607    pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
    606608    pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
     609#ifndef VBOX_WITH_RAM_IN_KERNEL
    607610    pVM->pgm.s.PhysTlbR0.aEntries[idx].pMap = 0;
     611#endif
    608612    pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
    609613
     
    11311135 * @param   pVM         The cross context VM structure.
    11321136 * @param   idPage      The Page ID.
    1133  * @param   HCPhys      The physical address (for RC).
     1137 * @param   HCPhys      The physical address (for SUPR0HCPhysToVirt).
    11341138 * @param   ppv         Where to store the mapping address.
    11351139 *
     
    11521156     */
    11531157    return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv  RTLOG_COMMA_SRC_POS);
     1158
     1159#elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
     1160# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
     1161    return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
     1162# else
     1163    return GMMR0PageIdToVirt(pVM, idPage, ppv);
     1164# endif
    11541165
    11551166#else
     
    12531264        AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
    12541265        AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
     1266        *ppMap = NULL;
     1267# if   defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     1268        return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
     1269# elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
     1270        *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << PAGE_SHIFT);
     1271        return VINF_SUCCESS;
     1272# else
    12551273        *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
    1256         *ppMap = NULL;
    12571274        return VINF_SUCCESS;
     1275# endif
    12581276    }
    12591277
     
    12771295    }
    12781296
     1297# if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     1298    /*
     1299     * Just use the physical address.
     1300     */
     1301    *ppMap = NULL;
     1302    return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
     1303
     1304# elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
     1305    /*
     1306     * Go by page ID thru GMMR0.
     1307     */
     1308    *ppMap = NULL;
     1309    return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
     1310
     1311# else
    12791312    /*
    12801313     * Find/make Chunk TLB entry for the mapping chunk.
     
    13261359    *ppMap = pMap;
    13271360    return VINF_SUCCESS;
     1361# endif /* !IN_RING0 || !VBOX_WITH_RAM_IN_KERNEL */
    13281362#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    13291363}
     
    14801514        if (RT_FAILURE(rc))
    14811515            return rc;
     1516# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    14821517        pTlbe->pMap = pMap;
     1518# endif
    14831519        pTlbe->pv = pv;
    14841520        Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
     
    14871523    {
    14881524        AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
     1525# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    14891526        pTlbe->pMap = NULL;
     1527# endif
    14901528        pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
    14911529    }
     
    15751613DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
    15761614{
     1615# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    15771616    PPGMPAGEMAP pMap = pTlbe->pMap;
    15781617    if (pMap)
    15791618        pMap->cRefs++;
     1619# else
     1620    RT_NOREF(pTlbe);
     1621# endif
    15801622
    15811623    unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
     
    15901632        PGM_PAGE_INC_WRITE_LOCKS(pPage);
    15911633        AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
     1634# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    15921635        if (pMap)
    15931636            pMap->cRefs++; /* Extra ref to prevent it from going away. */
     1637# endif
    15941638    }
    15951639
    15961640    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
     1641# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    15971642    pLock->pvMap = pMap;
     1643# else
     1644    pLock->pvMap = NULL;
     1645# endif
    15981646}
    15991647
     
    16081656DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
    16091657{
     1658# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    16101659    PPGMPAGEMAP pMap = pTlbe->pMap;
    16111660    if (pMap)
    16121661        pMap->cRefs++;
     1662# else
     1663    RT_NOREF(pTlbe);
     1664# endif
    16131665
    16141666    unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
     
    16231675        PGM_PAGE_INC_READ_LOCKS(pPage);
    16241676        AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
     1677# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    16251678        if (pMap)
    16261679            pMap->cRefs++; /* Extra ref to prevent it from going away. */
     1680# endif
    16271681    }
    16281682
    16291683    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
     1684# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    16301685    pLock->pvMap = pMap;
     1686# else
     1687    pLock->pvMap = NULL;
     1688# endif
    16311689}
    16321690
     
    20422100
    20432101#else
     2102# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    20442103    PPGMPAGEMAP pMap       = (PPGMPAGEMAP)pLock->pvMap;
     2104# endif
    20452105    PPGMPAGE    pPage      = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
    20462106    bool        fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
     
    20842144    }
    20852145
     2146# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
    20862147    if (pMap)
    20872148    {
     
    20892150        pMap->cRefs--;
    20902151    }
     2152# endif
    20912153    pgmUnlock(pVM);
    20922154#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     
    42684330                    *ppb = NULL;
    42694331#else
    4270                     PPGMPAGER3MAPTLBE pTlbe;
     4332                    PPGMPAGEMAPTLBE pTlbe;
    42714333                    rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
    42724334                    AssertLogRelRCReturn(rc, rc);
     
    43094371                    *ppb = NULL;
    43104372#else
    4311                     PPGMPAGER3MAPTLBE pTlbe;
     4373                    PPGMPAGEMAPTLBE pTlbe;
    43124374                    rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
    43134375                    AssertLogRelRCReturn(rc, rc);
     
    44374499#else
    44384500            /* Get a ring-3 mapping of the address. */
    4439             PPGMPAGER3MAPTLBE pTlbe;
     4501            PPGMPAGEMAPTLBE pTlbe;
    44404502            rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
    44414503            AssertLogRelRCReturn(rc2, rc2);
     
    46644726
    46654727    return rc;
    4666 
    4667 }
    4668 
     4728}
     4729
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r82333 r82591  
    130130    if (pTimer->enmType == TMTIMERTYPE_DEV)
    131131    {
     132        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
    132133        PPDMDEVINSR0        pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
     134        ASMSetFlags(fSavedFlags);
    133135        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
    134136        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

    r81369 r82591  
    400400     * and related frees.) */
    401401    RTR0MEMOBJ          hMemObj;
     402#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     403    /** Pointer to the kernel mapping. */
     404    uint8_t            *pbMapping;
     405#endif
    402406    /** Pointer to the next chunk in the free list.  (Giant mtx.) */
    403407    PGMMCHUNK           pFreeNext;
     
    21132117 * @param   pGMM        Pointer to the GMM instance.
    21142118 * @param   pSet        Pointer to the set.
    2115  * @param   MemObj      The memory object for the chunk.
     2119 * @param   hMemObj     The memory object for the chunk.
    21162120 * @param   hGVM        The affinity of the chunk. NIL_GVM_HANDLE for no
    21172121 *                      affinity.
     
    21232127 *          the success path.   On failure, no locks will be held.
    21242128 */
    2125 static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, uint16_t fChunkFlags,
     2129static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ hMemObj, uint16_t hGVM, uint16_t fChunkFlags,
    21262130                              PGMMCHUNK *ppChunk)
    21272131{
     
    21302134    Assert(fChunkFlags == 0 || fChunkFlags == GMM_CHUNK_FLAGS_LARGE_PAGE);
    21312135
     2136#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     2137    /*
     2138     * Get a ring-0 mapping of the object.
     2139     */
     2140    uint8_t *pbMapping = (uint8_t *)RTR0MemObjAddress(hMemObj);
     2141    if (!pbMapping)
     2142    {
     2143        RTR0MEMOBJ hMapObj;
     2144        int rc = RTR0MemObjMapKernel(&hMapObj, hMemObj, (void *)-1, 0,  RTMEM_PROT_READ | RTMEM_PROT_WRITE);
     2145        if (RT_SUCCESS(rc))
     2146            pbMapping = (uint8_t *)RTR0MemObjAddress(hMapObj);
     2147        else
     2148            return rc;
     2149        AssertPtr(pbMapping);
     2150    }
     2151#endif
     2152
     2153    /*
     2154     * Allocate a chunk.
     2155     */
    21322156    int rc;
    21332157    PGMMCHUNK pChunk = (PGMMCHUNK)RTMemAllocZ(sizeof(*pChunk));
     
    21372161         * Initialize it.
    21382162         */
    2139         pChunk->hMemObj     = MemObj;
     2163        pChunk->hMemObj     = hMemObj;
     2164#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     2165        pChunk->pbMapping   = pbMapping;
     2166#endif
    21402167        pChunk->cFree       = GMM_CHUNK_NUM_PAGES;
    21412168        pChunk->hGVM        = hGVM;
     
    22172244    if (RT_SUCCESS(rc))
    22182245    {
    2219 /** @todo Duplicate gmmR0RegisterChunk here so we can avoid chaining up the
    2220  *        free pages first and then unchaining them right afterwards. Instead
    2221  *        do as much work as possible without holding the giant lock. */
     2246        /** @todo Duplicate gmmR0RegisterChunk here so we can avoid chaining up the
     2247         *        free pages first and then unchaining them right afterwards. Instead
     2248         *        do as much work as possible without holding the giant lock. */
    22222249        PGMMCHUNK pChunk;
    22232250        rc = gmmR0RegisterChunk(pGMM, pSet, hMemObj, pGVM->hSelf, 0 /*fChunkFlags*/, &pChunk);
     
    22292256
    22302257        /* bail out */
    2231         RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
     2258        RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    22322259    }
    22332260
     
    31043131                gmmR0LinkChunk(pChunk, pSet);
    31053132                gmmR0MutexRelease(pGMM);
     3133                LogFlow(("GMMR0AllocateLargePage: returns VINF_SUCCESS\n"));
     3134                return VINF_SUCCESS;
    31063135            }
    3107             else
    3108                 RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
     3136            RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    31093137        }
    31103138    }
     
    32923320    RTMemFree(pChunk);
    32933321
     3322#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     3323    int rc = RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
     3324#else
    32943325    int rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
     3326#endif
    32953327    AssertLogRelRC(rc);
    32963328
     
    42554287     * (The GMM locking is done inside gmmR0RegisterChunk.)
    42564288     */
    4257     RTR0MEMOBJ MemObj;
    4258     rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
     4289    RTR0MEMOBJ hMemObj;
     4290    rc = RTR0MemObjLockUser(&hMemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
    42594291    if (RT_SUCCESS(rc))
    42604292    {
    4261         rc = gmmR0RegisterChunk(pGMM, &pGVM->gmm.s.Private, MemObj, pGVM->hSelf, 0 /*fChunkFlags*/, NULL);
     4293        rc = gmmR0RegisterChunk(pGMM, &pGVM->gmm.s.Private, hMemObj, pGVM->hSelf, 0 /*fChunkFlags*/, NULL);
    42624294        if (RT_SUCCESS(rc))
    42634295            gmmR0MutexRelease(pGMM);
    42644296        else
    4265             RTR0MemObjFree(MemObj, false /* fFreeMappings */);
     4297            RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    42664298    }
    42674299
     
    42694301    return rc;
    42704302}
     4303
     4304#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     4305
     4306/**
     4307 * Gets the ring-0 virtual address for the given page.
     4308 *
     4309 * @returns VBox status code.
     4310 * @param   pGVM        Pointer to the kernel-only VM instance data.
     4311 * @param   idPage      The page ID.
     4312 * @param   ppv         Where to store the address.
     4313 * @thread  EMT
     4314 */
     4315GMMR0DECL(int)  GMMR0PageIdToVirt(PGVM pGVM, uint32_t idPage, void **ppv)
     4316{
     4317    *ppv = NULL;
     4318    PGMM pGMM;
     4319    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
     4320    gmmR0MutexAcquire(pGMM); /** @todo shared access */
     4321
     4322    int rc;
     4323    PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
     4324    if (pChunk)
     4325    {
     4326        const GMMPAGE *pPage = &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];
     4327        if (RT_LIKELY(   (   GMM_PAGE_IS_PRIVATE(pPage)
     4328                          && pPage->Private.hGVM == pGVM->hSelf)
     4329                      || GMM_PAGE_IS_SHARED(pPage)))
     4330        {
     4331            AssertPtr(pChunk->pbMapping);
     4332            *ppv = &pChunk->pbMapping[(idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT];
     4333            rc = VINF_SUCCESS;
     4334        }
     4335        else
     4336            rc = VERR_GMM_NOT_PAGE_OWNER;
     4337    }
     4338    else
     4339        rc = VERR_GMM_PAGE_NOT_FOUND;
     4340
     4341    gmmR0MutexRelease(pGMM);
     4342    return rc;
     4343}
     4344
     4345#endif
    42714346
    42724347#ifdef VBOX_WITH_PAGE_SHARING
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    r82556 r82591  
    106106 * be logged, written to the VMs assertion text buffer, and @a a_BadExpr is
    107107 * executed. */
    108 #if defined(VBOX_STRICT) || 1
     108#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
    109109# define GVMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
    110110# define GVMM_CHECK_SMAP_CHECK(a_BadExpr) \
  • trunk/src/VBox/VMM/VMMR0/PGMR0.cpp

    r82555 r82591  
    375375    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR0PhysMMIOExFind(pGVM, pDevIns, hMmio2);
    376376    AssertReturn(pFirstRegMmio, VERR_NOT_FOUND);
     377#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     378    uint8_t * const pvR0  = (uint8_t *)pFirstRegMmio->pvR0;
     379#else
    377380    RTR3PTR const  pvR3   = pFirstRegMmio->pvR3;
     381#endif
    378382    RTGCPHYS const cbReal = pFirstRegMmio->cbReal;
    379383    pFirstRegMmio = NULL;
     
    389393     * Do the mapping.
    390394     */
     395#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     396    AssertPtr(pvR0);
     397    *ppvMapping = pvR0 + offSub;
     398    return VINF_SUCCESS;
     399#else
    391400    return SUPR0PageMapKernel(pGVM->pSession, pvR3, (uint32_t)offSub, (uint32_t)cbSub, 0 /*fFlags*/, ppvMapping);
     401#endif
    392402}
    393403
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r82555 r82591  
    8888 * be logged, written to the VMs assertion text buffer, and @a a_BadExpr is
    8989 * executed. */
    90 #if defined(VBOX_STRICT) || 1
     90#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
    9191# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
    9292# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
     
    437437    }
    438438#endif /* LOG_ENABLED */
     439SUPR0Printf("VMMR0InitVM: eflags=%x fKernelFeatures=%#x (SUPKERNELFEATURES_SMAP=%d)\n",
     440            ASMGetFlags(), fKernelFeatures, RT_BOOL(fKernelFeatures & SUPKERNELFEATURES_SMAP));
    439441
    440442    /*
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r82316 r82591  
    29952995    AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
    29962996    AssertLogRelReturn(cPages <= (MM_MMIO_64_MAX >> X86_PAGE_SHIFT), VERR_OUT_OF_RANGE);
     2997    AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_OUT_OF_RANGE);
    29972998
    29982999    /*
     
    30353036        {
    30363037            void *pvPages;
     3038#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     3039            RTR0PTR pvPagesR0;
     3040            rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, &pvPagesR0, paPages);
     3041#else
    30373042            rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
     3043#endif
    30383044            if (RT_SUCCESS(rc))
    30393045            {
     
    30553061                    {
    30563062                        pCur->pvR3          = pbCurPages;
     3063#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     3064                        pCur->pvR0          = pvPagesR0 + (iSrcPage << PAGE_SHIFT);
     3065#endif
    30573066                        pCur->RamRange.pvR3 = pbCurPages;
    30583067                        pCur->idMmio2       = idMmio2;
     
    46304639#endif
    46314640
     4641#ifndef VBOX_WITH_RAM_IN_KERNEL
    46324642    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
    46334643        if (pVM->pgm.s.PhysTlbR0.aEntries[i].pMap == pChunk)
    46344644            return 0;
     4645#endif
    46354646    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
    46364647        if (pVM->pgm.s.PhysTlbR3.aEntries[i].pMap == pChunk)
  • trunk/src/VBox/VMM/include/PGMInline.h

    r82092 r82591  
    505505#endif
    506506        AssertPtr(pTlbe->pv);
    507 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     507#if defined(IN_RING3) || (!defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_RAM_IN_KERNEL))
    508508        Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
    509509#endif
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r82558 r82591  
    14791479    /** The owner of the range. (a device) */
    14801480    PPDMDEVINSR3                        pDevInsR3;
    1481     /** Pointer to the ring-3 mapping of the allocation, if MMIO2. */
     1481    /** Pointer to the ring-3 mapping of the allocation. */
    14821482    RTR3PTR                             pvR3;
     1483#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     1484    /** Pointer to the ring-0 mapping of the allocation. */
     1485    RTR0PTR                             pvR0;
     1486#endif
    14831487    /** Pointer to the next range - R3. */
    14841488    R3PTRTYPE(struct PGMREGMMIO2RANGE *) pNextR3;
     
    14941498    uint8_t                             idMmio2;
    14951499    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
     1500#if defined(VBOX_WITH_RAM_IN_KERNEL) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
     1501    uint8_t                             abAlignment[HC_ARCH_BITS == 32 ? 6 + 4 : 2];
     1502#else
    14961503    uint8_t                             abAlignment[HC_ARCH_BITS == 32 ? 6 + 8 : 2 + 8];
     1504#endif
    14971505    /** The real size.
    14981506     * This may be larger than indicated by RamRange.cb if the range has been
     
    15681576
    15691577
     1578/** @name Ring-3 page mapping TLBs
     1579 * @{  */
     1580
    15701581/** Pointer to an allocation chunk ring-3 mapping. */
    15711582typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
     
    16041615#endif
    16051616    /** The chunk map. */
    1606 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     1617#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAM_IN_KERNEL)
    16071618    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
    16081619#else
     
    16561667    RTGCPHYS volatile                   GCPhys;
    16571668    /** The guest page. */
    1658 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     1669#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAM_IN_KERNEL)
    16591670    R3PTRTYPE(PPGMPAGE) volatile        pPage;
    16601671#else
     
    16621673#endif
    16631674    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
    1664 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     1675#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAM_IN_KERNEL)
    16651676    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pMap;
    16661677#else
     
    16681679#endif
    16691680    /** The address */
    1670 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     1681#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAM_IN_KERNEL)
    16711682    R3PTRTYPE(void *) volatile          pv;
    16721683#else
     
    17041715#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
    17051716
     1717/** @} */
     1718
     1719#if defined(VBOX_WITH_RAM_IN_KERNEL) || defined(DOXYGEN_RUNNING)
     1720/** @name Ring-0 page mapping TLB
     1721 * @{  */
     1722/**
     1723 * Ring-0 guest page mapping TLB entry.
     1724 */
     1725typedef struct PGMPAGER0MAPTLBE
     1726{
     1727    /** Address of the page. */
     1728    RTGCPHYS volatile                   GCPhys;
     1729    /** The guest page. */
     1730    R0PTRTYPE(PPGMPAGE) volatile        pPage;
     1731    /** The address */
     1732    R0PTRTYPE(void *) volatile          pv;
     1733} PGMPAGER0MAPTLBE;
     1734/** Pointer to an entry in the HC physical TLB. */
     1735typedef PGMPAGER0MAPTLBE *PPGMPAGER0MAPTLBE;
     1736
     1737
      1738/** The number of entries in the ring-0 guest page mapping TLB.
     1739 * @remarks The value must be a power of two. */
     1740#define PGM_PAGER0MAPTLB_ENTRIES 256
     1741
     1742/**
      1743 * Ring-0 guest page mapping TLB.
      1744 * @remarks The ring-0 counterpart of PGMPAGER3MAPTLB.
     1745 */
     1746typedef struct PGMPAGER0MAPTLB
     1747{
     1748    /** The TLB entries. */
     1749    PGMPAGER0MAPTLBE            aEntries[PGM_PAGER0MAPTLB_ENTRIES];
     1750} PGMPAGER0MAPTLB;
      1752/** Pointer to the ring-0 guest page mapping TLB. */
     1752typedef PGMPAGER0MAPTLB *PPGMPAGER0MAPTLB;
     1753
     1754/**
     1755 * Calculates the index of the TLB entry for the specified guest page.
     1756 * @returns Physical TLB index.
     1757 * @param   GCPhys      The guest physical address.
     1758 */
     1759#define PGM_PAGER0MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER0MAPTLB_ENTRIES - 1) )
     1760/** @} */
     1761#endif /* VBOX_WITH_RAM_IN_KERNEL || DOXYGEN_RUNNING */
    17061762
    17071763/**
     
    18671923/** @typedef PPPGMPAGEMAP
    18681924 * Pointer to a page mapper unit pointer for current context. */
    1869 #if defined(IN_RING0) && 0
    1870 // typedef PPGMPAGER0MAPTLB               PPGMPAGEMAPTLB;
    1871 // typedef PPGMPAGER0MAPTLBE              PPGMPAGEMAPTLBE;
    1872 // typedef PPGMPAGER0MAPTLBE             *PPPGMPAGEMAPTLBE;
    1873 //# define PGM_PAGEMAPTLB_ENTRIES         PGM_PAGER0MAPTLB_ENTRIES
    1874 //# define PGM_PAGEMAPTLB_IDX(GCPhys)     PGM_PAGER0MAPTLB_IDX(GCPhys)
    1875 // typedef PPGMCHUNKR0MAP                 PPGMPAGEMAP;
    1876 // typedef PPPGMCHUNKR0MAP                PPPGMPAGEMAP;
     1925#if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
     1926typedef PPGMPAGER0MAPTLB                PPGMPAGEMAPTLB;
     1927typedef PPGMPAGER0MAPTLBE               PPGMPAGEMAPTLBE;
     1928typedef PPGMPAGER0MAPTLBE              *PPPGMPAGEMAPTLBE;
     1929# define PGM_PAGEMAPTLB_ENTRIES         PGM_PAGER0MAPTLB_ENTRIES
     1930# define PGM_PAGEMAPTLB_IDX(GCPhys)     PGM_PAGER0MAPTLB_IDX(GCPhys)
     1931typedef struct PGMCHUNKR0MAP           *PPGMPAGEMAP;
     1932typedef struct PGMCHUNKR0MAP          **PPPGMPAGEMAP;
    18771933#else
    1878  typedef PPGMPAGER3MAPTLB               PPGMPAGEMAPTLB;
    1879  typedef PPGMPAGER3MAPTLBE              PPGMPAGEMAPTLBE;
    1880  typedef PPGMPAGER3MAPTLBE             *PPPGMPAGEMAPTLBE;
     1934typedef PPGMPAGER3MAPTLB                PPGMPAGEMAPTLB;
     1935typedef PPGMPAGER3MAPTLBE               PPGMPAGEMAPTLBE;
     1936typedef PPGMPAGER3MAPTLBE              *PPPGMPAGEMAPTLBE;
    18811937# define PGM_PAGEMAPTLB_ENTRIES         PGM_PAGER3MAPTLB_ENTRIES
    18821938# define PGM_PAGEMAPTLB_IDX(GCPhys)     PGM_PAGER3MAPTLB_IDX(GCPhys)
    1883  typedef PPGMCHUNKR3MAP                 PPGMPAGEMAP;
    1884  typedef PPPGMCHUNKR3MAP                PPPGMPAGEMAP;
     1939typedef PPGMCHUNKR3MAP                  PPGMPAGEMAP;
     1940typedef PPPGMCHUNKR3MAP                 PPPGMPAGEMAP;
    18851941#endif
    18861942/** @} */
     
    32293285        PGMCHUNKR3MAPTLB            Tlb;
    32303286        /** The chunk tree, ordered by chunk id. */
    3231 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     3287#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAM_IN_KERNEL)
    32323288        R3PTRTYPE(PAVLU32NODECORE)  pTree;
    32333289#else
     
    32513307    /** The page mapping TLB for ring-3. */
    32523308    PGMPAGER3MAPTLB                 PhysTlbR3;
     3309#ifdef VBOX_WITH_RAM_IN_KERNEL
     3310    /** The page mapping TLB for ring-0. */
     3311    PGMPAGER0MAPTLB                 PhysTlbR0;
     3312#else
    32533313    /** The page mapping TLB for ring-0 (still using ring-3 mappings). */
    32543314    PGMPAGER3MAPTLB                 PhysTlbR0;
     3315#endif
    32553316
    32563317    /** @name   The zero page.
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette