VirtualBox

Changeset 4713 in vbox

Timestamp: Sep 11, 2007 4:04:02 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 24384
Message: more on the phys page management rewrite...
Location: trunk
Files: 6 edited
Legend: unchanged lines show both the old and the new line number, added lines show only the new number, removed lines show only the old number.
  • trunk/include/VBox/pgm.h

    r4693 r4713  
    783783PGMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys);
    784784
     785/**
     786 * Converts a guest pointer to a GC physical address.
     787 *
     788 * This uses the current CR3/CR0/CR4 of the guest.
     789 *
     790 * @returns VBox status code.
     791 * @param   pVM         The VM Handle
     792 * @param   GCPtr       The guest pointer to convert.
     793 * @param   pGCPhys     Where to store the GC physical address.
     794 */
     795PGMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys);
     796
     797/**
     798 * Converts a guest pointer to a HC physical address.
     799 *
     800 * This uses the current CR3/CR0/CR4 of the guest.
     801 *
     802 * @returns VBox status code.
     803 * @param   pVM         The VM Handle
     804 * @param   GCPtr       The guest pointer to convert.
     805 * @param   pHCPhys     Where to store the HC physical address.
     806 */
     807PGMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys);
     808
     809
     810/**
     811 * Invalidates the GC page mapping TLB.
     812 *
     813 * @param   pVM     The VM handle.
     814 */
     815PDMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM);
     816
     817/**
     818 * Invalidates the ring-0 page mapping TLB.
     819 *
     820 * @param   pVM     The VM handle.
     821 */
     822PDMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM);
     823
     824/**
     825 * Invalidates the ring-3 page mapping TLB.
     826 *
     827 * @param   pVM     The VM handle.
     828 */
     829PDMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM);
     830
    785831/**
    786832 * Page mapping lock.
     
    795841    uint32_t    u32Dummy;
    796842#else
    797     /** Just a dummy for the time being. */
    798     uint32_t    u32Dummy;
     843    /** Pointer to the PGMPAGE. */
     844    void       *pvPage;
     845    /** Pointer to the PGMCHUNKR3MAP. */
     846    void       *pvMap;
    799847#endif
    800848} PGMPAGEMAPLOCK;
     
    931979 */
    932980PGMDECL(int) PGMPhysGCPhys2HCPtr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTHCPTR pHCPtr);
    933 
    934 /**
    935  * Converts a guest pointer to a GC physical address.
    936  *
    937  * This uses the current CR3/CR0/CR4 of the guest.
    938  *
    939  * @returns VBox status code.
    940  * @param   pVM         The VM Handle
    941  * @param   GCPtr       The guest pointer to convert.
    942  * @param   pGCPhys     Where to store the GC physical address.
    943  */
    944 PGMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys);
    945 
    946 /**
    947  * Converts a guest pointer to a HC physical address.
    948  *
    949  * This uses the current CR3/CR0/CR4 of the guest.
    950  *
    951  * @returns VBox status code.
    952  * @param   pVM         The VM Handle
    953  * @param   GCPtr       The guest pointer to convert.
    954  * @param   pHCPhys     Where to store the HC physical address.
    955  */
    956 PGMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys);
    957981
    958982/**
     
    17221746
    17231747/**
     1748 * Invalidates the TLB for the ring-3 mapping cache.
     1749 *
     1750 * @param   pVM         The VM handle.
     1751 */
     1752PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM);
     1753
     1754/**
    17241755 * Perform an integrity check on the PGM component.
    17251756 *
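
For orientation, a minimal sketch (not part of the changeset) of how the three new page mapping TLB invalidation entry points are meant to be used together; the PGM.cpp hunk further down issues the same calls during initialisation:

    static void examplePhysFlushMappingTlbs(PVM pVM)
    {
        /* Drop every cached GCPhys -> mapping translation in all three contexts. */
        PGMPhysInvalidatePageR3MapTLB(pVM);
        PGMPhysInvalidatePageR0MapTLB(pVM);
        PGMPhysInvalidatePageGCMapTLB(pVM);
    }
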
  • trunk/src/VBox/Runtime/Makefile.kmk

    r4687 r4713  
    661661        table/avloioport.cpp \
    662662        table/avlroioport.cpp \
     663        table/avlu32.cpp \
     664        table/avllu32.cpp \
    663665        logcom.cpp \
    664666        logformat.cpp \
     
    962964RuntimeGC_DEFS          = IN_RT_GC RT_WITH_VBOX
    963965RuntimeGC_SOURCES       = \
    964         gc/initterm-gc.cpp \
     966        gc/initterm-gc.cpp \
    965967        misc/sanity-cpp.cpp \
    966968        misc/sanity-c.c \
     
    973975        table/avloioport.cpp \
    974976        table/avlroioport.cpp \
     977        table/avlu32.cpp \
     978        table/avllu32.cpp \
    975979        assert.cpp \
    976980        generic/RTAssertDoBreakpoint-generic.cpp \
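
table/avlu32.cpp supplies the RTAvlU32* AVL tree keyed on a uint32_t, which the chunk mapping code in this changeset queries with RTAvlU32Get. A rough usage sketch; the node layout and parameter types are recalled from iprt/avl.h rather than taken from this changeset, so treat them as assumptions:

    typedef struct EXAMPLECHUNKNODE
    {
        AVLU32NODECORE  Core;       /* Core.Key holds the chunk id. */
        void           *pvMapping;  /* payload: the ring-3 mapping address. */
    } EXAMPLECHUNKNODE;

    /* Look up a node by chunk id; returns NULL if it isn't in the tree. */
    static void *exampleAvlLookup(PAVLU32NODECORE *ppTree, uint32_t idChunk)
    {
        EXAMPLECHUNKNODE *pNode = (EXAMPLECHUNKNODE *)RTAvlU32Get(ppTree, idChunk);
        return pNode ? pNode->pvMapping : NULL;
    }
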
  • trunk/src/VBox/VMM/PGM.cpp

    r4620 r4713  
    928928    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSect, "PGM");
    929929    AssertRCReturn(rc, rc);
     930
     931    /*
     932     * Invalidate the TLBs.
     933     */
     934    PGMR3PhysChunkInvalidateTLB(pVM);
     935    PGMPhysInvalidatePageR3MapTLB(pVM);
     936    PGMPhysInvalidatePageR0MapTLB(pVM);
     937    PGMPhysInvalidatePageGCMapTLB(pVM);
    930938
    931939    /*
     
    13771385    STAM_REG(pVM, &pPGM->StatDynRamTotal,                   STAMTYPE_COUNTER, "/PGM/RAM/TotalAlloc",                STAMUNIT_MEGABYTES,      "Allocated mbs of guest ram.");
    13781386    STAM_REG(pVM, &pPGM->StatDynRamGrow,                    STAMTYPE_COUNTER, "/PGM/RAM/Grow",                      STAMUNIT_OCCURENCES,     "Nr of pgmr3PhysGrowRange calls.");
     1387
     1388    STAM_REG(pVM, &pPGM->StatPageHCMapTlbHits,              STAMTYPE_COUNTER, "/PGM/PageHCMap/TlbHits",                 STAMUNIT_OCCURENCES, "TLB hits.");
     1389    STAM_REG(pVM, &pPGM->StatPageHCMapTlbMisses,            STAMTYPE_COUNTER, "/PGM/PageHCMap/TlbMisses",               STAMUNIT_OCCURENCES, "TLB misses.");
     1390    STAM_REG(pVM, &pPGM->ChunkR3Map.c,                      STAMTYPE_U32,     "/PGM/ChunkR3Map/c",                      STAMUNIT_OCCURENCES, "Number of mapped chunks.");
     1391    STAM_REG(pVM, &pPGM->ChunkR3Map.cMax,                   STAMTYPE_U32,     "/PGM/ChunkR3Map/cMax",                   STAMUNIT_OCCURENCES, "Maximum number of mapped chunks.");
     1392    STAM_REG(pVM, &pPGM->StatChunkR3MapTlbHits,             STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbHits",                STAMUNIT_OCCURENCES, "TLB hits.");
     1393    STAM_REG(pVM, &pPGM->StatChunkR3MapTlbMisses,           STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbMisses",              STAMUNIT_OCCURENCES, "TLB misses.");
    13791394
    13801395#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
  • trunk/src/VBox/VMM/PGMInternal.h

    r4696 r4713  
    481481    /** The physical address and a whole lot of other stuff. All bits are used! */
    482482    RTHCPHYS    HCPhys;
    483     uint32_t    u32A;
     483    /** The page state. */
     484    uint32_t    u2State : 2;
     485    /** Flag indicating that a write monitored page was written to when set. */
     486    uint32_t    fWrittenTo : 1;
     487    /** For later. */
     488    uint32_t    fSomethingElse : 1;
     489    /** The Page ID. */
     490    uint32_t    idPage : 28;
    484491    uint32_t    u32B;
    485492} PGMPAGE;
     493AssertCompileSize(PGMPAGE, 16);
    486494/** Pointer to a physical guest page. */
    487495typedef PGMPAGE *PPGMPAGE;
     
    491499typedef PPGMPAGE *PPPGMPAGE;
    492500
     501/** @name The Page state, PGMPAGE::u2State.
     502 * @{ */
     503/** The zero page.
     504 * This is a per-VM page that's never ever mapped writable. */
     505#define PGM_PAGE_STATE_ZERO             0
      506/** An allocated page.
     507 * This is a per-VM page allocated from the page pool.
     508 */
     509#define PGM_PAGE_STATE_ALLOCATED        1
      510/** An allocated page that's being monitored for writes.
     511 * The shadow page table mappings are read-only. When a write occurs, the
     512 * fWrittenTo member is set, the page remapped as read-write and the state
     513 * moved back to allocated. */
     514#define PGM_PAGE_STATE_WRITE_MONITORED  2
     515/** The page is shared, aka. copy-on-write.
     516 * This is a page that's shared with other VMs. */
     517#define PGM_PAGE_STATE_SHARED           3   
     518/** @} */
     519
     520
     521/**
     522 * Gets the page state.
     523 * @returns page state (PGM_PAGE_STATE_*).
     524 * @param   pPage       Pointer to the physical guest page tracking structure.
     525 */
     526#define PGM_PAGE_GET_STATE(pPage)       ( (pPage)->u2State )
     527
     528/**
     529 * Sets the page state.
     530 * @param   pPage       Pointer to the physical guest page tracking structure.
     531 * @param   _uState     The new page state.
     532 */
     533#define PGM_PAGE_SET_STATE(pPage, _uState) \
     534                                        do { (pPage)->u2State = (_uState); } while (0)
     535
    493536
    494537/**
     
    500543
    501544/**
     545 * Sets the host physical address of the guest page.
     546 * @param   pPage       Pointer to the physical guest page tracking structure.
     547 * @param   _HCPhys     The new host physical address.
     548 */
     549#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
     550                                        do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0xffff000000000fff)) \
     551                                                             | ((_HCPhys) & UINT64_C(0x0000fffffffff000)); } while (0)
     552
     553/** The chunk shift. (2^20 = 1 MB) */
     554#define GPM_CHUNK_SHIFT                 20
     555/** The allocation chunk size. */
      556#define GPM_CHUNK_SIZE                  (1U << GPM_CHUNK_SHIFT)
     557/** The shift factor for converting a page id into a chunk id. */
     558#define GPM_CHUNKID_SHIFT               (GPM_CHUNK_SHIFT - PAGE_SHIFT)
     559/** The NIL Chunk ID value. */
     560#define NIL_GPM_CHUNKID                 0
     561/** The NIL Page ID value. */
     562#define NIL_GPM_PAGEID                  0
     563
     564/**
     565 * Get the Page ID.
     566 * @returns The Page ID; NIL_GPM_PAGEID if it's a ZERO page.
     567 * @param   pPage       Pointer to the physical guest page tracking structure.
     568 */
     569#define PGM_PAGE_GET_PAGEID(pPage)      ( (pPage)->idPage )
     570/* later:
     571#define PGM_PAGE_GET_PAGEID(pPage)      (   ((uint32_t)(pPage)->HCPhys >> (48 - 12))
     572                                         |  ((uint32_t)(pPage)->HCPhys & 0xfff) )
     573*/
     574/**
     575 * Sets the Page ID.
     576 * @param   pPage       Pointer to the physical guest page tracking structure.
     577 */
     578#define PGM_PAGE_SET_PAGEID(pPage, _idPage)  do { (pPage)->idPage = (_idPage); } while (0)
     579/* later:
     580#define PGM_PAGE_SET_PAGEID(pPage, _idPage)  do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0x0000fffffffff000)) \
     581                                                                  | ((_idPage) & 0xfff) \
     582                                                                  | (((_idPage) & 0x0ffff000) << (48-12)); } while (0)
     583*/
     584
     585/**
     586 * Get the Chunk ID.
     587 * @returns The Chunk ID; NIL_GPM_CHUNKID if it's a ZERO page.
     588 * @param   pPage       Pointer to the physical guest page tracking structure.
     589 */
     590#define PGM_PAGE_GET_CHUNKID(pPage)     ( (pPage)->idPage >> GPM_CHUNKID_SHIFT )
     591/* later:
     592#if GPM_CHUNKID_SHIFT == 12
     593# define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhys >> 48) )
     594#elif GPM_CHUNKID_SHIFT > 12
     595# define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhys >> (48 + (GPM_CHUNKID_SHIFT - 12)) )
     596#elif GPM_CHUNKID_SHIFT < 12
     597# define PGM_PAGE_GET_CHUNKID(pPage)    (   ( (uint32_t)((pPage)->HCPhys >> 48)   << (12 - GPM_CHUNKID_SHIFT) ) \
     598                                         |  ( (uint32_t)((pPage)->HCPhys & 0xfff) >> GPM_CHUNKID_SHIFT ) )
     599#else
     600# error "GPM_CHUNKID_SHIFT isn't defined or something."
     601#endif
     602*/
     603
     604/**
      605 * Get the index of the page within the allocation chunk.
     606 * @returns The page index.
     607 * @param   pPage       Pointer to the physical guest page tracking structure.
     608 */
     609#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (pPage)->idPage & (RT_BIT_32(GPM_CHUNKID_SHIFT) - 1) )
     610/* later:
     611#if GPM_CHUNKID_SHIFT <= 12
     612# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  ( (uint32_t)((pPage)->HCPhys & (RT_BIT_32(GPM_CHUNKID_SHIFT) - 1)) )
     613#else
     614# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  (   (uint32_t)((pPage)->HCPhys & 0xfff) \
     615                                             |  ( (uint32_t)((pPage)->HCPhys >> 48) & (RT_BIT_32(GPM_CHUNKID_SHIFT - 12) - 1) ) )
     616#endif
     617*/
     618
     619/**
    502620 * Checks if the page is 'reserved'.
    503621 * @returns true/false.
     
    505623 */
    506624#define PGM_PAGE_IS_RESERVED(pPage)     ( !!((pPage)->HCPhys & MM_RAM_FLAGS_RESERVED) )
     625
     626/**
     627 * Checks if the page is marked for MMIO.
     628 * @returns true/false.
     629 * @param   pPage       Pointer to the physical guest page tracking structure.
     630 */
     631#define PGM_PAGE_IS_MMIO(pPage)         ( !!((pPage)->HCPhys & MM_RAM_FLAGS_MMIO) )
     632
     633/**
     634 * Checks if the page is backed by the ZERO page.
     635 * @returns true/false.
     636 * @param   pPage       Pointer to the physical guest page tracking structure.
     637 */
     638#define PGM_PAGE_IS_ZERO(pPage)         ( (pPage)->u2State == PGM_PAGE_STATE_ZERO )
     639
    507640
    508641
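
A minimal sketch (invented helper, not part of the changeset) of how the new state accessor is meant to be consulted before letting the guest write to a page:

    DECLINLINE(bool) pgmPageExampleNeedsPreparing(PPGMPAGE pPage)
    {
        switch (PGM_PAGE_GET_STATE(pPage))
        {
            case PGM_PAGE_STATE_ALLOCATED:
                return false;   /* private, writable page - nothing to do. */
            case PGM_PAGE_STATE_WRITE_MONITORED:    /* monitor must be removed first. */
            case PGM_PAGE_STATE_ZERO:               /* a real page must be allocated first. */
            case PGM_PAGE_STATE_SHARED:             /* must be copied (COW) first. */
            default:
                return true;
        }
    }
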
     
    610743{
    611744    /** The chunk id. */
    612     uint32_t                    idChunk;
     745    uint32_t volatile                   idChunk;
    613746#if HC_ARCH_BITS == 64
    614     uint32_t                    u32Padding; /**< alignment padding. */
     747    uint32_t                            u32Padding; /**< alignment padding. */
    615748#endif
    616749    /** The chunk map. */
    617     HCPTRTYPE(PPGMCHUNKR3MAP)   pChunk;
     750    HCPTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
    618751} PGMCHUNKR3MAPTLBE;
    619752/** Pointer to the an allocation chunk ring-3 mapping TLB entry. */
    620753typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;
    621754
    622 /** The number of TLB entries in PGMCHUNKR3TLB. */
    623 #define PGMCHUNKR3MAPTLB_ENTRIES   32
     755/** The number of TLB entries in PGMCHUNKR3MAPTLB.
     756 * @remark Must be a power of two value. */
     757#define PGM_CHUNKR3MAPTLB_ENTRIES   32
    624758
    625759/**
     
    642776{
    643777    /** The TLB entries. */
    644     PGMCHUNKR3MAPTLBE   aEntries[PGMCHUNKR3MAPTLB_ENTRIES];
     778    PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
    645779} PGMCHUNKR3MAPTLB;
     780
     781/**
     782 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
     783 * @returns Chunk TLB index.
     784 * @param   idChunk         The Chunk ID.
     785 */
     786#define PGM_CHUNKR3MAPTLB_IDX(idChunk)     ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
    646787
    647788
     
    652793typedef struct PGMPAGER3MAPTLBE
    653794{
    654     /** The page id. */
    655     uint32_t                    idPage;
     795    /** Address of the page. */
     796    RTGCPHYS volatile                   GCPhys;
    656797#if HC_ARCH_BITS == 64
    657     uint32_t                    u32Padding; /**< alignment padding. */
     798    uint32_t                            u32Padding; /**< alignment padding. */
    658799#endif
    659800    /** The guest page. */
    660     HCPTRTYPE(PPGMPAGE)         pPage;
     801    HCPTRTYPE(PPGMPAGE) volatile        pPage;
    661802    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
    662     HCPTRTYPE(PPGMCHUNKR3MAP)   pMap;
     803    HCPTRTYPE(PPGMCHUNKR3MAP) volatile  pMap;
    663804    /** The address */
    664     HCPTRTYPE(void *)           pv;
     805    HCPTRTYPE(void *) volatile          pv;
    665806} PGMPAGER3MAPTLBE;
    666807/** Pointer to an entry in the HC physical TLB. */
     
    668809
    669810
    670 /** The number of entries in the ring-3 guest page mapping TLB. */
    671 #define PGMPAGER3MAPTLB_ENTRIES 64
     811/** The number of entries in the ring-3 guest page mapping TLB.
     812 * @remarks The value must be a power of two. */
     813#define PGM_PAGER3MAPTLB_ENTRIES 64
    672814         
    673815/**
     
    678820{
    679821    /** The TLB entries. */
    680     PGMPAGER3MAPTLBE            aEntries[PGMPAGER3MAPTLB_ENTRIES];
     822    PGMPAGER3MAPTLBE            aEntries[PGM_PAGER3MAPTLB_ENTRIES];
    681823} PGMPAGER3MAPTLB;
    682824/** Pointer to the ring-3 guest page mapping TLB. */
    683825typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;
    684826
    685 
     827/**
     828 * Calculates the index of the TLB entry for the specified guest page.
     829 * @returns Physical TLB index.
     830 * @param   GCPhys      The guest physical address.
     831 */
     832#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
     833
     834
      835/** @name Context neutral page mapper TLB.
      836 *
      837 * Hoping to avoid some code and bug duplication, parts of the GCxxx->CCPtr
      838 * code are written in a context neutral way. Time will show whether
     839 * this actually makes sense or not...
     840 *
     841 * @{ */
     842/** @typedef PPGMPAGEMAPTLB
     843 * The page mapper TLB pointer type for the current context. */
      844/** @typedef PPGMPAGEMAPTLBE
      845 * The page mapper TLB entry pointer type for the current context. */
      846/** @typedef PPPGMPAGEMAPTLBE
      847 * The page mapper TLB entry pointer pointer type for the current context. */
     848/** @def PGMPAGEMAPTLB_ENTRIES
     849 * The number of TLB entries in the page mapper TLB for the current context. */
     850/** @def PGM_PAGEMAPTLB_IDX
     851 * Calculate the TLB index for a guest physical address.
     852 * @returns The TLB index.
     853 * @param   GCPhys      The guest physical address. */
     854/** @typedef PPGMPAGEMAP
     855 * Pointer to a page mapper unit for current context. */
     856/** @typedef PPPGMPAGEMAP
     857 * Pointer to a page mapper unit pointer for current context. */
     858#ifdef IN_GC
     859// typedef PPGMPAGEGCMAPTLB       PPGMPAGEMAPTLB;
     860// typedef PPGMPAGEGCMAPTLBE      PPGMPAGEMAPTLBE;
     861// typedef PPGMPAGEGCMAPTLBE     *PPPGMPAGEMAPTLBE;
     862# define PGM_PAGEMAPTLB_ENTRIES     PGM_PAGEGCMAPTLB_ENTRIES
     863# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGEGCMAPTLB_IDX(GCPhys)     
     864 typedef void *                 PPGMPAGEMAP;
     865 typedef void **                PPPGMPAGEMAP;
     866//#elif IN_RING0
     867// typedef PPGMPAGER0MAPTLB       PPGMPAGEMAPTLB;
     868// typedef PPGMPAGER0MAPTLBE      PPGMPAGEMAPTLBE;
     869// typedef PPGMPAGER0MAPTLBE     *PPPGMPAGEMAPTLBE;
     870//# define PGM_PAGEMAPTLB_ENTRIES     PGM_PAGER0MAPTLB_ENTRIES
     871//# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER0MAPTLB_IDX(GCPhys)     
     872// typedef PPGMCHUNKR0MAP         PPGMPAGEMAP;
     873// typedef PPPGMCHUNKR0MAP        PPPGMPAGEMAP;
     874#else
     875 typedef PPGMPAGER3MAPTLB       PPGMPAGEMAPTLB;
     876 typedef PPGMPAGER3MAPTLBE      PPGMPAGEMAPTLBE;
     877 typedef PPGMPAGER3MAPTLBE     *PPPGMPAGEMAPTLBE;
     878# define PGM_PAGEMAPTLB_ENTRIES     PGM_PAGER3MAPTLB_ENTRIES
     879# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER3MAPTLB_IDX(GCPhys)     
     880 typedef PPGMCHUNKR3MAP         PPGMPAGEMAP;
     881 typedef PPPGMCHUNKR3MAP        PPPGMPAGEMAP;
     882#endif
     883/** @} */
    686884
    687885
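
A condensed sketch of the lookup these structures support; the full version, including the ring-0 call-host path and mapping chunks on demand, is in the PGMAllPhys.cpp hunk further down (the helper name here is invented):

    static PPGMCHUNKR3MAP exampleLookupChunk(PVM pVM, uint32_t idChunk)
    {
        PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
        PPGMCHUNKR3MAP     pMap;
        if (pTlbe->idChunk == idChunk)
            return pTlbe->pChunk;                   /* TLB hit. */
        /* TLB miss: consult the AVL tree of mapped chunks and cache the result. */
        pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
        if (pMap)
        {
            pTlbe->idChunk = idChunk;
            pTlbe->pChunk  = pMap;
        }
        return pMap;    /* NULL if the chunk isn't mapped into ring-3 yet. */
    }
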
     
    18872085# endif
    18882086
     2087    /** Ring-3/0 page mapper TLB hits. */
     2088    STAMCOUNTER StatPageHCMapTlbHits;
     2089    /** Ring-3/0 page mapper TLB misses. */
     2090    STAMCOUNTER StatPageHCMapTlbMisses;
     2091    /** Ring-3/0 chunk mapper TLB hits. */
     2092    STAMCOUNTER StatChunkR3MapTlbHits;
     2093    /** Ring-3/0 chunk mapper TLB misses. */
     2094    STAMCOUNTER StatChunkR3MapTlbMisses;
     2095
    18892096    /** Allocated mbs of guest ram */
    18902097    STAMCOUNTER     StatDynRamTotal;
     
    19362143
    19372144
     2145int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
    19382146#ifdef IN_RING3
    19392147int             pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
     2148int             pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
    19402149
    19412150int             pgmR3PoolInit(PVM pVM);
     
    19432152void            pgmR3PoolReset(PVM pVM);
    19442153
    1945 #endif
     2154#endif /* IN_RING3 */
    19462155#ifdef IN_GC
    19472156void           *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage);
     
    22512460
    22522461
     2462#ifndef IN_GC
     2463/**
     2464 * Queries the Physical TLB entry for a physical guest page,
      2465 * attempting to load the TLB entry if necessary.
     2466 *
     2467 * @returns VBox status code.
     2468 * @retval  VINF_SUCCESS on success
     2469 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
     2470 * @param   pPGM        The PGM instance handle.
     2471 * @param   GCPhys      The address of the guest page.
     2472 * @param   ppTlbe      Where to store the pointer to the TLB entry.
     2473 */
     2474 
     2475DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
     2476{
     2477    int rc;
     2478    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
     2479    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
     2480    {
     2481        STAM_COUNTER_INC(&pPGM->CTXMID(StatPage,MapTlbHits));
     2482        rc = VINF_SUCCESS;
     2483    }
     2484    else
     2485        rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
     2486    *ppTlbe = pTlbe;
     2487    return rc;
     2488}
     2489#endif /* !IN_GC */
     2490
     2491
    22532492#ifndef NEW_PHYS_CODE
    22542493/**
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r4689 r4713  
    187187            {
    188188                pNew->aPages[iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
    189                 pNew->aPages[iPage].u32A = 0;
     189                pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ALLOCATED;
     190                pNew->aPages[iPage].fWrittenTo = 0;
     191                pNew->aPages[iPage].fSomethingElse = 0;
     192                pNew->aPages[iPage].idPage = 0;
    190193                pNew->aPages[iPage].u32B = 0;
    191194            }
     
    204207            {
    205208                pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
    206                 pNew->aPages[iPage].u32A = 0;
     209                pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ZERO;
     210                pNew->aPages[iPage].fWrittenTo = 0;
     211                pNew->aPages[iPage].fSomethingElse = 0;
     212                pNew->aPages[iPage].idPage = 0;
    207213                pNew->aPages[iPage].u32B = 0;
    208214            }
     
    215221            {
    216222                pNew->aPages[iPage].HCPhys = HCPhysDummyPage; /** @todo PAGE FLAGS */
    217                 pNew->aPages[iPage].u32A = 0;
     223                pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ZERO;
     224                pNew->aPages[iPage].fWrittenTo = 0;
     225                pNew->aPages[iPage].fSomethingElse = 0;
     226                pNew->aPages[iPage].idPage = 0;
    218227                pNew->aPages[iPage].u32B = 0;
    219228            }
     
    889898}
    890899
     900
     901/**
     902 * Invalidates the TLB for the ring-3 mapping cache.
     903 *
     904 * @param   pVM         The VM handle.
     905 */
     906PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
     907{
     908    pgmLock(pVM);
     909    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
     910    {
     911        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GPM_CHUNKID;
     912        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
     913    }
     914    pgmUnlock(pVM);
     915}
     916
     917
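
The three page initialisation loops above share the same shape; an invented helper of this kind (not in the changeset) makes the per-page field assignments explicit:

    DECLINLINE(void) pgmPageExampleInit(PPGMPAGE pPage, RTHCPHYS HCPhys, uint32_t uState)
    {
        pPage->HCPhys         = HCPhys;     /** @todo PAGE FLAGS */
        pPage->u2State        = uState;     /* PGM_PAGE_STATE_ALLOCATED or _ZERO above. */
        pPage->fWrittenTo     = 0;
        pPage->fSomethingElse = 0;
        pPage->idPage         = 0;
        pPage->u32B           = 0;
    }
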
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r4693 r4713  
    125125
    126126
    127 #ifdef NEW_PHYS_CODE
     127/**
     128 * Invalidates the GC page mapping TLB.
     129 *
     130 * @param   pVM     The VM handle.
     131 */
     132PDMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
     133{
     134    /* later */
     135    NOREF(pVM);
     136}
     137
     138
     139/**
     140 * Invalidates the ring-0 page mapping TLB.
     141 *
     142 * @param   pVM     The VM handle.
     143 */
     144PDMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
     145{
     146    PGMPhysInvalidatePageR3MapTLB(pVM);
     147}
     148
     149
     150/**
     151 * Invalidates the ring-3 page mapping TLB.
     152 *
     153 * @param   pVM     The VM handle.
     154 */
     155PDMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
     156{
     157    pgmLock(pVM);
     158    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
     159    {
     160        pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
     161        pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
     162        pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
     163        pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
     164    }
     165    pgmUnlock(pVM);
     166}
    128167
    129168
     
    165204        case PGM_PAGE_STATE_WRITE_MONITORED:
    166205            pPage->fWrittenTo = true;
    167             pPage->u2State = PGM_PAGE_STATE_WRITE_ALLOCATED;
     206            pPage->u2State = PGM_PAGE_STATE_ALLOCATED;
    168207            /* fall thru */
     208        default: /* to shut up GCC */
    169209        case PGM_PAGE_STATE_ALLOCATED:
    170210            return VINF_SUCCESS;
     
    209249     */
    210250    *ppMap = NULL;
    211     RTHCPHYS HCPhys = pPage->HCPhys & PGM_HCPHYS_PAGE_MASK;
    212     Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg)
     251    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
     252    /** @todo  Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg); */
    213253    return PGMGCDynMapHCPage(pVM, HCPhys, ppv);
    214254
    215255#else /* IN_RING3 || IN_RING0 */
    216256
    217 /**
    218  * Calculates the index of a guest page in the Ring-3 Chunk TLB.
    219  * @returns Chunk TLB index.
    220  * @param   idChunk         The Chunk ID.
    221  */
    222 #define PGM_R3CHUNKTLB_IDX(idChunk)     ( (idChunk) & (PGM_R3CHUNKTLB_ENTRIES - 1) )
    223 
    224257    /*
    225258     * Find/make Chunk TLB entry for the mapping chunk.
    226259     */
    227     PPGMR3CHUNK pChunk;
    228     const uint32_t idChunk = PGM_PAGE_GET_PAGEID(pPage) >> XXX_CHUNKID_SHIFT;
    229     PGMR3CHUNKTLBE pTlbe = &pVM->pgm.s.R3ChunkTlb.aEntries[PGM_R3CHUNKTLB_IDX(idChunk)];
     260    PPGMCHUNKR3MAP pMap;
     261    const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
     262    PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
    230263    if (pTlbe->idChunk == idChunk)
    231264    {
    232         STAM_COUNTER_INC(&pVM->pgm.s.StatR3ChunkTlbHits);
    233         pChunk = pTlbe->pChunk;
    234     }
    235     else
    236     {
    237         STAM_COUNTER_INC(&pVM->pgm.s.StatR3ChunkTlbMisses);
     265        STAM_COUNTER_INC(&pVM->pgm.s.StatChunkR3MapTlbHits);
     266        pMap = pTlbe->pChunk;
     267    }
     268    else if (idChunk != NIL_GPM_CHUNKID)
     269    {
     270        STAM_COUNTER_INC(&pVM->pgm.s.StatChunkR3MapTlbMisses);
    238271
    239272        /*
    240273         * Find the chunk, map it if necessary.
    241274         */
    242         pChunk = (PPGMR3CHUNK)RTAvlU32Get(&pVM->pgm.s.R3ChunkMap.Tree, idChunk);
    243         if (!pChunk)
     275        pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
     276        if (!pMap)
    244277        {
    245278#ifdef IN_RING0
    246279            int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
    247280            AssertRCReturn(rc, rc);
    248             pChunk = (PPGMR3CHUNK)RTAvlU32Get(&pVM->pgm.s.R3ChunkMap.Tree, idChunk);
    249             Assert(pChunk);
     281            pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
     282            Assert(pMap);
    250283#else
    251             int rc = PGMR3PhysChunkMap(pVM, idChunk, &pChunk);
     284            int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
    252285            if (VBOX_FAILURE(rc))
    253286                return rc;
     
    259292         */
    260293        pTlbe->idChunk = idChunk;
    261         pTlbe->pChunk = pChunk;
    262         pChunk->iAge = 0;
    263     }
    264 
    265     *ppv = (uint8_t *)pMap->pv + (iPage << PAGE_SHIFT);
    266     *ppMap = pChunk;
     294        pTlbe->pChunk = pMap;
     295        pMap->iAge = 0;
     296    }
     297    else
     298    {
     299        Assert(PGM_PAGE_IS_ZERO(pPage));
     300        ///@todo *ppv = pVM->pgm.s.pvZeroPgR3;
     301        *ppMap = NULL;
     302        return VINF_SUCCESS;
     303    }
     304
     305    *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
     306    *ppMap = pMap;
    267307    return VINF_SUCCESS;
    268308#endif /* IN_RING3 */
     
    270310
    271311
    272 /**
    273  * Calculates the index of a guest page in the Physical TLB.
    274  * @returns Physical TLB index.
    275  * @param   GCPhys      The guest physical address.
    276  */
    277 #define PGM_R3PHYSTLB_IDX(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGM_R3PHYSTLB_ENTRIES - 1) )
    278 
    279 #if defined(IN_RING3) || defined(IN_RING0)
    280 # define PGM_PHYSTLB_IDX(GCPhys)   PGM_R3PHYSTLB_IDX(GCPhys)
    281 # define PGMPHYSTLBE PGMR3PHYSTLBE
    282 #else /* IN_GC */
    283 # define PGM_PHYSTLB_IDX(GCPhys)   PGM_GCPHYSTLB_IDX(GCPhys)
    284 # define PGMPHYSTLBE PGMGCPHYSTLBE
    285 #endif
    286 
    287 
     312#ifndef IN_GC
    288313/**
    289314 * Load a guest page into the ring-3 physical TLB.
     
    297322int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
    298323{
    299     STAM_COUNTER_INC(&pPGM->StatR3PhysTlbMisses);
     324    STAM_COUNTER_INC(&pPGM->CTXMID(StatPage,MapTlbMisses));
    300325
    301326    /*
     
    321346     */
    322347    PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
    323     PPGMR3PHYSTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PHYSTLB_IDX(GCPhys)];
    324     if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO)
     348    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
     349    if (!PGM_PAGE_IS_ZERO(pPage))
    325350    {
    326351        void *pv;
    327352        PPGMPAGEMAP pMap;
    328         int rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pMap, &pv);
     353        int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
    329354        if (VBOX_FAILURE(rc))
    330355            return rc;
     
    334359    else
    335360    {
    336         Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
     361        /** @todo Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg); */
    337362        pTlbe->pMap = NULL;
    338         pTlbe->pv = pPGM->pvZeroPgR3;
     363        /** @todo pTlbe->pv = pPGM->pvZeroPgR3; */
    339364    }
    340365    pTlbe->pPage = pPage;
    341366    return VINF_SUCCESS;
    342367}
    343 
    344 
    345 /**
    346  * Queries the Physical TLB entry for a physical guest page,
    347  * attemting to load the TLB entry if necessary.
    348  *
    349  * @returns VBox status code.
    350  * @retval  VINF_SUCCESS on success
    351  * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
    352  * @param   pPgm        The PGM instance handle.
    353  * @param   GCPhys      The address of the guest page.
    354  * @param   ppTlbe      Where to store the pointer to the TLB entry.
    355  */
    356 DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPgm, RTGCPHYS GCPhys, PPPGMPHYSTLBE ppTlbe)
    357 {
    358     int rc;
    359     PGMPHYSTLBE pTlbe = &pPgm->CTXSUFF(PhysTlb).aEntries[PGM_PHYSTLB_IDX(GCPhys)];
    360     if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    361     {
    362         STAM_COUNTER_INC(&pPgm->StatR3PhysTlbHits);
    363         rc = VINF_SUCCESS;
    364     }
    365     else
    366         rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
    367     *ppTlbe = pTlbe;
    368     return rc;
    369 }
    370 
    371 
    372 #endif /* NEW_PHYS_CODE */
     368#endif /* !IN_GC */
    373369
    374370
     
    437433
    438434            *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
     435            pLock->pvPage = pPage;
     436            pLock->pvMap = pMap;
    439437        }
    440438    }
     
    577575    pgmLock(pVM);
    578576
    579     /*
    580      * Try the Physical TLB cache.
    581      * There's a high likely hood that this will work out since it's a short-term lock.
    582      */
    583     PPGMR3PHYSTLBE pTlbe = &pVM->pgm.s.R3PhysTlb.aEntries[PGM_R3PHYSTLB_IDX(GCPhys)];
    584     if (RT_LIKELY(pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK)))
    585     {
    586         PPGMPAGE pPage = pTlbe->pPage;
    587         Assert(PGM_PAGE_IS_NORMAL(pPage));
    588         Assert(pPage->cLocks >= 1);
    589         if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
    590             pPage->cLocks--;
    591 
    592         PPGMR3CHUNK pChunk = pTlbe->pChunk;
    593         Assert(pChunk->cRefs >= 1);
    594         pChunk->cRefs--;
    595         pChunk->iAge = 0;
    596     }
    597     else
    598     {
    599         /*
    600          * Find the page and unlock it.
    601          */
    602         PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
    603         RTGCPHYS off = GCPhys - pRam->GCPhys;
    604         if (RT_UNLIKELY(off >= pRam->cb))
    605         {
    606             do
    607             {
    608                 pRam = CTXSUFF(pRam->pNext);
    609                 AssertMsgRelease(pRam, ("GCPhys=%RGp\n", GCPhys));
    610                 off = GCPhys - pRam->GCPhys;
    611             } while (off >= pRam->cb);
    612         }
    613         PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
    614         Assert(PGM_PAGE_IS_NORMAL(pTlbe->pPage));
    615         Assert(pPage->cLocks >= 1);
    616         if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
    617             pPage->cLocks--;
    618 
    619         /*
    620          * Now find the chunk mapping and unlock it.
    621          */
    622         PPGMR3CHUNK pChunk;
    623         const uint32_t idChunk = PGM_PAGE_GET_PAGEID(pPage) >> XXX_CHUNKID_SHIFT;
    624         PGMR3CHUNKTLBE pTlbe = &pVM->pgm.s.R3ChunkTlb.aEntries[PGM_R3CHUNKTLB_IDX(idChunk)];
    625         if (pTlbe->idChunk == idChunk)
    626             pChunk = pTlbe->pChunk;
    627         else
    628         {
    629             pChunk = (PPGMR3CHUNK)RTAvlU32Get(&pVM->pgm.s.R3ChunkMap.Tree, idChunk);
    630             AssertMsgRelease(pChunk, ("GCPhys=%RGp\n", GCPhys));
    631             pChunk->iAge = 0;
    632         }
    633         Assert(pChunk->cRefs >= 1);
    634         pChunk->cRefs--;
    635     }
     577    PPGMPAGE pPage = (PPGMPAGE)pLock->pvPage;
     578    Assert(pPage->cLocks >= 1);
     579    if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
     580        pPage->cLocks--;
     581
      582    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pLock->pvMap;
     583    Assert(pChunk->cRefs >= 1);
     584    pChunk->cRefs--;
     585    pChunk->iAge = 0;
    636586
    637587    pgmUnlock(pVM);
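
Putting the pieces together, a simplified sketch (invented function; locking and the zero page special case omitted) of the GCPhys-to-host-pointer flow these hunks implement:

    static int exampleGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, void **ppv)
    {
        PPGMPAGEMAPTLBE pTlbe;
        int rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
        if (VBOX_FAILURE(rc))
            return rc;                      /* not a valid guest physical address. */
        *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
        return VINF_SUCCESS;
    }
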