Changeset 4738 in vbox for trunk/src/VBox


Timestamp:
Sep 12, 2007 4:00:54 PM
Author:
vboxsync
Message:

more new phys code.

Location:
trunk/src/VBox/VMM
Files:
1 added
9 edited

Legend:

  (space)  unmodified
  +        added
  -        removed
  • trunk/src/VBox/VMM/EM.cpp

    (r4551 → r4738)

     }

+    /*
+     * Allocate handy pages (just in case the above actions have consumed some pages).
+     */
+    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
+    {
+        int rc = PGMR3PhysAllocateHandyPages(pVM);
+        if (VBOX_FAILURE(rc))
+            return rc;
+    }
+
     return VINF_SUCCESS;
 }

…

         if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
             rc = emR3HighPriorityPostForcedActions(pVM, rc);
-
-#ifdef PGM_CACHE_VERY_STRICT
-        /*
-         * Page manager cache checks.
-         */
-        if (    rc == VINF_EM_RAW_INTERRUPT
-            ||  rc == VINF_EM_RAW_GUEST_TRAP
-            ||  rc == VINF_IOM_HC_IOPORT_READ
-            ||  rc == VINF_IOM_HC_IOPORT_WRITE
-            //||  rc == VINF_PATM_PATCH_INT3
-           )
-            pgmCacheCheckPD(pVM, pCtx->cr0, pCtx->cr3, pCtx->cr4);
-#endif

 #ifdef VBOX_STRICT

…

         }

+        /*
+         * Allocate handy pages.
+         */
+        if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
+        {
+            rc2 = PGMR3PhysAllocateHandyPages(pVM);
+            UPDATE_RC();
+        }
+
         /*
          * Debugger Facility request.

…

 #endif
         /* check that we got them all */
-        Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_DBGF | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_INHIBIT_INTERRUPTS)));
+        Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_DBGF | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_INHIBIT_INTERRUPTS | VM_FF_PGM_NEED_HANDY_PAGES)));
     }
  • trunk/src/VBox/VMM/Makefile.kmk

    (r4665 → r4738)

         VMMR0/TRPMR0A.asm \
         VMMR0/PDMR0Device.cpp \
+        VMMR0/PGMR0.cpp \
         VMMAll/EMAll.cpp \
         VMMAll/EMAllA.asm \
  • trunk/src/VBox/VMM/PGM.cpp

    (r4714 → r4738)

  * So, when RTR0MemObjAllocPhysNC returns VERR_NOT_SUPPORTED the page allocator
  * will return to the ring-3 caller (and later ring-0) and ask it to seed
- * the page allocator with some fresh pages (VERR_GVM_SEED_ME). Ring-3 will
+ * the page allocator with some fresh pages (VERR_GMM_SEED_ME). Ring-3 will
  * then perform an SUPPageAlloc(cbChunk >> PAGE_SHIFT) call and make a
  * "SeededAllocPages" call to ring-0.
     
     STAM_REG(pVM, &pPGM->StatChunkR3MapTlbHits,             STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbHits",                STAMUNIT_OCCURENCES, "TLB hits.");
     STAM_REG(pVM, &pPGM->StatChunkR3MapTlbMisses,           STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbMisses",              STAMUNIT_OCCURENCES, "TLB misses.");
+    STAM_REG(pVM, &pPGM->StatPageReplaceShared,             STAMTYPE_COUNTER, "/PGM/Page/ReplacedShared",               STAMUNIT_OCCURENCES, "Times a shared page was replaced.");
+    STAM_REG(pVM, &pPGM->StatPageReplaceZero,               STAMTYPE_COUNTER, "/PGM/Page/ReplacedZero",                 STAMUNIT_OCCURENCES, "Times the zero page was replaced.");
+    STAM_REG(pVM, &pPGM->StatPageHandyAllocs,               STAMTYPE_COUNTER, "/PGM/Page/HandyAllocs",                  STAMUNIT_OCCURENCES, "Number of times we've allocated more handy pages.");
+    STAM_REG(pVM, &pPGM->cAllPages,                         STAMTYPE_U32,     "/PGM/Page/cAllPages",                    STAMUNIT_OCCURENCES, "The total number of pages.");
+    STAM_REG(pVM, &pPGM->cPrivatePages,                     STAMTYPE_U32,     "/PGM/Page/cPrivatePages",                STAMUNIT_OCCURENCES, "The number of private pages.");
+    STAM_REG(pVM, &pPGM->cSharedPages,                      STAMTYPE_U32,     "/PGM/Page/cSharedPages",                 STAMUNIT_OCCURENCES, "The number of shared pages.");
+    STAM_REG(pVM, &pPGM->cZeroPages,                        STAMTYPE_U32,     "/PGM/Page/cZeroPages",                   STAMUNIT_OCCURENCES, "The number of zero backed pages.");

 #ifdef PGMPOOL_WITH_GCPHYS_TRACKING
  • trunk/src/VBox/VMM/PGMInternal.h

    (r4714 → r4738)

 /** The chunk shift. (2^20 = 1 MB) */
-#define GPM_CHUNK_SHIFT                 20
+#define GMM_CHUNK_SHIFT                 20
 /** The allocation chunk size. */
-#define GPM_CHUNK_SIZE                  (1U << GPM_CHUNK_SIZE_LOG2)
+#define GMM_CHUNK_SIZE                  (1U << GMM_CHUNK_SHIFT)
 /** The shift factor for converting a page id into a chunk id. */
-#define GPM_CHUNKID_SHIFT               (GPM_CHUNK_SHIFT - PAGE_SHIFT)
+#define GMM_CHUNKID_SHIFT               (GMM_CHUNK_SHIFT - PAGE_SHIFT)
 /** The NIL Chunk ID value. */
-#define NIL_GPM_CHUNKID                 0
+#define NIL_GMM_CHUNKID                 0
 /** The NIL Page ID value. */
-#define NIL_GPM_PAGEID                  0
+#define NIL_GMM_PAGEID                  0

 /**
  * Get the Page ID.
- * @returns The Page ID; NIL_GPM_PAGEID if it's a ZERO page.
+ * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
  * @param   pPage       Pointer to the physical guest page tracking structure.
  */

…

 /**
  * Get the Chunk ID.
- * @returns The Chunk ID; NIL_GPM_CHUNKID if it's a ZERO page.
+ * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
  * @param   pPage       Pointer to the physical guest page tracking structure.
  */
-#define PGM_PAGE_GET_CHUNKID(pPage)     ( (pPage)->idPage >> GPM_CHUNKID_SHIFT )
+#define PGM_PAGE_GET_CHUNKID(pPage)     ( (pPage)->idPage >> GMM_CHUNKID_SHIFT )
 /* later:
-#if GPM_CHUNKID_SHIFT == 12
+#if GMM_CHUNKID_SHIFT == 12
 # define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhys >> 48) )
-#elif GPM_CHUNKID_SHIFT > 12
-# define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhys >> (48 + (GPM_CHUNKID_SHIFT - 12)) )
-#elif GPM_CHUNKID_SHIFT < 12
-# define PGM_PAGE_GET_CHUNKID(pPage)    (   ( (uint32_t)((pPage)->HCPhys >> 48)   << (12 - GPM_CHUNKID_SHIFT) ) \
-                                         |  ( (uint32_t)((pPage)->HCPhys & 0xfff) >> GPM_CHUNKID_SHIFT ) )
+#elif GMM_CHUNKID_SHIFT > 12
+# define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhys >> (48 + (GMM_CHUNKID_SHIFT - 12)) )
+#elif GMM_CHUNKID_SHIFT < 12
+# define PGM_PAGE_GET_CHUNKID(pPage)    (   ( (uint32_t)((pPage)->HCPhys >> 48)   << (12 - GMM_CHUNKID_SHIFT) ) \
+                                         |  ( (uint32_t)((pPage)->HCPhys & 0xfff) >> GMM_CHUNKID_SHIFT ) )
 #else
-# error "GPM_CHUNKID_SHIFT isn't defined or something."
+# error "GMM_CHUNKID_SHIFT isn't defined or something."
 #endif
 */

…

  * @param   pPage       Pointer to the physical guest page tracking structure.
  */
-#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (pPage)->idPage & (RT_BIT_32(GPM_CHUNKID_SHIFT) - 1) )
+#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (pPage)->idPage & (RT_BIT_32(GMM_CHUNKID_SHIFT) - 1) )
 /* later:
-#if GPM_CHUNKID_SHIFT <= 12
-# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  ( (uint32_t)((pPage)->HCPhys & (RT_BIT_32(GPM_CHUNKID_SHIFT) - 1)) )
+#if GMM_CHUNKID_SHIFT <= 12
+# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  ( (uint32_t)((pPage)->HCPhys & (RT_BIT_32(GMM_CHUNKID_SHIFT) - 1)) )
 #else
 # define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  (   (uint32_t)((pPage)->HCPhys & 0xfff) \
-                                             |  ( (uint32_t)((pPage)->HCPhys >> 48) & (RT_BIT_32(GPM_CHUNKID_SHIFT - 12) - 1) ) )
+                                             |  ( (uint32_t)((pPage)->HCPhys >> 48) & (RT_BIT_32(GMM_CHUNKID_SHIFT - 12) - 1) ) )
 #endif
 */
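(Aside: with the usual PAGE_SHIFT of 12, GMM_CHUNKID_SHIFT works out to 20 - 12 = 8, i.e. 256 pages per 1 MB chunk, and the two macros above reduce to a shift and a mask. A quick illustration with a made-up idPage value:)

    /* Illustration of the chunk-id arithmetic above, assuming PAGE_SHIFT == 12
     * so that GMM_CHUNKID_SHIFT == 8 (256 pages per 1 MB chunk). */
    uint32_t idPage  = 0x00012345;                  /* hypothetical page id */
    uint32_t idChunk = idPage >> 8;                 /* 0x0123: the chunk id */
    uint32_t iPage   = idPage & (RT_BIT_32(8) - 1); /* 0x45:   page within the chunk */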
     
  */
 #define PGM_PAGE_IS_ZERO(pPage)         ( (pPage)->u2State == PGM_PAGE_STATE_ZERO )
+
+/**
+ * Checks if the page is backed by a SHARED page.
+ * @returns true/false.
+ * @param   pPage       Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_SHARED(pPage)        ( (pPage)->u2State == PGM_PAGE_STATE_SHARED )


…

     /** The GC mapping of the zero page. */
     RTGCPTR                         pvZeroPgGC;
-#if GC_ARCH_BITS == 32
+#if GC_ARCH_BITS != 32
     uint32_t                        u32ZeroAlignment; /**< Alignment padding. */
 #endif
     /** @}*/

+    /** The number of handy pages. */
+    uint32_t                        cHandyPages;
+    /**
+     * Array of handy pages.
+     *
+     * This array is used in a two-way communication between pgmPhysAllocPage
+     * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
+     * an intermediary.
+     *
+     * The size of this array is important, see pgmPhysEnsureHandyPage for details.
+     * (The current size of 32 pages means 128 KB of memory.)
+     */
+    struct
+    {
+        /** The host physical address before pgmPhysAllocPage uses it,
+         * and the guest physical address afterwards.
+         * This is NIL_RTHCPHYS if the array entry isn't valid.
+         * ASSUMES: sizeof(RTHCPHYS) >= sizeof(RTGCPHYS). */
+        RTHCPHYS                    HCPhysGCPhys;
+        /** The Page ID.
+         * This is NIL_GMM_PAGEID if the array entry isn't valid. */
+        uint32_t                    idPage;
+        /** The Page ID of the shared page that pgmPhysAllocPage replaced.
+         * This is NIL_GMM_PAGEID if no shared page was replaced. */
+        uint32_t                    idSharedPage;
+    }                               aHandyPages[32];
+
     /** @name Release Statistics
      * @{ */
+    uint32_t                        cAllPages;          /**< The total number of pages. (Should be Private + Shared + Zero.) */
+    uint32_t                        cPrivatePages;      /**< The number of private pages. */
+    uint32_t                        cSharedPages;       /**< The number of shared pages. */
+    uint32_t                        cZeroPages;         /**< The number of zero backed pages. */
     /** The number of times the guest has switched mode since last reset or statistics reset. */
     STAMCOUNTER                     cGuestModeChanges;
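(The consuming end of this two-way handshake is visible in pgmPhysAllocPage in PGMAllPhys.cpp below; condensed from the code added in this changeset:)

    /* Condensed from pgmPhysAllocPage: pop the last handy page, keep its host
     * physical address, and store the guest address it now backs into the entry
     * so the next allocation round can tell the GMM what the page is used for. */
    uint32_t       iHandyPage = --pVM->pgm.s.cHandyPages;
    const RTHCPHYS HCPhys     = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys; /* in:  host physical address */
    pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys;                    /* out: guest physical address */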
     
     /** Ring-3/0 chunk mapper TLB misses. */
     STAMCOUNTER StatChunkR3MapTlbMisses;
+    /** Times a shared page has been replaced by a private one. */
+    STAMCOUNTER StatPageReplaceShared;
+    /** Times the zero page has been replaced by a private one. */
+    STAMCOUNTER StatPageReplaceZero;
+    /** The number of times we've executed GMMR3AllocateHandyPages. */
+    STAMCOUNTER StatPageHandyAllocs;

     /** Allocated mbs of guest ram */

…

 int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
 #ifdef IN_RING3
+int             pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
+#ifndef NEW_PHYS_CODE
 int             pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
-int             pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
+#endif

 int             pgmR3PoolInit(PVM pVM);
  • trunk/src/VBox/VMM/PGMPhys.cpp

    (r4713 → r4738)

 }

+#ifndef NEW_PHYS_CODE

 /**

…

 }

+#endif /* !NEW_PHYS_CODE */

 /**

…


-#define VMMR0_DO_PGM_MAP_CHUNK 0 // later
-/**
- * Argument package for the VMMR0_DO_PGM_MAP_CHUNK request.
- */
-typedef struct PGMMAPCHUNKREQ
+/**
+ * Argument package for the VMMR0_DO_GMM_MAP_UNMAP_CHUNK request.
+ */
+typedef struct GMMMAPUNMAPCHUNKREQ
 {
     /** The chunk to map, UINT32_MAX if unmap only. (IN) */

…

     /** Where the mapping address is returned. (OUT) */
     RTR3PTR     pvR3;
-} PGMMAPCHUNKREQ;
+} GMMMAPUNMAPCHUNKREQ;


…

      * necessary unmap another one to make space in the mapping cache.
      */
-    PGMMAPCHUNKREQ Req;
+    GMMMAPUNMAPCHUNKREQ Req;
     Req.pvR3 = NULL;
     Req.idChunkMap = idChunk;

…

         Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
     /** @todo SUPCallVMMR0Ex needs to support in+out or similar. */
-    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_MAP_CHUNK, &Req, sizeof(Req));
+    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, &Req, sizeof(Req));
     if (VBOX_SUCCESS(rc))
     {

…

     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
     {
-        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GPM_CHUNKID;
+        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
         pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
     }

…


+/**
+ * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
+ *
+ * @returns The following VBox status codes.
+ * @retval  VINF_SUCCESS on success. FF cleared.
+ * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
+ *
+ * @param   pVM         The VM handle.
+ */
+PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
+{
+    pgmLock(pVM);
+    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, NULL, 0);
+    if (rc == VERR_GMM_SEED_ME)
+    {
+        void *pvChunk;
+        rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
+        if (VBOX_SUCCESS(rc))
+            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, pvChunk, 0);
+        if (VBOX_FAILURE(rc))
+        {
+            LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
+            rc = VINF_EM_NO_MEMORY;
+        }
+    }
+    pgmUnlock(pVM);
+    Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
+    return rc;
+}
+
  • trunk/src/VBox/VMM/VMM.cpp

    (r4689 → r4738)

             break;
         }
+
+        /*
+         * Allocates more handy pages.
+         */
+        case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
+        {
+            pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
+            break;
+        }
+
 #ifndef NEW_PHYS_CODE

  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    (r4714 → r4738)

 #include <VBox/vmm.h>
 #include <VBox/iom.h>
+#include <VBox/rem.h>
 #include "PGMInternal.h"
 #include <VBox/vm.h>

…


+
+/**
+ * Makes sure that there is at least one handy page ready for use.
+ *
+ * This will also take the appropriate actions when reaching water-marks.
+ *
+ * @returns The following VBox status codes.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_EM_NO_MEMORY if we're really out of memory.
+ *
+ * @param   pVM     The VM handle.
+ *
+ * @remarks Must be called from within the PGM critical section. It may
+ *          nip back to ring-3/0 in some cases.
+ */
+static int pgmPhysEnsureHandyPage(PVM pVM)
+{
+    /** @remarks
+     * low-water mark logic for R0 & GC:
+     *      - 75%: Set FF.
+     *      - 50%: Force return to ring-3 ASAP.
+     *
+     * For ring-3 there is a little problem wrt the recompiler, so:
+     *      - 75%: Set FF.
+     *      - 50%: Try allocate pages; on failure we'll force REM to quit ASAP.
+     *
+     * The basic idea is that we should be able to get out of any situation with
+     * only 50% of handy pages remaining.
+     *
+     * At the moment we'll not adjust the number of handy pages relative to the
+     * actual VM RAM commitment, that's too much work for now.
+     */
+    Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
+    if (    !pVM->pgm.s.cHandyPages
+#ifdef IN_RING3
+        ||   pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2 /* 50% */
+#endif
+       )
+    {
+        Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
+#ifdef IN_RING3
+        int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, NULL, 0);
+#elif defined(IN_RING0)
+        /** @todo call PGMR0PhysAllocateHandyPages directly - need to make sure we can call kernel code first and deal with the seeding fallback. */
+        int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
+#else
+        int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
+#endif
+        if (RT_UNLIKELY(rc != VINF_SUCCESS))
+        {
+            Assert(rc == VINF_EM_NO_MEMORY);
+            if (!pVM->pgm.s.cHandyPages)
+            {
+                LogRel(("PGM: no more handy pages!\n"));
+                return VERR_EM_NO_MEMORY;
+            }
+            Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
+#ifdef IN_RING3
+            REMR3NotifyFF(pVM);
+#else
+            VM_FF_SET(pVM, VM_FF_TO_R3);
+#endif
+        }
+        Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
+    }
+    else if (pVM->pgm.s.cHandyPages - 1 <= (RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 4) * 3) /* 75% */
+    {
+        VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
+#ifndef IN_RING3
+        if (pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2)
+        {
+            Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
+            VM_FF_SET(pVM, VM_FF_TO_R3);
+        }
+#endif
+    }
+
+    return VINF_SUCCESS;
+}
+
+
 /**
  * Replace a zero or shared page with a new page that we can write to.
  *
- * @returns VBox status.
- * @todo    Define the return values and propagate them up the call tree..
+ * @returns The following VBox status codes.
+ * @retval  VINF_SUCCESS on success, pPage is modified.
+ * @retval  VERR_EM_NO_MEMORY if we're totally out of memory.
+ *
+ * @todo    Propagate VERR_EM_NO_MEMORY up the call tree.
  *
  * @param   pVM         The VM address.
- * @param   pPage       The physical page tracking structure.
+ * @param   pPage       The physical page tracking structure. This will
+ *                      be modified on success.
  * @param   GCPhys      The address of the page.
  *
- * @remarks Called from within the PGM critical section.
+ * @remarks Must be called from within the PGM critical section. It may
+ *          nip back to ring-3/0 in some cases.
+ *
+ * @remarks This function shouldn't really fail, however if it does
+ *          it probably means we've screwed up the size of the handy
+ *          page array and/or the low-water mark of handy pages. Or, that
+ *          some device I/O is causing a lot of pages to be allocated
+ *          while the host is in a low-memory condition.
  */
 int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
 {
-    return VERR_NOT_IMPLEMENTED;
+    /*
+     * Ensure that we've got a page handy, take it and use it.
+     */
+    int rc = pgmPhysEnsureHandyPage(pVM);
+    if (VBOX_FAILURE(rc))
+    {
+        Assert(rc == VERR_EM_NO_MEMORY);
+        return rc;
+    }
+    AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%d %RGp\n", PGM_PAGE_GET_STATE(pPage), GCPhys));
+    Assert(!PGM_PAGE_IS_RESERVED(pPage));
+    Assert(!PGM_PAGE_IS_MMIO(pPage));
+
+    uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
+    Assert(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages));
+    Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
+    Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
+    Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
+    Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
+
+    /*
+     * There are one or two actions to be taken the next time we allocate handy pages:
+     *      - Tell the GMM (global memory manager) what the page is being used for.
+     *        (Speeds up replacement operations - sharing and defragmenting.)
+     *      - If the current backing is shared, it must be freed.
+     */
+    const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
+    pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys;
+
+    if (PGM_PAGE_IS_SHARED(pPage))
+    {
+        pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
+        Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
+        VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
+
+        Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
+              GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
+        STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceShared);
+        pVM->pgm.s.cSharedPages--;
+    }
+    else
+    {
+        Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
+        STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceZero);
+        pVM->pgm.s.cZeroPages--;
+    }
+
+    /*
+     * Do the PGMPAGE modifications.
+     */
+    pVM->pgm.s.cPrivatePages++;
+    PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
+    PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
+    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
+
+    return VINF_SUCCESS;
 }

…

         pMap = pTlbe->pChunk;
     }
-    else if (idChunk != NIL_GPM_CHUNKID)
+    else if (idChunk != NIL_GMM_CHUNKID)
     {
         STAM_COUNTER_INC(&pVM->pgm.s.StatChunkR3MapTlbMisses);
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    (r4250 → r4738)

         }

+        /*
+         * PGM wrappers.
+         */
+        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
+            return PGMR0PhysAllocateHandyPages(pVM);
+
+#if 0
+        /*
+         * GMM wrappers
+         */
+        case VMMR0_DO_GMM_ALLOCATE_PAGES:
+            return GMMR0AllocatePages(pVM, ...);
+        case VMMR0_DO_GMM_FREE_PAGES:
+            return GMMR0FreePages(pVM, ...);
+        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
+            return GMMR0MapUnmapChunk(pVM, ...);
+        case VMMR0_DO_GMM_SEED_CHUNK:
+            return GMMR0SeedChunk(pVM, (RTR3PTR)pvArg);
+#endif
+
 #ifdef VBOX_WITH_INTERNAL_NETWORKING
         /*
  • trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp

    (r4714 → r4738)

     GEN_CHECK_OFF(PGM, pvZeroPgR0);
     GEN_CHECK_OFF(PGM, pvZeroPgGC);
+    GEN_CHECK_OFF(PGM, cHandyPages);
+    GEN_CHECK_OFF(PGM, aHandyPages);
+    GEN_CHECK_OFF(PGM, aHandyPages[1]);
+    GEN_CHECK_OFF(PGM, aHandyPages[1].HCPhysGCPhys);
+    GEN_CHECK_OFF(PGM, aHandyPages[1].idPage);
+    GEN_CHECK_OFF(PGM, aHandyPages[1].idSharedPage);
+    GEN_CHECK_OFF(PGM, cAllPages);
+    GEN_CHECK_OFF(PGM, cPrivatePages);
+    GEN_CHECK_OFF(PGM, cSharedPages);
+    GEN_CHECK_OFF(PGM, cZeroPages);
+    GEN_CHECK_OFF(PGM, cGuestModeChanges);

     GEN_CHECK_SIZE(PGMMAPPING);