VirtualBox

Changeset 23307 in vbox


Ignore:
Timestamp:
Sep 24, 2009 5:33:56 PM (15 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
52803
Message:

VMM: Moved the saved state code out of PGM.cpp and into PGMSavedState.cpp.

Location:
trunk/src/VBox/VMM
Files:
3 edited
1 copied

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/Makefile.kmk

    r22906 r23307  
    107107        PGMPhys.cpp \
    108108        PGMPool.cpp \
     109        PGMSavedState.cpp \
    109110        SELM.cpp \
    110111        SSM.cpp \
  • trunk/src/VBox/VMM/PGM.cpp

    r23306 r23307  
    631631static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
    632632#endif
    633 static DECLCALLBACK(int)  pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM);
    634 static DECLCALLBACK(int)  pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
    635 static DECLCALLBACK(int)  pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM);
    636 static DECLCALLBACK(int)  pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
    637 static DECLCALLBACK(int)  pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM);
    638 static DECLCALLBACK(int)  pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
    639633static int                pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0);
    640634static void               pgmR3ModeDataSwitch(PVM pVM, PVMCPU pVCpu, PGMMODE enmShw, PGMMODE enmGst);
     
    12841278    PGMRegisterStringFormatTypes();
    12851279
    1286     rc = SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
    1287                                pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
    1288                                NULL, pgmR3SaveExec, pgmR3SaveDone,
    1289                                NULL, pgmR3Load, NULL);
     1280    rc = pgmR3InitSavedState(pVM, cbRam);
    12901281    if (RT_FAILURE(rc))
    12911282        return rc;
     
    22872278{
    22882279    return 0;
    2289 }
    2290 
    2291 
    2292 /**
    2293  * Find the ROM tracking structure for the given page.
    2294  *
    2295  * @returns Pointer to the ROM page structure. NULL if the caller didn't check
    2296  *          that it's a ROM page.
    2297  * @param   pVM         The VM handle.
    2298  * @param   GCPhys      The address of the ROM page.
    2299  */
    2300 static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys)
    2301 {
    2302     for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
    2303          pRomRange;
    2304          pRomRange = pRomRange->CTX_SUFF(pNext))
    2305     {
    2306         RTGCPHYS off = GCPhys - pRomRange->GCPhys;
    2307         if (GCPhys - pRomRange->GCPhys < pRomRange->cb)
    2308             return &pRomRange->aPages[off >> PAGE_SHIFT];
    2309     }
    2310     return NULL;
    2311 }
    2312 
    2313 
/**
 * Save zero indicator + bits for the specified page.
 *
 * Stream format: a single u8 (0 = zero page, nothing follows; 1 = followed by
 * PAGE_SIZE bytes of page data).
 *
 * @returns VBox status code, errors are logged/asserted before returning.
 * @param   pVM         The VM handle.
 * @param   pSSM        The saved state handle.
 * @param   pPage       The page to save.
 * @param   GCPhys      The address of the page.
 * @param   pRam        The ram range (for error logging).
 */
static int pgmR3SavePage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    int rc;
    if (PGM_PAGE_IS_ZERO(pPage))
        rc = SSMR3PutU8(pSSM, 0);
    else
    {
        /* Map the page read-only and write the indicator + raw contents.
           The intermediate SSMR3PutU8 status is dropped; the final SSMR3PutMem
           status reflects any earlier stream error. */
        void const *pvPage;
        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage);
        AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);

        SSMR3PutU8(pSSM, 1);
        rc = SSMR3PutMem(pSSM, pvPage, PAGE_SIZE);
    }
    return rc;
}
    2340 
    2341 
/**
 * Save a shadowed ROM page.
 *
 * Format: Type, protection, and two pages with zero indicators.  The currently
 * active page (as given by the protection setting) is saved first, then the
 * passive one.
 *
 * @returns VBox status code, errors are logged/asserted before returning.
 * @param   pVM         The VM handle.
 * @param   pSSM        The saved state handle.
 * @param   pPage       The page to save.
 * @param   GCPhys      The address of the page.
 * @param   pRam        The ram range (for error logging).
 */
static int pgmR3SaveShadowedRomPage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    /* Need to save both pages and the current state. */
    PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
    AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);

    SSMR3PutU8(pSSM, PGMPAGETYPE_ROM_SHADOW);
    SSMR3PutU8(pSSM, pRomPage->enmProt);

    int rc = pgmR3SavePage(pVM, pSSM, pPage, GCPhys, pRam);
    if (RT_SUCCESS(rc))
    {
        /* When the ROM mapping is active, the shadow copy is the passive page
           and vice versa. */
        PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
        rc = pgmR3SavePage(pVM, pSSM, pPagePassive, GCPhys, pRam);
    }
    return rc;
}
    2371 
    2372 
/**
 * Prepare for a live save operation.
 *
 * This will attempt to allocate and initialize the tracking structures.  It
 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
 * pgmR3SaveDone will do the cleanups.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.  (Unused here; required by the SSM
 *                      callback signature.)
 */
static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Indicate that we will be using the write monitoring.
     */
    pgmLock(pVM);
    /** @todo find a way of mediating this when more users are added. */
    if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
    {
        /* Another user already owns write monitoring; it cannot be shared. */
        pgmUnlock(pVM);
        AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_2);
    }
    pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
    pgmUnlock(pVM);

    /*
     * Initialize the statistics.
     */
    pVM->pgm.s.LiveSave.cReadyPages = 0;
    pVM->pgm.s.LiveSave.cDirtyPages = 0;
    pVM->pgm.s.LiveSave.cMmioPages  = 0;

    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array.  This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            /* Only ranges without tracking yet; ad-hoc (MMIO/ROM) ranges are
               never tracked. */
            if (   !pCur->paLSPages
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                /* Snapshot the list generation so we can detect changes made
                   while the lock is dropped for the allocation below. */
                uint32_t const  idRamRangeGen = pVM->pgm.s.idRamRangesGen;
                uint32_t const  cPages = pCur->cb >> PAGE_SHIFT;
                pgmUnlock(pVM);
                PPGMLIVESAVEPAGE paLSPages = (PPGMLIVESAVEPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVEPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                pgmLock(pVM);
                if (pVM->pgm.s.idRamRangesGen != idRamRangeGen)
                {
                    /* The range list changed under us: ditch the allocation
                       and restart the outer do-while (pCur != NULL). */
                    pgmUnlock(pVM);
                    MMR3HeapFree(paLSPages);
                    pgmLock(pVM);
                    break;              /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].uPassSaved = UINT32_MAX;
                    paLSPages[iPage].cDirtied   = 0;
                    paLSPages[iPage].u5Reserved = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                            if (PGM_PAGE_IS_ZERO(pPage))
                            {
                                paLSPages[iPage].fZero  = 1;
                                paLSPages[iPage].fDirty = 0;
                                pVM->pgm.s.LiveSave.cReadyPages++;
                            }
                            else
                            {
                                paLSPages[iPage].fZero  = 0;
                                paLSPages[iPage].fDirty = 1;
                                pVM->pgm.s.LiveSave.cDirtyPages++;
                            }
                            paLSPages[iPage].fMmio  = 0;
                            break;
                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                            /* fall thru - treat unknown types like MMIO2. */
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            paLSPages[iPage].fZero  = 0;
                            paLSPages[iPage].fDirty = 1;
                            paLSPages[iPage].fMmio  = 1;
                            pVM->pgm.s.LiveSave.cMmioPages++;
                            break;
                        case PGMPAGETYPE_MMIO:
                            paLSPages[iPage].fZero  = 1;
                            paLSPages[iPage].fDirty = 1;
                            paLSPages[iPage].fMmio  = 1;
                            pVM->pgm.s.LiveSave.cMmioPages++;
                            break;
                    }
                }
            }
        }
    } while (pCur); /* pCur != NULL here only after a generation-change break. */
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}
    2495 
    2496 
/**
 * Execute a live save pass.
 *
 * Currently a no-op placeholder (presumably to be filled in by later work on
 * live migration).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   uPass       The pass number.
 */
static DECLCALLBACK(int)  pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    return VINF_SUCCESS;
}
    2509 
    2510 
/**
 * Votes on whether the live save phase is done or not.
 *
 * Currently always votes "done" (placeholder implementation).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 */
static DECLCALLBACK(int)  pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM)
{
    return VINF_SUCCESS;
}
    2523 
    2524 
/** PGM fields to save/load. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};

/** Per-VCPU PGM fields to save/load. */
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};

/* For loading old saved states. (pre-smp) */
typedef struct
{
    /** If set no conflict checks are required.  (boolean) */
    bool                            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t                        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR                         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting Guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS                        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool                            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE                         enmGuestMode;
} PGMOLD;

/** Field descriptors for loading the pre-SMP PGMOLD layout. */
static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
    2572 
    2573 
/**
 * Execute state save operation.
 *
 * Stream layout: PGM fields, per-VCPU PGMCPU fields, guest mappings
 * (index + desc + GCPtr + cPTs, terminated by ~0), then each RAM range
 * (index + bounds + pages, terminated by ~0).
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
{
    int         rc;
    unsigned    i;
    PPGM        pPGM = &pVM->pgm.s;

    /*
     * Lock PGM and set the no-more-writes indicator.
     */
    pgmLock(pVM);
    pVM->pgm.s.fNoMorePhysWrites = true;

    /*
     * Save basic data (required / unaffected by relocation).
     */
    SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        SSMR3PutStruct(pSSM, &pVCpu->pgm.s, &s_aPGMCpuFields[0]);
    }

    /*
     * The guest mappings.
     */
    i = 0;
    for (PPGMMAPPING pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3, i++)
    {
        SSMR3PutU32(      pSSM, i);
        SSMR3PutStrZ(     pSSM, pMapping->pszDesc); /* This is the best unique id we have... */
        SSMR3PutGCPtr(    pSSM, pMapping->GCPtr);
        SSMR3PutGCUIntPtr(pSSM, pMapping->cPTs);
    }
    rc = SSMR3PutU32(pSSM, ~0); /* terminator. */

    /*
     * Ram ranges and the memory they describe.
     */
    i = 0;
    for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; pRam; pRam = pRam->pNextR3, i++)
    {
        /*
         * Save the ram range details.
         */
        SSMR3PutU32(pSSM,       i);
        SSMR3PutGCPhys(pSSM,    pRam->GCPhys);
        SSMR3PutGCPhys(pSSM,    pRam->GCPhysLast);
        SSMR3PutGCPhys(pSSM,    pRam->cb);
        SSMR3PutU8(pSSM,        !!pRam->pvR3);      /* Boolean indicating memory or not. */
        SSMR3PutStrZ(pSSM,      pRam->pszDesc);     /* This is the best unique id we have... */

        /*
         * Iterate the pages, only two special cases (shadowed ROM and
         * MMIO2 aliases); everything else goes through pgmR3SavePage.
         */
        uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PPGMPAGE pPage      = &pRam->aPages[iPage];
            uint8_t  uType      = PGM_PAGE_GET_TYPE(pPage);

            if (uType == PGMPAGETYPE_ROM_SHADOW)
                rc = pgmR3SaveShadowedRomPage(pVM, pSSM, pPage, GCPhysPage, pRam);
            else if (uType == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
            {
                /* MMIO2 alias -> MMIO; the device will just have to deal with this. */
                SSMR3PutU8(pSSM, PGMPAGETYPE_MMIO);
                rc = SSMR3PutU8(pSSM, 0 /* ZERO */);
            }
            else
            {
                SSMR3PutU8(pSSM, uType);
                rc = pgmR3SavePage(pVM, pSSM, pPage, GCPhysPage, pRam);
            }
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }

    pgmUnlock(pVM);
    return SSMR3PutU32(pSSM, ~0); /* terminator. */
}
    2666 
    2667 
/**
 * Cleans up after a save state operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Free the tracking arrays and disable write monitoring.
     *
     * Play nice with the PGM lock in case we're called while the VM is still
     * running.  This means we have to delay the freeing since we wish to use
     * paLSPages as an indicator of which RAM ranges which we need to scan for
     * write monitored pages.
     */
    void *pvToFree = NULL;
    PPGMRAMRANGE pCur;
    uint32_t cMonitoredPages = 0;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (pCur->paLSPages)
            {
                if (pvToFree)
                {
                    /* Free the previously detached array outside the lock;
                       restart the scan if the range list changed meanwhile. */
                    uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                    pgmUnlock(pVM);
                    MMR3HeapFree(pvToFree);
                    pvToFree = NULL;
                    pgmLock(pVM);
                    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                        break;          /* start over again. */
                }

                pvToFree = pCur->paLSPages;
                pCur->paLSPages = NULL;

                /* Clear written-to flags and demote write-monitored pages
                   back to the allocated state. */
                uint32_t iPage = pCur->cb >> PAGE_SHIFT;
                while (iPage--)
                {
                    PPGMPAGE pPage = &pCur->aPages[iPage];
                    PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
                    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
                    {
                        PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
                        cMonitoredPages++;
                    }
                }
            }
        }
    } while (pCur); /* non-NULL only after a generation-change break above. */

    /** @todo this is blindly assuming that we're the only user of write
     *        monitoring. Fix this when more users are added. */
    pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
    pgmUnlock(pVM);

    /* Free the last detached array (free(NULL) is harmless). */
    MMR3HeapFree(pvToFree);
    pvToFree = NULL;

    return VINF_SUCCESS;
}
    2734 
    2735 
    2736 /**
    2737  * Load an ignored page.
    2738  *
    2739  * @returns VBox status code.
    2740  * @param   pSSM            The saved state handle.
    2741  */
    2742 static int pgmR3LoadPageToDevNull(PSSMHANDLE pSSM)
    2743 {
    2744     uint8_t abPage[PAGE_SIZE];
    2745     return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
    2746 }
    2747 
    2748 
    2749 /**
    2750  * Loads a page without any bits in the saved state, i.e. making sure it's
    2751  * really zero.
    2752  *
    2753  * @returns VBox status code.
    2754  * @param   pVM             The VM handle.
    2755  * @param   uType           The page type or PGMPAGETYPE_INVALID (old saved
    2756  *                          state).
    2757  * @param   pPage           The guest page tracking structure.
    2758  * @param   GCPhys          The page address.
    2759  * @param   pRam            The ram range (logging).
    2760  */
    2761 static int pgmR3LoadPageZero(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
    2762 {
    2763     if (    PGM_PAGE_GET_TYPE(pPage) != uType
    2764         &&  uType != PGMPAGETYPE_INVALID)
    2765         return VERR_SSM_UNEXPECTED_DATA;
    2766 
    2767     /* I think this should be sufficient. */
    2768     if (!PGM_PAGE_IS_ZERO(pPage))
    2769         return VERR_SSM_UNEXPECTED_DATA;
    2770 
    2771     NOREF(pVM);
    2772     NOREF(GCPhys);
    2773     NOREF(pRam);
    2774     return VINF_SUCCESS;
    2775 }
    2776 
    2777 
/**
 * Loads a page from the saved state.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pSSM            The SSM handle.
 * @param   uType           The page type or PGMPAGETYPE_INVALID (old saved
 *                          state).
 * @param   pPage           The guest page tracking structure.
 * @param   GCPhys          The page address.
 * @param   pRam            The ram range (logging).
 */
static int pgmR3LoadPageBits(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    int rc;

    /*
     * Match up the type, dealing with MMIO2 aliases (dropped).
     */
    AssertLogRelMsgReturn(   PGM_PAGE_GET_TYPE(pPage) == uType
                          || uType == PGMPAGETYPE_INVALID,
                          ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
                          VERR_SSM_UNEXPECTED_DATA);

    /*
     * Load the page: map it writable and read PAGE_SIZE bytes straight in.
     */
    void *pvPage;
    rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
    if (RT_SUCCESS(rc))
        rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);

    return rc;
}
    2812 
    2813 
/**
 * Loads a page (counter part to pgmR3SavePage).
 *
 * Reads the u8 zero indicator and dispatches to pgmR3LoadPageZero (0) or
 * pgmR3LoadPageBits (1); anything else is a corrupt stream.
 *
 * @returns VBox status code, fully bitched errors.
 * @param   pVM             The VM handle.
 * @param   pSSM            The SSM handle.
 * @param   uType           The page type.
 * @param   pPage           The page.
 * @param   GCPhys          The page address.
 * @param   pRam            The RAM range (for error messages).
 */
static int pgmR3LoadPage(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    uint8_t         uState;
    int rc = SSMR3GetU8(pSSM, &uState);
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
    if (uState == 0 /* zero */)
        rc = pgmR3LoadPageZero(pVM, uType, pPage, GCPhys, pRam);
    else if (uState == 1)
        rc = pgmR3LoadPageBits(pVM, pSSM, uType, pPage, GCPhys, pRam);
    else
        rc = VERR_INTERNAL_ERROR;
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uType=%d GCPhys=%RGp %s rc=%Rrc\n",
                                 pPage, uState, uType, GCPhys, pRam->pszDesc, rc),
                            rc);
    /* rc is a success status at this point (asserted above); return plain
       VINF_SUCCESS, dropping any informational status. */
    return VINF_SUCCESS;
}
    2841 
    2842 
/**
 * Loads a shadowed ROM page.
 *
 * @returns VBox status code, errors are fully bitched.
 * @param   pVM             The VM handle.
 * @param   pSSM            The saved state handle.
 * @param   pPage           The page.
 * @param   GCPhys          The page address.
 * @param   pRam            The RAM range (for error messages).
 */
static int pgmR3LoadShadowedRomPage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    /*
     * Load and set the protection first, then load the two pages, the first
     * one is the active the other is the passive.
     */
    PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
    AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);

    uint8_t     uProt;
    int rc = SSMR3GetU8(pSSM, &uProt);
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
    PGMROMPROT  enmProt = (PGMROMPROT)uProt;
    /* NOTE(review): '>= PGMROMPROT_INVALID' also lets the INVALID value
       through; presumably '>' was intended - verify against the enum. */
    AssertLogRelMsgReturn(    enmProt >= PGMROMPROT_INVALID
                          &&  enmProt <  PGMROMPROT_END,
                          ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
                          VERR_SSM_UNEXPECTED_DATA);

    /* Switch the live protection to the saved one if they differ. */
    if (pRomPage->enmProt != enmProt)
    {
        rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
        AssertLogRelRCReturn(rc, rc);
        AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
    }

    PPGMPAGE pPageActive  = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin      : &pRomPage->Shadow;
    PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow      : &pRomPage->Virgin;
    uint8_t  u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM        : PGMPAGETYPE_ROM_SHADOW;
    uint8_t  u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;

    rc = pgmR3LoadPage(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
    if (RT_SUCCESS(rc))
    {
        /* The active copy lives in the RAM range page; mirror it into the
           tracking structure before loading the passive copy. */
        *pPageActive = *pPage;
        rc = pgmR3LoadPage(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
    }
    return rc;
}
    2891 
    2892 
    2893 /**
    2894  * Worker for pgmR3Load.
    2895  *
    2896  * @returns VBox status code.
    2897  *
    2898  * @param   pVM                 The VM handle.
    2899  * @param   pSSM                The SSM handle.
    2900  * @param   uVersion            The saved state version.
    2901  */
    2902 static int pgmR3LoadLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
    2903 {
    2904     PPGM        pPGM = &pVM->pgm.s;
    2905     int         rc;
    2906     uint32_t    u32Sep;
    2907 
    2908     /*
    2909      * Load basic data (required / unaffected by relocation).
    2910      */
    2911     if (uVersion >= PGM_SAVED_STATE_VERSION)
    2912     {
    2913         rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
    2914         AssertLogRelRCReturn(rc, rc);
    2915 
    2916         for (VMCPUID i = 0; i < pVM->cCpus; i++)
    2917         {
    2918             rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
    2919             AssertLogRelRCReturn(rc, rc);
    2920         }
    2921     }
    2922     else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
    2923     {
    2924         AssertRelease(pVM->cCpus == 1);
    2925 
    2926         PGMOLD pgmOld;
    2927         rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
    2928         AssertLogRelRCReturn(rc, rc);
    2929 
    2930         pPGM->fMappingsFixed    = pgmOld.fMappingsFixed;
    2931         pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
    2932         pPGM->cbMappingFixed    = pgmOld.cbMappingFixed;
    2933 
    2934         pVM->aCpus[0].pgm.s.fA20Enabled   = pgmOld.fA20Enabled;
    2935         pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
    2936         pVM->aCpus[0].pgm.s.enmGuestMode  = pgmOld.enmGuestMode;
    2937     }
    2938     else
    2939     {
    2940         AssertRelease(pVM->cCpus == 1);
    2941 
    2942         SSMR3GetBool(pSSM,      &pPGM->fMappingsFixed);
    2943         SSMR3GetGCPtr(pSSM,     &pPGM->GCPtrMappingFixed);
    2944         SSMR3GetU32(pSSM,       &pPGM->cbMappingFixed);
    2945 
    2946         uint32_t cbRamSizeIgnored;
    2947         rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
    2948         if (RT_FAILURE(rc))
    2949             return rc;
    2950         SSMR3GetGCPhys(pSSM,    &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
    2951 
    2952         uint32_t u32 = 0;
    2953         SSMR3GetUInt(pSSM,      &u32);
    2954         pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
    2955         SSMR3GetUInt(pSSM,      &pVM->aCpus[0].pgm.s.fSyncFlags);
    2956         RTUINT uGuestMode;
    2957         SSMR3GetUInt(pSSM,      &uGuestMode);
    2958         pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
    2959 
    2960         /* check separator. */
    2961         SSMR3GetU32(pSSM, &u32Sep);
    2962         if (RT_FAILURE(rc))
    2963             return rc;
    2964         if (u32Sep != (uint32_t)~0)
    2965         {
    2966             AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
    2967             return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    2968         }
    2969     }
    2970 
    2971     /*
    2972      * The guest mappings.
    2973      */
    2974     uint32_t i = 0;
    2975     for (;; i++)
    2976     {
    2977         /* Check the seqence number / separator. */
    2978         rc = SSMR3GetU32(pSSM, &u32Sep);
    2979         if (RT_FAILURE(rc))
    2980             return rc;
    2981         if (u32Sep == ~0U)
    2982             break;
    2983         if (u32Sep != i)
    2984         {
    2985             AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
    2986             return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    2987         }
    2988 
    2989         /* get the mapping details. */
    2990         char szDesc[256];
    2991         szDesc[0] = '\0';
    2992         rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
    2993         if (RT_FAILURE(rc))
    2994             return rc;
    2995         RTGCPTR GCPtr;
    2996         SSMR3GetGCPtr(pSSM, &GCPtr);
    2997         RTGCPTR cPTs;
    2998         rc = SSMR3GetGCUIntPtr(pSSM, &cPTs);
    2999         if (RT_FAILURE(rc))
    3000             return rc;
    3001 
    3002         /* find matching range. */
    3003         PPGMMAPPING pMapping;
    3004         for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
    3005             if (    pMapping->cPTs == cPTs
    3006                 &&  !strcmp(pMapping->pszDesc, szDesc))
    3007                 break;
    3008         AssertLogRelMsgReturn(pMapping, ("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%RGv)\n",
    3009                                          cPTs, szDesc, GCPtr),
    3010                               VERR_SSM_LOAD_CONFIG_MISMATCH);
    3011 
    3012         /* relocate it. */
    3013         if (pMapping->GCPtr != GCPtr)
    3014         {
    3015             AssertMsg((GCPtr >> X86_PD_SHIFT << X86_PD_SHIFT) == GCPtr, ("GCPtr=%RGv\n", GCPtr));
    3016             pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr, GCPtr);
    3017         }
    3018         else
    3019             Log(("pgmR3Load: '%s' needed no relocation (%RGv)\n", szDesc, GCPtr));
    3020     }
    3021 
    3022     /*
    3023      * Ram range flags and bits.
    3024      */
    3025     i = 0;
    3026     for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; ; pRam = pRam->pNextR3, i++)
    3027     {
    3028         /* Check the seqence number / separator. */
    3029         rc = SSMR3GetU32(pSSM, &u32Sep);
    3030         if (RT_FAILURE(rc))
    3031             return rc;
    3032         if (u32Sep == ~0U)
    3033             break;
    3034         if (u32Sep != i)
    3035         {
    3036             AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
    3037             return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    3038         }
    3039         AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    3040 
    3041         /* Get the range details. */
    3042         RTGCPHYS GCPhys;
    3043         SSMR3GetGCPhys(pSSM, &GCPhys);
    3044         RTGCPHYS GCPhysLast;
    3045         SSMR3GetGCPhys(pSSM, &GCPhysLast);
    3046         RTGCPHYS cb;
    3047         SSMR3GetGCPhys(pSSM, &cb);
    3048         uint8_t     fHaveBits;
    3049         rc = SSMR3GetU8(pSSM, &fHaveBits);
    3050         if (RT_FAILURE(rc))
    3051             return rc;
    3052         if (fHaveBits & ~1)
    3053         {
    3054             AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
    3055             return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    3056         }
    3057         size_t  cchDesc = 0;
    3058         char    szDesc[256];
    3059         szDesc[0] = '\0';
    3060         if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
    3061         {
    3062             rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
    3063             if (RT_FAILURE(rc))
    3064                 return rc;
    3065             /* Since we've modified the description strings in r45878, only compare
    3066                them if the saved state is more recent. */
    3067             if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
    3068                 cchDesc = strlen(szDesc);
    3069         }
    3070 
    3071         /*
    3072          * Match it up with the current range.
    3073          *
    3074          * Note there is a hack for dealing with the high BIOS mapping
    3075          * in the old saved state format, this means we might not have
    3076          * a 1:1 match on success.
    3077          */
    3078         if (    (   GCPhys     != pRam->GCPhys
    3079                  || GCPhysLast != pRam->GCPhysLast
    3080                  || cb         != pRam->cb
    3081                  ||  (   cchDesc
    3082                       && strcmp(szDesc, pRam->pszDesc)) )
    3083                 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
    3084             &&  (   uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
    3085                  || GCPhys     != UINT32_C(0xfff80000)
    3086                  || GCPhysLast != UINT32_C(0xffffffff)
    3087                  || pRam->GCPhysLast != GCPhysLast
    3088                  || pRam->GCPhys     <  GCPhys
    3089                  || !fHaveBits)
    3090            )
    3091         {
    3092             LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
    3093                     "State    : %RGp-%RGp %RGp bytes %s %s\n",
    3094                     pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
    3095                     GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
    3096             /*
    3097              * If we're loading a state for debugging purpose, don't make a fuss if
    3098              * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
    3099              */
    3100             if (    SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
    3101                 ||  GCPhys < 8 * _1M)
    3102                 AssertFailedReturn(VERR_SSM_LOAD_CONFIG_MISMATCH);
    3103 
    3104             AssertMsgFailed(("debug skipping not implemented, sorry\n"));
    3105             continue;
    3106         }
    3107 
    3108         uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
    3109         if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
    3110         {
    3111             /*
    3112              * Load the pages one by one.
    3113              */
    3114             for (uint32_t iPage = 0; iPage < cPages; iPage++)
    3115             {
    3116                 RTGCPHYS const  GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
    3117                 PPGMPAGE        pPage      = &pRam->aPages[iPage];
    3118                 uint8_t         uType;
    3119                 rc = SSMR3GetU8(pSSM, &uType);
    3120                 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
    3121                 if (uType == PGMPAGETYPE_ROM_SHADOW)
    3122                     rc = pgmR3LoadShadowedRomPage(pVM, pSSM, pPage, GCPhysPage, pRam);
    3123                 else
    3124                     rc = pgmR3LoadPage(pVM, pSSM, uType, pPage, GCPhysPage, pRam);
    3125                 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
    3126             }
    3127         }
    3128         else
    3129         {
    3130             /*
    3131              * Old format.
    3132              */
    3133             AssertLogRelReturn(!pVM->pgm.s.fRamPreAlloc, VERR_NOT_SUPPORTED); /* can't be detected. */
    3134 
    3135             /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
    3136                The rest is generally irrelevant and wrong since the stuff have to match registrations. */
    3137             uint32_t fFlags = 0;
    3138             for (uint32_t iPage = 0; iPage < cPages; iPage++)
    3139             {
    3140                 uint16_t u16Flags;
    3141                 rc = SSMR3GetU16(pSSM, &u16Flags);
    3142                 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
    3143                 fFlags |= u16Flags;
    3144             }
    3145 
    3146             /* Load the bits */
    3147             if (    !fHaveBits
    3148                 &&  GCPhysLast < UINT32_C(0xe0000000))
    3149             {
    3150                 /*
    3151                  * Dynamic chunks.
    3152                  */
    3153                 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
    3154                 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
    3155                                       ("cPages=%#x cPagesInChunk=%#x\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
    3156                                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    3157 
    3158                 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
    3159                 {
    3160                     uint8_t fPresent;
    3161                     rc = SSMR3GetU8(pSSM, &fPresent);
    3162                     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
    3163                     AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
    3164                                           ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
    3165                                           VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    3166 
    3167                     for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
    3168                     {
    3169                         RTGCPHYS const  GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
    3170                         PPGMPAGE        pPage      = &pRam->aPages[iPage];
    3171                         if (fPresent)
    3172                         {
    3173                             if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
    3174                                 rc = pgmR3LoadPageToDevNull(pSSM);
    3175                             else
    3176                                 rc = pgmR3LoadPageBits(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
    3177                         }
    3178                         else
    3179                             rc = pgmR3LoadPageZero(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
    3180                         AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
    3181                     }
    3182                 }
    3183             }
    3184             else if (pRam->pvR3)
    3185             {
    3186                 /*
    3187                  * MMIO2.
    3188                  */
    3189                 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
    3190                                       ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
    3191                                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    3192                 AssertLogRelMsgReturn(pRam->pvR3,
    3193                                       ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
    3194                                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    3195 
    3196                 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
    3197                 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
    3198             }
    3199             else if (GCPhysLast < UINT32_C(0xfff80000))
    3200             {
    3201                 /*
    3202                  * PCI MMIO, no pages saved.
    3203                  */
    3204             }
    3205             else
    3206             {
    3207                 /*
    3208                  * Load the 0xfff80000..0xffffffff BIOS range.
    3209                  * It starts with X reserved pages that we have to skip over since
    3210                  * the RAMRANGE create by the new code won't include those.
    3211                  */
    3212                 AssertLogRelMsgReturn(   !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
    3213                                       && (fFlags  & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
    3214                                       ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
    3215                                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    3216                 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
    3217                                       ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
    3218                                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    3219 
    3220                 /* Skip wasted reserved pages before the ROM. */
    3221                 while (GCPhys < pRam->GCPhys)
    3222                 {
    3223                     rc = pgmR3LoadPageToDevNull(pSSM);
    3224                     GCPhys += PAGE_SIZE;
    3225                 }
    3226 
    3227                 /* Load the bios pages. */
    3228                 cPages = pRam->cb >> PAGE_SHIFT;
    3229                 for (uint32_t iPage = 0; iPage < cPages; iPage++)
    3230                 {
    3231                     RTGCPHYS const  GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
    3232                     PPGMPAGE        pPage      = &pRam->aPages[iPage];
    3233 
    3234                     AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
    3235                                           ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
    3236                                           VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    3237                     rc = pgmR3LoadPageBits(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
    3238                     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
    3239                 }
    3240             }
    3241         }
    3242     }
    3243 
    3244     return rc;
    3245 }
    3246 
    3247 
    3248 /**
    3249  * Execute state load operation.
    3250  *
    3251  * @returns VBox status code.
    3252  * @param   pVM             VM Handle.
    3253  * @param   pSSM            SSM operation handle.
    3254  * @param   uVersion        Data layout version.
    3255  * @param   uPass           The data pass.
    3256  */
    3257 static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
    3258 {
    3259     int     rc;
    3260     PPGM    pPGM = &pVM->pgm.s;
    3261     Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
    3262 
    3263     /*
    3264      * Validate version.
    3265      */
    3266     if (    uVersion != PGM_SAVED_STATE_VERSION
    3267         &&  uVersion != PGM_SAVED_STATE_VERSION_2_2_2
    3268         &&  uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
    3269         &&  uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
    3270     {
    3271         AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
    3272         return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    3273     }
    3274 
    3275     /*
    3276      * Call the reset function to make sure all the memory is cleared.
    3277      */
    3278     PGMR3Reset(pVM);
    3279 
    3280     /*
    3281      * Do the loading while owning the lock because a bunch of the functions
    3282      * we're using requires this.
    3283      */
    3284     pgmLock(pVM);
    3285     rc = pgmR3LoadLocked(pVM, pSSM, uVersion);
    3286     pgmUnlock(pVM);
    3287     if (RT_SUCCESS(rc))
    3288     {
    3289         /*
    3290          * We require a full resync now.
    3291          */
    3292         for (VMCPUID i = 0; i < pVM->cCpus; i++)
    3293         {
    3294             PVMCPU pVCpu = &pVM->aCpus[i];
    3295             VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    3296             VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    3297 
    3298             pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
    3299         }
    3300 
    3301         pgmR3HandlerPhysicalUpdateAll(pVM);
    3302 
    3303         for (VMCPUID i = 0; i < pVM->cCpus; i++)
    3304         {
    3305             PVMCPU pVCpu = &pVM->aCpus[i];
    3306 
    3307             /*
    3308              * Change the paging mode.
    3309              */
    3310             rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
    3311 
    3312             /* Restore pVM->pgm.s.GCPhysCR3. */
    3313             Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
    3314             RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
    3315             if (    pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
    3316                 ||  pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
    3317                 ||  pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
    3318                 ||  pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
    3319                 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
    3320             else
    3321                 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
    3322             pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    3323         }
    3324     }
    3325 
    3326     return rc;
    33272280}
    33282281
  • trunk/src/VBox/VMM/PGMInternal.h

    r23306 r23307  
    31443144#endif
    31453145DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    3146 
     3146int             pgmR3InitSavedState(PVM pVM, uint64_t cbRam);
    31473147
    31483148int             pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
  • trunk/src/VBox/VMM/PGMSavedState.cpp

    r23306 r23307  
    11/* $Id$ */
    22/** @file
    3  * PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
     3 * PGM - Page Manager and Monitor, The Saved State Part.
    44 */
    55
    66/*
    7  * Copyright (C) 2006-2007 Sun Microsystems, Inc.
     7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    2020 */
    2121
    22 
    23 /** @page pg_pgm PGM - The Page Manager and Monitor
    24  *
    25  * @see grp_pgm,
    26  * @ref pg_pgm_pool,
    27  * @ref pg_pgm_phys.
    28  *
    29  *
    30  * @section         sec_pgm_modes           Paging Modes
    31  *
    32  * There are three memory contexts: Host Context (HC), Guest Context (GC)
    33  * and intermediate context. When talking about paging HC can also be referred to
    34  * as "host paging", and GC referred to as "shadow paging".
    35  *
    36  * We define three basic paging modes: 32-bit, PAE and AMD64. The host paging mode
    37  * is defined by the host operating system. The mode used in the shadow paging mode
    38  * depends on the host paging mode and what the mode the guest is currently in. The
    39  * following relation between the two is defined:
    40  *
    41  * @verbatim
    42      Host > 32-bit |  PAE   | AMD64  |
    43    Guest  |        |        |        |
    44    ==v================================
    45    32-bit   32-bit    PAE     PAE
    46    -------|--------|--------|--------|
    47    PAE       PAE      PAE     PAE
    48    -------|--------|--------|--------|
    49    AMD64    AMD64    AMD64    AMD64
    50    -------|--------|--------|--------| @endverbatim
    51  *
    52  * All configuration except those in the diagonal (upper left) are expected to
    53  * require special effort from the switcher (i.e. a bit slower).
    54  *
    55  *
    56  *
    57  *
    58  * @section         sec_pgm_shw             The Shadow Memory Context
    59  *
    60  *
    61  *  [..]
    62  *
    63  * Because guest context mappings require PDPT and PML4 entries to allow
    64  * writing on AMD64, the two upper levels will have fixed flags whatever the
    65  * guest is thinking of using there. So, when shadowing the PD level we will
    66  * calculate the effective flags of PD and all the higher levels. In legacy
    67  * PAE mode this only applies to the PWT and PCD bits (the rest are
    68  * ignored/reserved/MBZ). We will ignore those bits for the present.
    69  *
    70  *
    71  *
    72  * @section         sec_pgm_int             The Intermediate Memory Context
    73  *
    74  * The world switch goes thru an intermediate memory context which purpose it is
    75  * to provide different mappings of the switcher code. All guest mappings are also
    76  * present in this context.
    77  *
    78  * The switcher code is mapped at the same location as on the host, at an
    79  * identity mapped location (physical equals virtual address), and at the
    80  * hypervisor location. The identity mapped location is for when the world
    81  * switches that involves disabling paging.
    82  *
    83  * PGM maintain page tables for 32-bit, PAE and AMD64 paging modes. This
    84  * simplifies switching guest CPU mode and consistency at the cost of more
    85  * code to do the work. All memory use for those page tables is located below
    86  * 4GB (this includes page tables for guest context mappings).
    87  *
    88  *
    89  * @subsection      subsec_pgm_int_gc       Guest Context Mappings
    90  *
    91  * During assignment and relocation of a guest context mapping the intermediate
    92  * memory context is used to verify the new location.
    93  *
    94  * Guest context mappings are currently restricted to below 4GB, for reasons
    95  * of simplicity. This may change when we implement AMD64 support.
    96  *
    97  *
    98  *
    99  *
    100  * @section         sec_pgm_misc            Misc
    101  *
    102  * @subsection      subsec_pgm_misc_diff    Differences Between Legacy PAE and Long Mode PAE
    103  *
    104  * The differences between legacy PAE and long mode PAE are:
    105  *      -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they are
    106  *         all marked down as must-be-zero, while in long mode 1, 2 and 5 have the
    107  *         usual meanings while 6 is ignored (AMD). This means that upon switching to
    108  *         legacy PAE mode we'll have to clear these bits and when going to long mode
    109  *         they must be set. This applies to both intermediate and shadow contexts,
    110  *         however we don't need to do it for the intermediate one since we're
    111  *         executing with CR0.WP at that time.
    112  *      -# CR3 allows a 32-byte aligned address in legacy mode, while in long mode
    113  *         a page aligned one is required.
    114  *
    115  *
    116  * @section         sec_pgm_handlers        Access Handlers
    117  *
    118  * Placeholder.
    119  *
    120  *
    121  * @subsection      sec_pgm_handlers_virt   Virtual Access Handlers
    122  *
    123  * Placeholder.
    124  *
    125  *
    126  * @subsection      sec_pgm_handlers_virt   Virtual Access Handlers
    127  *
    128  * We currently implement three types of virtual access handlers:  ALL, WRITE
    129  * and HYPERVISOR (WRITE). See PGMVIRTHANDLERTYPE for some more details.
    130  *
    131  * The HYPERVISOR access handlers is kept in a separate tree since it doesn't apply
    132  * to physical pages (PGMTREES::HyperVirtHandlers) and only needs to be consulted in
    133  * a special \#PF case. The ALL and WRITE are in the PGMTREES::VirtHandlers tree, the
    134  * rest of this section is going to be about these handlers.
    135  *
    136  * We'll go thru the life cycle of a handler and try to make sense of it all, don't know
    137  * how successful this is gonna be...
    138  *
    139  * 1. A handler is registered thru the PGMR3HandlerVirtualRegister and
    140  * PGMHandlerVirtualRegisterEx APIs. We check for conflicting virtual handlers
    141  * and create a new node that is inserted into the AVL tree (range key). Then
    142  * a full PGM resync is flagged (clear pool, sync cr3, update virtual bit of PGMPAGE).
    143  *
    144  * 2. The following PGMSyncCR3/SyncCR3 operation will first invoke HandlerVirtualUpdate.
    145  *
    146  * 2a. HandlerVirtualUpdate will look up all the pages covered by virtual handlers
    147  * via the current guest CR3 and update the physical page -> virtual handler
    148  * translation. Needless to say, this doesn't exactly scale very well. If any changes
    149  * are detected, it will flag a virtual bit update just like we did on registration.
    150  * PGMPHYS pages with changes will have their virtual handler state reset to NONE.
    151  *
    152  * 2b. The virtual bit update process will iterate all the pages covered by all the
    153  * virtual handlers and update the PGMPAGE virtual handler state to the max of all
    154  * virtual handlers on that page.
    155  *
    156  * 2c. Back in SyncCR3 we will now flush the entire shadow page cache to make sure
    157  * we don't miss any alias mappings of the monitored pages.
    158  *
    159  * 2d. SyncCR3 will then proceed with syncing the CR3 table.
    160  *
    161  * 3. \#PF(np,read) on a page in the range. This will cause it to be synced
    162  * read-only and resumed if it's a WRITE handler. If it's an ALL handler we
    163  * will call the handlers like in the next step. If the physical mapping has
    164  * changed we will - some time in the future - perform a handler callback
    165  * (optional) and update the physical -> virtual handler cache.
    166  *
    167  * 4. \#PF(,write) on a page in the range. This will cause the handler to
    168  * be invoked.
    169  *
    170  * 5. The guest invalidates the page and changes the physical backing or
    171  * unmaps it. This should cause the invalidation callback to be invoked
    172  * (it might not yet be 100% perfect). Exactly what happens next... is
    173  * this where we mess up and end up out of sync for a while?
    174  *
    175  * 6. The handler is deregistered by the client via PGMHandlerVirtualDeregister.
    176  * We will then set all PGMPAGEs in the physical -> virtual handler cache for
    177  * this handler to NONE and trigger a full PGM resync (basically the same
    178  * as in step 1). Which means 2 is executed again.
    179  *
    180  *
    181  * @subsubsection   sub_sec_pgm_handler_virt_todo   TODOs
    182  *
    183  * There is a bunch of things that needs to be done to make the virtual handlers
    184  * work 100% correctly and work more efficiently.
    185  *
    186  * The first bit hasn't been implemented yet because it's going to slow the
    187  * whole mess down even more, and besides it seems to be working reliably for
    188  * our current uses. OTOH, some of the optimizations might end up more or less
    189  * implementing the missing bits, so we'll see.
    190  *
    191  * On the optimization side, the first thing to do is to try avoid unnecessary
    192  * cache flushing. Then try team up with the shadowing code to track changes
    193  * in mappings by means of access to them (shadow in), updates to shadows pages,
    194  * invlpg, and shadow PT discarding (perhaps).
    195  *
    196  * Some idea that have popped up for optimization for current and new features:
    197  *    - bitmap indicating where there are virtual handlers installed.
    198  *      (4KB => 2**20 pages, page 2**12 => covers 32-bit address space 1:1!)
    199  *    - Further optimize this by min/max (needs min/max avl getters).
    200  *    - Shadow page table entry bit (if any left)?
    201  *
    202  */
    203 
    204 
    205 /** @page pg_pgm_phys   PGM Physical Guest Memory Management
    206  *
    207  *
    208  * Objectives:
    209  *      - Guest RAM over-commitment using memory ballooning,
    210  *        zero pages and general page sharing.
    211  *      - Moving or mirroring a VM onto a different physical machine.
    212  *
    213  *
    214  * @subsection subsec_pgmPhys_Definitions       Definitions
    215  *
    216  * Allocation chunk - A RTR0MemObjAllocPhysNC object and the tracking
    217  * machinery associated with it.
    218  *
    219  *
    220  *
    221  *
    222  * @subsection subsec_pgmPhys_AllocPage         Allocating a page.
    223  *
    224  * Initially we map *all* guest memory to the (per VM) zero page, which
    225  * means that none of the read functions will cause pages to be allocated.
    226  *
    227  * Exception, access bit in page tables that have been shared. This must
    228  * be handled, but we must also make sure PGMGst*Modify doesn't make
    229  * unnecessary modifications.
    230  *
    231  * Allocation points:
    232  *      - PGMPhysSimpleWriteGCPhys and PGMPhysWrite.
    233  *      - Replacing a zero page mapping at \#PF.
    234  *      - Replacing a shared page mapping at \#PF.
    235  *      - ROM registration (currently MMR3RomRegister).
    236  *      - VM restore (pgmR3Load).
    237  *
    238  * For the first three it would make sense to keep a few pages handy
    239  * until we've reached the max memory commitment for the VM.
    240  *
    241  * For the ROM registration, we know exactly how many pages we need
    242  * and will request these from ring-0. For restore, we will save
    243  * the number of non-zero pages in the saved state and allocate
    244  * them up front. This would allow the ring-0 component to refuse
    245  * the request if there isn't sufficient memory available for VM use.
    246  *
    247  * Btw. for both ROM and restore allocations we won't be requiring
    248  * zeroed pages as they are going to be filled instantly.
    249  *
    250  *
    251  * @subsection subsec_pgmPhys_FreePage          Freeing a page
    252  *
    253  * There are a few points where a page can be freed:
    254  *      - After being replaced by the zero page.
    255  *      - After being replaced by a shared page.
    256  *      - After being ballooned by the guest additions.
    257  *      - At reset.
    258  *      - At restore.
    259  *
    260  * When freeing one or more pages they will be returned to the ring-0
    261  * component and replaced by the zero page.
    262  *
    263  * The reasoning for clearing out all the pages on reset is that it will
    264  * return us to the exact same state as on power on, and may thereby help
    265  * us reduce the memory load on the system. Further it might have a
    266  * (temporary) positive influence on memory fragmentation (@see subsec_pgmPhys_Fragmentation).
    267  *
    268  * On restore, as mention under the allocation topic, pages should be
    269  * freed / allocated depending on how many is actually required by the
    270  * new VM state. The simplest approach is to do like on reset, and free
    271  * all non-ROM pages and then allocate what we need.
    272  *
    273  * A measure to prevent some fragmentation, would be to let each allocation
    274  * chunk have some affinity towards the VM having allocated the most pages
    275  * from it. Also, try make sure to allocate from allocation chunks that
    276  * are almost full. Admittedly, both these measures might work counter to
    277  * our intentions and it's probably not worth putting a lot of effort,
    278  * cpu time or memory into this.
    279  *
    280  *
    281  * @subsection subsec_pgmPhys_SharePage         Sharing a page
    282  *
    283  * The basic idea is that there will be an idle priority kernel
    284  * thread walking the non-shared VM pages hashing them and looking for
    285  * pages with the same checksum. If such pages are found, it will compare
    286  * them byte-by-byte to see if they actually are identical. If found to be
    287  * identical it will allocate a shared page, copy the content, check that
    288  * the page didn't change while doing this, and finally request both the
    289  * VMs to use the shared page instead. If the page is all zeros (special
    290  * checksum and byte-by-byte check) it will request the VM that owns it
    291  * to replace it with the zero page.
    292  *
    293  * To make this efficient, we will have to make sure not to try share a page
    294  * that will change its contents soon. This part requires the most work.
    295  * A simple idea would be to request the VM to write monitor the page for
    296  * a while to make sure it isn't modified any time soon. Also, it may
    297  * make sense to skip pages that are being write monitored since this
    298  * information is readily available to the thread if it works on the
    299  * per-VM guest memory structures (presently called PGMRAMRANGE).
    300  *
    301  *
    302  * @subsection subsec_pgmPhys_Fragmentation     Fragmentation Concerns and Counter Measures
    303  *
    304  * The pages are organized in allocation chunks in ring-0, this is a necessity
    305  * if we wish to have an OS agnostic approach to this whole thing. (On Linux we
    306  * could easily work on a page-by-page basis if we liked. Whether this is possible
    307  * or efficient on NT I don't quite know.) Fragmentation within these chunks may
    308  * become a problem as part of the idea here is that we wish to return memory to
    309  * the host system.
    310  *
    311  * For instance, starting two VMs at the same time, they will both allocate the
    312  * guest memory on-demand and if permitted their page allocations will be
    313  * intermixed. Shut down one of the two VMs and it will be difficult to return
    314  * any memory to the host system because the page allocation for the two VMs are
    315  * mixed up in the same allocation chunks.
    316  *
    317  * To further complicate matters, when pages are freed because they have been
    318  * ballooned or become shared/zero the whole idea is that the page is supposed
    319  * to be reused by another VM or returned to the host system. This will cause
    320  * allocation chunks to contain pages belonging to different VMs and prevent
    321  * returning memory to the host when one of those VM shuts down.
    322  *
    323  * The only way to really deal with this problem is to move pages. This can
    324  * either be done at VM shutdown and or by the idle priority worker thread
    325  * that will be responsible for finding sharable/zero pages. The mechanisms
    326  * involved for coercing a VM to move a page (or to do it for it) will be
    327  * the same as when telling it to share/zero a page.
    328  *
    329  *
    330  * @subsection subsec_pgmPhys_Tracking      Tracking Structures And Their Cost
    331  *
    332  * There's a difficult balance between keeping the per-page tracking structures
    333  * (global and guest page) easy to use and keeping them from eating too much
    334  * memory. We have limited virtual memory resources available when operating in
     335  * 32-bit kernel space (on 64-bit it's quite a different story). The
     336  * tracking structures will be designed such that we can deal with up
    337  * to 32GB of memory on a 32-bit system and essentially unlimited on 64-bit ones.
    338  *
    339  *
    340  * @subsubsection subsubsec_pgmPhys_Tracking_Kernel     Kernel Space
    341  *
    342  * @see pg_GMM
    343  *
    344  * @subsubsection subsubsec_pgmPhys_Tracking_PerVM      Per-VM
    345  *
    346  * Fixed info is the physical address of the page (HCPhys) and the page id
    347  * (described above). Theoretically we'll need 48(-12) bits for the HCPhys part.
     348  * Today we're restricting ourselves to 40(-12) bits because this is the current
    349  * restrictions of all AMD64 implementations (I think Barcelona will up this
    350  * to 48(-12) bits, not that it really matters) and I needed the bits for
    351  * tracking mappings of a page. 48-12 = 36. That leaves 28 bits, which means a
    352  * decent range for the page id: 2^(28+12) = 1024TB.
    353  *
    354  * In additions to these, we'll have to keep maintaining the page flags as we
    355  * currently do. Although it wouldn't harm to optimize these quite a bit, like
    356  * for instance the ROM shouldn't depend on having a write handler installed
    357  * in order for it to become read-only. A RO/RW bit should be considered so
    358  * that the page syncing code doesn't have to mess about checking multiple
    359  * flag combinations (ROM || RW handler || write monitored) in order to
    360  * figure out how to setup a shadow PTE. But this of course, is second
    361  * priority at present. Current this requires 12 bits, but could probably
    362  * be optimized to ~8.
    363  *
    364  * Then there's the 24 bits used to track which shadow page tables are
    365  * currently mapping a page for the purpose of speeding up physical
    366  * access handlers, and thereby the page pool cache. More bit for this
    367  * purpose wouldn't hurt IIRC.
    368  *
    369  * Then there is a new bit in which we need to record what kind of page
    370  * this is, shared, zero, normal or write-monitored-normal. This'll
    371  * require 2 bits. One bit might be needed for indicating whether a
    372  * write monitored page has been written to. And yet another one or
    373  * two for tracking migration status. 3-4 bits total then.
    374  *
     375  * Whatever is left can be used to record the shareability of a
    376  * page. The page checksum will not be stored in the per-VM table as
    377  * the idle thread will not be permitted to do modifications to it.
    378  * It will instead have to keep its own working set of potentially
    379  * shareable pages and their check sums and stuff.
    380  *
    381  * For the present we'll keep the current packing of the
    382  * PGMRAMRANGE::aHCPhys to keep the changes simple, only of course,
    383  * we'll have to change it to a struct with a total of 128-bits at
    384  * our disposal.
    385  *
    386  * The initial layout will be like this:
    387  * @verbatim
    388     RTHCPHYS HCPhys;            The current stuff.
    389         63:40                   Current shadow PT tracking stuff.
    390         39:12                   The physical page frame number.
    391         11:0                    The current flags.
    392     uint32_t u28PageId : 28;    The page id.
    393     uint32_t u2State : 2;       The page state { zero, shared, normal, write monitored }.
    394     uint32_t fWrittenTo : 1;    Whether a write monitored page was written to.
    395     uint32_t u1Reserved : 1;    Reserved for later.
    396     uint32_t u32Reserved;       Reserved for later, mostly sharing stats.
    397  @endverbatim
    398  *
    399  * The final layout will be something like this:
    400  * @verbatim
    401     RTHCPHYS HCPhys;            The current stuff.
    402         63:48                   High page id (12+).
    403         47:12                   The physical page frame number.
    404         11:0                    Low page id.
    405     uint32_t fReadOnly : 1;     Whether it's readonly page (rom or monitored in some way).
    406     uint32_t u3Type : 3;        The page type {RESERVED, MMIO, MMIO2, ROM, shadowed ROM, RAM}.
    407     uint32_t u2PhysMon : 2;     Physical access handler type {none, read, write, all}.
    408     uint32_t u2VirtMon : 2;     Virtual access handler type {none, read, write, all}..
    409     uint32_t u2State : 2;       The page state { zero, shared, normal, write monitored }.
    410     uint32_t fWrittenTo : 1;    Whether a write monitored page was written to.
    411     uint32_t u20Reserved : 20;  Reserved for later, mostly sharing stats.
    412     uint32_t u32Tracking;       The shadow PT tracking stuff, roughly.
    413  @endverbatim
    414  *
    415  * Cost wise, this means we'll double the cost for guest memory. There isn't anyway
    416  * around that I'm afraid. It means that the cost of dealing out 32GB of memory
    417  * to one or more VMs is: (32GB >> PAGE_SHIFT) * 16 bytes, or 128MBs. Or another
    418  * example, the VM heap cost when assigning 1GB to a VM will be: 4MB.
    419  *
    420  * A couple of cost examples for the total cost per-VM + kernel.
    421  * 32-bit Windows and 32-bit linux:
    422  *      1GB guest ram, 256K pages:  4MB +  2MB(+) =   6MB
    423  *      4GB guest ram, 1M pages:   16MB +  8MB(+) =  24MB
    424  *     32GB guest ram, 8M pages:  128MB + 64MB(+) = 192MB
    425  * 64-bit Windows and 64-bit linux:
    426  *      1GB guest ram, 256K pages:  4MB +  3MB(+) =   7MB
    427  *      4GB guest ram, 1M pages:   16MB + 12MB(+) =  28MB
    428  *     32GB guest ram, 8M pages:  128MB + 96MB(+) = 224MB
    429  *
    430  * UPDATE - 2007-09-27:
    431  * Will need a ballooned flag/state too because we cannot
    432  * trust the guest 100% and reporting the same page as ballooned more
    433  * than once will put the GMM off balance.
    434  *
    435  *
    436  * @subsection subsec_pgmPhys_Serializing       Serializing Access
    437  *
    438  * Initially, we'll try a simple scheme:
    439  *
    440  *      - The per-VM RAM tracking structures (PGMRAMRANGE) is only modified
    441  *        by the EMT thread of that VM while in the pgm critsect.
    442  *      - Other threads in the VM process that needs to make reliable use of
    443  *        the per-VM RAM tracking structures will enter the critsect.
    444  *      - No process external thread or kernel thread will ever try enter
    445  *        the pgm critical section, as that just won't work.
     446  *      - The idle thread (and similar threads) doesn't need 100% reliable
     447  *        data when performing its tasks as the EMT thread will be the one to
    448  *        do the actual changes later anyway. So, as long as it only accesses
    449  *        the main ram range, it can do so by somehow preventing the VM from
    450  *        being destroyed while it works on it...
    451  *
    452  *      - The over-commitment management, including the allocating/freeing
    453  *        chunks, is serialized by a ring-0 mutex lock (a fast one since the
    454  *        more mundane mutex implementation is broken on Linux).
     455  *      - A separate mutex is protecting the set of allocation chunks so
    456  *        that pages can be shared or/and freed up while some other VM is
     457  *        allocating more chunks. This mutex can be taken from under the other
     458  *        one, but not the other way around.
    459  *
    460  *
    461  * @subsection subsec_pgmPhys_Request           VM Request interface
    462  *
    463  * When in ring-0 it will become necessary to send requests to a VM so it can
    464  * for instance move a page while defragmenting during VM destroy. The idle
    465  * thread will make use of this interface to request VMs to setup shared
    466  * pages and to perform write monitoring of pages.
    467  *
    468  * I would propose an interface similar to the current VMReq interface, similar
    469  * in that it doesn't require locking and that the one sending the request may
    470  * wait for completion if it wishes to. This shouldn't be very difficult to
    471  * realize.
    472  *
    473  * The requests themselves are also pretty simple. They are basically:
    474  *      -# Check that some precondition is still true.
    475  *      -# Do the update.
    476  *      -# Update all shadow page tables involved with the page.
    477  *
    478  * The 3rd step is identical to what we're already doing when updating a
    479  * physical handler, see pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs.
    480  *
    481  *
    482  *
    483  * @section sec_pgmPhys_MappingCaches   Mapping Caches
    484  *
    485  * In order to be able to map in and out memory and to be able to support
     486  * guests with more RAM than we've got virtual address space, we'll be employing
    487  * a mapping cache. There is already a tiny one for GC (see PGMGCDynMapGCPageEx)
    488  * and we'll create a similar one for ring-0 unless we decide to setup a dedicate
    489  * memory context for the HWACCM execution.
    490  *
    491  *
    492  * @subsection subsec_pgmPhys_MappingCaches_R3  Ring-3
    493  *
    494  * We've considered implementing the ring-3 mapping cache page based but found
     495  * that this was bothersome when one had to take into account TLBs+SMP and
    496  * portability (missing the necessary APIs on several platforms). There were
    497  * also some performance concerns with this approach which hadn't quite been
    498  * worked out.
    499  *
    500  * Instead, we'll be mapping allocation chunks into the VM process. This simplifies
     501  * matters quite a bit since we don't need to invent any new ring-0 stuff,
    502  * only some minor RTR0MEMOBJ mapping stuff. The main concern here is that mapping
    503  * compared to the previous idea is that mapping or unmapping a 1MB chunk is more
    504  * costly than a single page, although how much more costly is uncertain. We'll
    505  * try address this by using a very big cache, preferably bigger than the actual
    506  * VM RAM size if possible. The current VM RAM sizes should give some idea for
    507  * 32-bit boxes, while on 64-bit we can probably get away with employing an
    508  * unlimited cache.
    509  *
     510  * The cache has two parts, as already indicated: the ring-3 side and the
    511  * ring-0 side.
    512  *
    513  * The ring-0 will be tied to the page allocator since it will operate on the
    514  * memory objects it contains. It will therefore require the first ring-0 mutex
     515  * discussed in @ref subsec_pgmPhys_Serializing. We'll need
     516  * some double housekeeping wrt who has mapped what, I think, since both
     517  * VMMR0.r0 and RTR0MemObj will keep track of mapping relations.
    518  *
    519  * The ring-3 part will be protected by the pgm critsect. For simplicity, we'll
    520  * require anyone that desires to do changes to the mapping cache to do that
    521  * from within this critsect. Alternatively, we could employ a separate critsect
    522  * for serializing changes to the mapping cache as this would reduce potential
    523  * contention with other threads accessing mappings unrelated to the changes
    524  * that are in process. We can see about this later, contention will show
    525  * up in the statistics anyway, so it'll be simple to tell.
    526  *
    527  * The organization of the ring-3 part will be very much like how the allocation
    528  * chunks are organized in ring-0, that is in an AVL tree by chunk id. To avoid
    529  * having to walk the tree all the time, we'll have a couple of lookaside entries
    530  * like in we do for I/O ports and MMIO in IOM.
    531  *
    532  * The simplified flow of a PGMPhysRead/Write function:
    533  *      -# Enter the PGM critsect.
    534  *      -# Lookup GCPhys in the ram ranges and get the Page ID.
    535  *      -# Calc the Allocation Chunk ID from the Page ID.
    536  *      -# Check the lookaside entries and then the AVL tree for the Chunk ID.
    537  *         If not found in cache:
    538  *              -# Call ring-0 and request it to be mapped and supply
    539  *                 a chunk to be unmapped if the cache is maxed out already.
    540  *              -# Insert the new mapping into the AVL tree (id + R3 address).
    541  *      -# Update the relevant lookaside entry and return the mapping address.
    542  *      -# Do the read/write according to monitoring flags and everything.
    543  *      -# Leave the critsect.
    544  *
    545  *
    546  * @section sec_pgmPhys_Fallback            Fallback
    547  *
     548  * Currently all the "second tier" hosts will not support the RTR0MemObjAllocPhysNC
    549  * API and thus require a fallback.
    550  *
    551  * So, when RTR0MemObjAllocPhysNC returns VERR_NOT_SUPPORTED the page allocator
    552  * will return to the ring-3 caller (and later ring-0) and asking it to seed
    553  * the page allocator with some fresh pages (VERR_GMM_SEED_ME). Ring-3 will
    554  * then perform an SUPR3PageAlloc(cbChunk >> PAGE_SHIFT) call and make a
    555  * "SeededAllocPages" call to ring-0.
    556  *
    557  * The first time ring-0 sees the VERR_NOT_SUPPORTED failure it will disable
    558  * all page sharing (zero page detection will continue). It will also force
    559  * all allocations to come from the VM which seeded the page. Both these
    560  * measures are taken to make sure that there will never be any need for
    561  * mapping anything into ring-3 - everything will be mapped already.
    562  *
    563  * Whether we'll continue to use the current MM locked memory management
    564  * for this I don't quite know (I'd prefer not to and just ditch that all
     565  * together), we'll see what's simplest to do.
    566  *
    567  *
    568  *
    569  * @section sec_pgmPhys_Changes             Changes
    570  *
    571  * Breakdown of the changes involved?
    572  */
    57322
    57423/*******************************************************************************
     
    61867
    61968/*******************************************************************************
    620 *   Internal Functions                                                         *
     69*   Structures and Typedefs                                                    *
    62170*******************************************************************************/
    622 static int                pgmR3InitPaging(PVM pVM);
    623 static void               pgmR3InitStats(PVM pVM);
    624 static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    625 static DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    626 static DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    627 static DECLCALLBACK(int)  pgmR3RelocatePhysHandler(PAVLROGCPHYSNODECORE pNode, void *pvUser);
    628 static DECLCALLBACK(int)  pgmR3RelocateVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser);
    629 static DECLCALLBACK(int)  pgmR3RelocateHyperVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser);
    630 #ifdef VBOX_STRICT
    631 static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
    632 #endif
    633 static DECLCALLBACK(int)  pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM);
    634 static DECLCALLBACK(int)  pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
    635 static DECLCALLBACK(int)  pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM);
    636 static DECLCALLBACK(int)  pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
    637 static DECLCALLBACK(int)  pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM);
    638 static DECLCALLBACK(int)  pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
    639 static int                pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0);
    640 static void               pgmR3ModeDataSwitch(PVM pVM, PVMCPU pVCpu, PGMMODE enmShw, PGMMODE enmGst);
    641 static PGMMODE            pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
    642 
    643 #ifdef VBOX_WITH_DEBUGGER
    644 /** @todo Convert the first two commands to 'info' items. */
    645 static DECLCALLBACK(int)  pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
    646 static DECLCALLBACK(int)  pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
    647 static DECLCALLBACK(int)  pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
    648 static DECLCALLBACK(int)  pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
    649 static DECLCALLBACK(int)  pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
    650 # ifdef VBOX_STRICT
    651 static DECLCALLBACK(int)  pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
    652 # endif
    653 static DECLCALLBACK(int)  pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
    654 #endif
     71/** The layout of the old (pre-SMP) PGM saved state; used only when loading
     72 * old saved states.  NOTE(review): this mirrors a serialized format, so the
     73 * field set/order must stay in sync with s_aPGMFields_Old — do not reorder.
typedef struct
{
    /** If set no conflict checks are required.  (boolean) */
    bool                            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t                        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR                         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting Guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS                        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool                            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE                         enmGuestMode;
} PGMOLD;
    65590
    65691
     
    65893*   Global Variables                                                           *
    65994*******************************************************************************/
    660 #ifdef VBOX_WITH_DEBUGGER
    661 /** Argument descriptors for '.pgmerror' and '.pgmerroroff'. */
    662 static const DBGCVARDESC g_aPgmErrorArgs[] =
    663 {
    664     /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,                         pszName,        pszDescription */
    665     {  0,           1,          DBGCVAR_CAT_STRING,     0,                              "where",        "Error injection location." },
      95/** SSM field descriptors for the per-VM PGM structure (saved/loaded via the
      96 * SSMFIELD mechanism); entry order defines the saved-state layout. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};
    667103
    668 static const DBGCVARDESC g_aPgmPhysToFileArgs[] =
    669 {
    670     /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,                         pszName,        pszDescription */
    671     {  1,           1,          DBGCVAR_CAT_STRING,     0,                              "file",         "The file name." },
    672     {  0,           1,          DBGCVAR_CAT_STRING,     0,                              "nozero",       "If present, zero pages are skipped." },
/** SSM field descriptors for the per-VCPU PGMCPU structure (A20 state and
 * guest paging mode); entry order defines the saved-state layout. */
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
    674111
    675 /** Command descriptors. */
    676 static const DBGCCMD    g_aCmds[] =
    677 {
    678     /* pszCmd,  cArgsMin, cArgsMax, paArgDesc,                cArgDescs,    pResultDesc,        fFlags,     pfnHandler          pszSyntax,          ....pszDescription */
    679     { "pgmram",        0, 0,        NULL,                     0,            NULL,               0,          pgmR3CmdRam,        "",                     "Display the ram ranges." },
    680     { "pgmmap",        0, 0,        NULL,                     0,            NULL,               0,          pgmR3CmdMap,        "",                     "Display the mapping ranges." },
    681     { "pgmsync",       0, 0,        NULL,                     0,            NULL,               0,          pgmR3CmdSync,       "",                     "Sync the CR3 page." },
    682     { "pgmerror",      0, 1,        &g_aPgmErrorArgs[0],      1,            NULL,               0,          pgmR3CmdError,      "",                     "Enables inject runtime of errors into parts of PGM." },
    683     { "pgmerroroff",   0, 1,        &g_aPgmErrorArgs[0],      1,            NULL,               0,          pgmR3CmdError,      "",                     "Disables inject runtime errors into parts of PGM." },
    684 #ifdef VBOX_STRICT
    685     { "pgmassertcr3",  0, 0,        NULL,                     0,            NULL,               0,          pgmR3CmdAssertCR3,  "",                     "Check the shadow CR3 mapping." },
    686 #endif
    687     { "pgmsyncalways", 0, 0,        NULL,                     0,            NULL,               0,          pgmR3CmdSyncAlways, "",                     "Toggle permanent CR3 syncing." },
    688     { "pgmphystofile", 1, 2,        &g_aPgmPhysToFileArgs[0], 2,            NULL,               0,          pgmR3CmdPhysToFile, "",                     "Save the physical memory to file." },
/** SSM field descriptors for loading old (pre-SMP) saved states; must match
 * the PGMOLD structure layout exactly. */
static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
    690 #endif
    691 
    692 
    693 
    694 
    695 /*
    696  * Shadow - 32-bit mode
    697  */
    698 #define PGM_SHW_TYPE                PGM_TYPE_32BIT
    699 #define PGM_SHW_NAME(name)          PGM_SHW_NAME_32BIT(name)
    700 #define PGM_SHW_NAME_RC_STR(name)   PGM_SHW_NAME_RC_32BIT_STR(name)
    701 #define PGM_SHW_NAME_R0_STR(name)   PGM_SHW_NAME_R0_32BIT_STR(name)
    702 #include "PGMShw.h"
    703 
    704 /* Guest - real mode */
    705 #define PGM_GST_TYPE                PGM_TYPE_REAL
    706 #define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
    707 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_REAL_STR(name)
    708 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_REAL_STR(name)
    709 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_REAL(name)
    710 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_32BIT_REAL_STR(name)
    711 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_32BIT_REAL_STR(name)
    712 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
    713 #define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD_PHYS
    714 #include "PGMBth.h"
    715 #include "PGMGstDefs.h"
    716 #include "PGMGst.h"
    717 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    718 #undef BTH_PGMPOOLKIND_ROOT
    719 #undef PGM_BTH_NAME
    720 #undef PGM_BTH_NAME_RC_STR
    721 #undef PGM_BTH_NAME_R0_STR
    722 #undef PGM_GST_TYPE
    723 #undef PGM_GST_NAME
    724 #undef PGM_GST_NAME_RC_STR
    725 #undef PGM_GST_NAME_R0_STR
    726 
    727 /* Guest - protected mode */
    728 #define PGM_GST_TYPE                PGM_TYPE_PROT
    729 #define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
    730 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_PROT_STR(name)
    731 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_PROT_STR(name)
    732 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
    733 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_32BIT_PROT_STR(name)
    734 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_32BIT_PROT_STR(name)
    735 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
    736 #define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD_PHYS
    737 #include "PGMBth.h"
    738 #include "PGMGstDefs.h"
    739 #include "PGMGst.h"
    740 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    741 #undef BTH_PGMPOOLKIND_ROOT
    742 #undef PGM_BTH_NAME
    743 #undef PGM_BTH_NAME_RC_STR
    744 #undef PGM_BTH_NAME_R0_STR
    745 #undef PGM_GST_TYPE
    746 #undef PGM_GST_NAME
    747 #undef PGM_GST_NAME_RC_STR
    748 #undef PGM_GST_NAME_R0_STR
    749 
    750 /* Guest - 32-bit mode */
    751 #define PGM_GST_TYPE                PGM_TYPE_32BIT
    752 #define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
    753 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_32BIT_STR(name)
    754 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_32BIT_STR(name)
    755 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_32BIT(name)
    756 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_32BIT_32BIT_STR(name)
    757 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)
    758 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
    759 #define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
    760 #define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD
    761 #include "PGMBth.h"
    762 #include "PGMGstDefs.h"
    763 #include "PGMGst.h"
    764 #undef BTH_PGMPOOLKIND_PT_FOR_BIG
    765 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    766 #undef BTH_PGMPOOLKIND_ROOT
    767 #undef PGM_BTH_NAME
    768 #undef PGM_BTH_NAME_RC_STR
    769 #undef PGM_BTH_NAME_R0_STR
    770 #undef PGM_GST_TYPE
    771 #undef PGM_GST_NAME
    772 #undef PGM_GST_NAME_RC_STR
    773 #undef PGM_GST_NAME_R0_STR
    774 
    775 #undef PGM_SHW_TYPE
    776 #undef PGM_SHW_NAME
    777 #undef PGM_SHW_NAME_RC_STR
    778 #undef PGM_SHW_NAME_R0_STR
    779 
    780 
    781 /*
    782  * Shadow - PAE mode
    783  */
    784 #define PGM_SHW_TYPE                PGM_TYPE_PAE
    785 #define PGM_SHW_NAME(name)          PGM_SHW_NAME_PAE(name)
    786 #define PGM_SHW_NAME_RC_STR(name)   PGM_SHW_NAME_RC_PAE_STR(name)
    787 #define PGM_SHW_NAME_R0_STR(name)   PGM_SHW_NAME_R0_PAE_STR(name)
    788 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
    789 #include "PGMShw.h"
    790 
    791 /* Guest - real mode */
    792 #define PGM_GST_TYPE                PGM_TYPE_REAL
    793 #define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
    794 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_REAL_STR(name)
    795 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_REAL_STR(name)
    796 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
    797 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_PAE_REAL_STR(name)
    798 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_PAE_REAL_STR(name)
    799 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    800 #define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_PHYS
    801 #include "PGMGstDefs.h"
    802 #include "PGMBth.h"
    803 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    804 #undef BTH_PGMPOOLKIND_ROOT
    805 #undef PGM_BTH_NAME
    806 #undef PGM_BTH_NAME_RC_STR
    807 #undef PGM_BTH_NAME_R0_STR
    808 #undef PGM_GST_TYPE
    809 #undef PGM_GST_NAME
    810 #undef PGM_GST_NAME_RC_STR
    811 #undef PGM_GST_NAME_R0_STR
    812 
    813 /* Guest - protected mode */
    814 #define PGM_GST_TYPE                PGM_TYPE_PROT
    815 #define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
    816 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_PROT_STR(name)
    817 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_PROT_STR(name)
    818 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
    819 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_PAE_PROT_STR(name)
    820 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_PAE_PROT_STR(name)
    821 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    822 #define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_PHYS
    823 #include "PGMGstDefs.h"
    824 #include "PGMBth.h"
    825 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    826 #undef BTH_PGMPOOLKIND_ROOT
    827 #undef PGM_BTH_NAME
    828 #undef PGM_BTH_NAME_RC_STR
    829 #undef PGM_BTH_NAME_R0_STR
    830 #undef PGM_GST_TYPE
    831 #undef PGM_GST_NAME
    832 #undef PGM_GST_NAME_RC_STR
    833 #undef PGM_GST_NAME_R0_STR
    834 
    835 /* Guest - 32-bit mode */
    836 #define PGM_GST_TYPE                PGM_TYPE_32BIT
    837 #define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
    838 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_32BIT_STR(name)
    839 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_32BIT_STR(name)
    840 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_32BIT(name)
    841 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_PAE_32BIT_STR(name)
    842 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_PAE_32BIT_STR(name)
    843 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
    844 #define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
    845 #define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_FOR_32BIT
    846 #include "PGMGstDefs.h"
    847 #include "PGMBth.h"
    848 #undef BTH_PGMPOOLKIND_PT_FOR_BIG
    849 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    850 #undef BTH_PGMPOOLKIND_ROOT
    851 #undef PGM_BTH_NAME
    852 #undef PGM_BTH_NAME_RC_STR
    853 #undef PGM_BTH_NAME_R0_STR
    854 #undef PGM_GST_TYPE
    855 #undef PGM_GST_NAME
    856 #undef PGM_GST_NAME_RC_STR
    857 #undef PGM_GST_NAME_R0_STR
    858 
    859 /* Guest - PAE mode */
    860 #define PGM_GST_TYPE                PGM_TYPE_PAE
    861 #define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
    862 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_PAE_STR(name)
    863 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_PAE_STR(name)
    864 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PAE(name)
    865 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_PAE_PAE_STR(name)
    866 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_PAE_PAE_STR(name)
    867 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
    868 #define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
    869 #define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT
    870 #include "PGMBth.h"
    871 #include "PGMGstDefs.h"
    872 #include "PGMGst.h"
    873 #undef BTH_PGMPOOLKIND_PT_FOR_BIG
    874 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    875 #undef BTH_PGMPOOLKIND_ROOT
    876 #undef PGM_BTH_NAME
    877 #undef PGM_BTH_NAME_RC_STR
    878 #undef PGM_BTH_NAME_R0_STR
    879 #undef PGM_GST_TYPE
    880 #undef PGM_GST_NAME
    881 #undef PGM_GST_NAME_RC_STR
    882 #undef PGM_GST_NAME_R0_STR
    883 
    884 #undef PGM_SHW_TYPE
    885 #undef PGM_SHW_NAME
    886 #undef PGM_SHW_NAME_RC_STR
    887 #undef PGM_SHW_NAME_R0_STR
    888 
    889 
    890 /*
    891  * Shadow - AMD64 mode
    892  */
    893 #define PGM_SHW_TYPE                PGM_TYPE_AMD64
    894 #define PGM_SHW_NAME(name)          PGM_SHW_NAME_AMD64(name)
    895 #define PGM_SHW_NAME_RC_STR(name)   PGM_SHW_NAME_RC_AMD64_STR(name)
    896 #define PGM_SHW_NAME_R0_STR(name)   PGM_SHW_NAME_R0_AMD64_STR(name)
    897 #include "PGMShw.h"
    898 
    899 #ifdef VBOX_WITH_64_BITS_GUESTS
    900 /* Guest - AMD64 mode */
    901 # define PGM_GST_TYPE               PGM_TYPE_AMD64
    902 # define PGM_GST_NAME(name)         PGM_GST_NAME_AMD64(name)
    903 # define PGM_GST_NAME_RC_STR(name)  PGM_GST_NAME_RC_AMD64_STR(name)
    904 # define PGM_GST_NAME_R0_STR(name)  PGM_GST_NAME_R0_AMD64_STR(name)
    905 # define PGM_BTH_NAME(name)         PGM_BTH_NAME_AMD64_AMD64(name)
    906 # define PGM_BTH_NAME_RC_STR(name)  PGM_BTH_NAME_RC_AMD64_AMD64_STR(name)
    907 # define PGM_BTH_NAME_R0_STR(name)  PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)
    908 # define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_PAE_PT_FOR_PAE_PT
    909 # define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
    910 # define BTH_PGMPOOLKIND_ROOT       PGMPOOLKIND_64BIT_PML4
    911 # include "PGMBth.h"
    912 # include "PGMGstDefs.h"
    913 # include "PGMGst.h"
    914 # undef BTH_PGMPOOLKIND_PT_FOR_BIG
    915 # undef BTH_PGMPOOLKIND_PT_FOR_PT
    916 # undef BTH_PGMPOOLKIND_ROOT
    917 # undef PGM_BTH_NAME
    918 # undef PGM_BTH_NAME_RC_STR
    919 # undef PGM_BTH_NAME_R0_STR
    920 # undef PGM_GST_TYPE
    921 # undef PGM_GST_NAME
    922 # undef PGM_GST_NAME_RC_STR
    923 # undef PGM_GST_NAME_R0_STR
    924 #endif /* VBOX_WITH_64_BITS_GUESTS */
    925 
    926 #undef PGM_SHW_TYPE
    927 #undef PGM_SHW_NAME
    928 #undef PGM_SHW_NAME_RC_STR
    929 #undef PGM_SHW_NAME_R0_STR
    930 
    931 
    932 /*
    933  * Shadow - Nested paging mode
    934  */
    935 #define PGM_SHW_TYPE                PGM_TYPE_NESTED
    936 #define PGM_SHW_NAME(name)          PGM_SHW_NAME_NESTED(name)
    937 #define PGM_SHW_NAME_RC_STR(name)   PGM_SHW_NAME_RC_NESTED_STR(name)
    938 #define PGM_SHW_NAME_R0_STR(name)   PGM_SHW_NAME_R0_NESTED_STR(name)
    939 #include "PGMShw.h"
    940 
    941 /* Guest - real mode */
    942 #define PGM_GST_TYPE                PGM_TYPE_REAL
    943 #define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
    944 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_REAL_STR(name)
    945 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_REAL_STR(name)
    946 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_REAL(name)
    947 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_NESTED_REAL_STR(name)
    948 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_NESTED_REAL_STR(name)
    949 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    950 #include "PGMGstDefs.h"
    951 #include "PGMBth.h"
    952 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    953 #undef PGM_BTH_NAME
    954 #undef PGM_BTH_NAME_RC_STR
    955 #undef PGM_BTH_NAME_R0_STR
    956 #undef PGM_GST_TYPE
    957 #undef PGM_GST_NAME
    958 #undef PGM_GST_NAME_RC_STR
    959 #undef PGM_GST_NAME_R0_STR
    960 
    961 /* Guest - protected mode */
    962 #define PGM_GST_TYPE                PGM_TYPE_PROT
    963 #define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
    964 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_PROT_STR(name)
    965 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_PROT_STR(name)
    966 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_PROT(name)
    967 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_NESTED_PROT_STR(name)
    968 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_NESTED_PROT_STR(name)
    969 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    970 #include "PGMGstDefs.h"
    971 #include "PGMBth.h"
    972 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    973 #undef PGM_BTH_NAME
    974 #undef PGM_BTH_NAME_RC_STR
    975 #undef PGM_BTH_NAME_R0_STR
    976 #undef PGM_GST_TYPE
    977 #undef PGM_GST_NAME
    978 #undef PGM_GST_NAME_RC_STR
    979 #undef PGM_GST_NAME_R0_STR
    980 
    981 /* Guest - 32-bit mode */
    982 #define PGM_GST_TYPE                PGM_TYPE_32BIT
    983 #define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
    984 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_32BIT_STR(name)
    985 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_32BIT_STR(name)
    986 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_32BIT(name)
    987 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_NESTED_32BIT_STR(name)
    988 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_NESTED_32BIT_STR(name)
    989 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
    990 #define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
    991 #include "PGMGstDefs.h"
    992 #include "PGMBth.h"
    993 #undef BTH_PGMPOOLKIND_PT_FOR_BIG
    994 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    995 #undef PGM_BTH_NAME
    996 #undef PGM_BTH_NAME_RC_STR
    997 #undef PGM_BTH_NAME_R0_STR
    998 #undef PGM_GST_TYPE
    999 #undef PGM_GST_NAME
    1000 #undef PGM_GST_NAME_RC_STR
    1001 #undef PGM_GST_NAME_R0_STR
    1002 
    1003 /* Guest - PAE mode */
    1004 #define PGM_GST_TYPE                PGM_TYPE_PAE
    1005 #define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
    1006 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_PAE_STR(name)
    1007 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_PAE_STR(name)
    1008 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_PAE(name)
    1009 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_NESTED_PAE_STR(name)
    1010 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_NESTED_PAE_STR(name)
    1011 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
    1012 #define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
    1013 #include "PGMGstDefs.h"
    1014 #include "PGMBth.h"
    1015 #undef BTH_PGMPOOLKIND_PT_FOR_BIG
    1016 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    1017 #undef PGM_BTH_NAME
    1018 #undef PGM_BTH_NAME_RC_STR
    1019 #undef PGM_BTH_NAME_R0_STR
    1020 #undef PGM_GST_TYPE
    1021 #undef PGM_GST_NAME
    1022 #undef PGM_GST_NAME_RC_STR
    1023 #undef PGM_GST_NAME_R0_STR
    1024 
    1025 #ifdef VBOX_WITH_64_BITS_GUESTS
    1026 /* Guest - AMD64 mode */
    1027 # define PGM_GST_TYPE               PGM_TYPE_AMD64
    1028 # define PGM_GST_NAME(name)         PGM_GST_NAME_AMD64(name)
    1029 # define PGM_GST_NAME_RC_STR(name)  PGM_GST_NAME_RC_AMD64_STR(name)
    1030 # define PGM_GST_NAME_R0_STR(name)  PGM_GST_NAME_R0_AMD64_STR(name)
    1031 # define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_AMD64(name)
    1032 # define PGM_BTH_NAME_RC_STR(name)  PGM_BTH_NAME_RC_NESTED_AMD64_STR(name)
    1033 # define PGM_BTH_NAME_R0_STR(name)  PGM_BTH_NAME_R0_NESTED_AMD64_STR(name)
    1034 # define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_PAE_PT_FOR_PAE_PT
    1035 # define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
    1036 # include "PGMGstDefs.h"
    1037 # include "PGMBth.h"
    1038 # undef BTH_PGMPOOLKIND_PT_FOR_BIG
    1039 # undef BTH_PGMPOOLKIND_PT_FOR_PT
    1040 # undef PGM_BTH_NAME
    1041 # undef PGM_BTH_NAME_RC_STR
    1042 # undef PGM_BTH_NAME_R0_STR
    1043 # undef PGM_GST_TYPE
    1044 # undef PGM_GST_NAME
    1045 # undef PGM_GST_NAME_RC_STR
    1046 # undef PGM_GST_NAME_R0_STR
    1047 #endif /* VBOX_WITH_64_BITS_GUESTS */
    1048 
    1049 #undef PGM_SHW_TYPE
    1050 #undef PGM_SHW_NAME
    1051 #undef PGM_SHW_NAME_RC_STR
    1052 #undef PGM_SHW_NAME_R0_STR
    1053 
    1054 
    1055 /*
    1056  * Shadow - EPT
    1057  */
    1058 #define PGM_SHW_TYPE                PGM_TYPE_EPT
    1059 #define PGM_SHW_NAME(name)          PGM_SHW_NAME_EPT(name)
    1060 #define PGM_SHW_NAME_RC_STR(name)   PGM_SHW_NAME_RC_EPT_STR(name)
    1061 #define PGM_SHW_NAME_R0_STR(name)   PGM_SHW_NAME_R0_EPT_STR(name)
    1062 #include "PGMShw.h"
    1063 
    1064 /* Guest - real mode */
    1065 #define PGM_GST_TYPE                PGM_TYPE_REAL
    1066 #define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
    1067 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_REAL_STR(name)
    1068 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_REAL_STR(name)
    1069 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_REAL(name)
    1070 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_EPT_REAL_STR(name)
    1071 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_EPT_REAL_STR(name)
    1072 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    1073 #include "PGMGstDefs.h"
    1074 #include "PGMBth.h"
    1075 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    1076 #undef PGM_BTH_NAME
    1077 #undef PGM_BTH_NAME_RC_STR
    1078 #undef PGM_BTH_NAME_R0_STR
    1079 #undef PGM_GST_TYPE
    1080 #undef PGM_GST_NAME
    1081 #undef PGM_GST_NAME_RC_STR
    1082 #undef PGM_GST_NAME_R0_STR
    1083 
    1084 /* Guest - protected mode */
    1085 #define PGM_GST_TYPE                PGM_TYPE_PROT
    1086 #define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
    1087 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_PROT_STR(name)
    1088 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_PROT_STR(name)
    1089 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_PROT(name)
    1090 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_EPT_PROT_STR(name)
    1091 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_EPT_PROT_STR(name)
    1092 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    1093 #include "PGMGstDefs.h"
    1094 #include "PGMBth.h"
    1095 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    1096 #undef PGM_BTH_NAME
    1097 #undef PGM_BTH_NAME_RC_STR
    1098 #undef PGM_BTH_NAME_R0_STR
    1099 #undef PGM_GST_TYPE
    1100 #undef PGM_GST_NAME
    1101 #undef PGM_GST_NAME_RC_STR
    1102 #undef PGM_GST_NAME_R0_STR
    1103 
    1104 /* Guest - 32-bit mode */
    1105 #define PGM_GST_TYPE                PGM_TYPE_32BIT
    1106 #define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
    1107 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_32BIT_STR(name)
    1108 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_32BIT_STR(name)
    1109 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_32BIT(name)
    1110 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_EPT_32BIT_STR(name)
    1111 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_EPT_32BIT_STR(name)
    1112 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
    1113 #define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
    1114 #include "PGMGstDefs.h"
    1115 #include "PGMBth.h"
    1116 #undef BTH_PGMPOOLKIND_PT_FOR_BIG
    1117 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    1118 #undef PGM_BTH_NAME
    1119 #undef PGM_BTH_NAME_RC_STR
    1120 #undef PGM_BTH_NAME_R0_STR
    1121 #undef PGM_GST_TYPE
    1122 #undef PGM_GST_NAME
    1123 #undef PGM_GST_NAME_RC_STR
    1124 #undef PGM_GST_NAME_R0_STR
    1125 
    1126 /* Guest - PAE mode */
    1127 #define PGM_GST_TYPE                PGM_TYPE_PAE
    1128 #define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
    1129 #define PGM_GST_NAME_RC_STR(name)   PGM_GST_NAME_RC_PAE_STR(name)
    1130 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_PAE_STR(name)
    1131 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_PAE(name)
    1132 #define PGM_BTH_NAME_RC_STR(name)   PGM_BTH_NAME_RC_EPT_PAE_STR(name)
    1133 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_EPT_PAE_STR(name)
    1134 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
    1135 #define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
    1136 #include "PGMGstDefs.h"
    1137 #include "PGMBth.h"
    1138 #undef BTH_PGMPOOLKIND_PT_FOR_BIG
    1139 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    1140 #undef PGM_BTH_NAME
    1141 #undef PGM_BTH_NAME_RC_STR
    1142 #undef PGM_BTH_NAME_R0_STR
    1143 #undef PGM_GST_TYPE
    1144 #undef PGM_GST_NAME
    1145 #undef PGM_GST_NAME_RC_STR
    1146 #undef PGM_GST_NAME_R0_STR
    1147 
    1148 #ifdef VBOX_WITH_64_BITS_GUESTS
    1149 /* Guest - AMD64 mode */
    1150 # define PGM_GST_TYPE               PGM_TYPE_AMD64
    1151 # define PGM_GST_NAME(name)         PGM_GST_NAME_AMD64(name)
    1152 # define PGM_GST_NAME_RC_STR(name)  PGM_GST_NAME_RC_AMD64_STR(name)
    1153 # define PGM_GST_NAME_R0_STR(name)  PGM_GST_NAME_R0_AMD64_STR(name)
    1154 # define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_AMD64(name)
    1155 # define PGM_BTH_NAME_RC_STR(name)  PGM_BTH_NAME_RC_EPT_AMD64_STR(name)
    1156 # define PGM_BTH_NAME_R0_STR(name)  PGM_BTH_NAME_R0_EPT_AMD64_STR(name)
    1157 # define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_PAE_PT_FOR_PAE_PT
    1158 # define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
    1159 # include "PGMGstDefs.h"
    1160 # include "PGMBth.h"
    1161 # undef BTH_PGMPOOLKIND_PT_FOR_BIG
    1162 # undef BTH_PGMPOOLKIND_PT_FOR_PT
    1163 # undef PGM_BTH_NAME
    1164 # undef PGM_BTH_NAME_RC_STR
    1165 # undef PGM_BTH_NAME_R0_STR
    1166 # undef PGM_GST_TYPE
    1167 # undef PGM_GST_NAME
    1168 # undef PGM_GST_NAME_RC_STR
    1169 # undef PGM_GST_NAME_R0_STR
    1170 #endif /* VBOX_WITH_64_BITS_GUESTS */
    1171 
    1172 #undef PGM_SHW_TYPE
    1173 #undef PGM_SHW_NAME
    1174 #undef PGM_SHW_NAME_RC_STR
    1175 #undef PGM_SHW_NAME_R0_STR
    1176 
    1177 
    1178 
    1179 /**
    1180  * Initiates the paging of VM.
    1181  *
    1182  * @returns VBox status code.
    1183  * @param   pVM     Pointer to VM structure.
    1184  */
    1185 VMMR3DECL(int) PGMR3Init(PVM pVM)
    1186 {
    1187     LogFlow(("PGMR3Init:\n"));
    1188     PCFGMNODE pCfgPGM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM");
    1189     int rc;
    1190 
    1191     /*
    1192      * Assert alignment and sizes.
    1193      */
    1194     AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
    1195     AssertCompileMemberAlignment(PGM, CritSect, sizeof(uintptr_t));
    1196 
    1197     /*
    1198      * Init the structure.
    1199      */
    1200     pVM->pgm.s.offVM       = RT_OFFSETOF(VM, pgm.s);
    1201     pVM->pgm.s.offVCpuPGM  = RT_OFFSETOF(VMCPU, pgm.s);
    1202 
    1203     /* Init the per-CPU part. */
    1204     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    1205     {
    1206         PVMCPU pVCpu = &pVM->aCpus[i];
    1207         PPGMCPU pPGM = &pVCpu->pgm.s;
    1208 
    1209         pPGM->offVM      = (uintptr_t)&pVCpu->pgm.s - (uintptr_t)pVM;
    1210         pPGM->offVCpu    = RT_OFFSETOF(VMCPU, pgm.s);
    1211         pPGM->offPGM     = (uintptr_t)&pVCpu->pgm.s - (uintptr_t)&pVM->pgm.s;
    1212 
    1213         pPGM->enmShadowMode    = PGMMODE_INVALID;
    1214         pPGM->enmGuestMode     = PGMMODE_INVALID;
    1215 
    1216         pPGM->GCPhysCR3        = NIL_RTGCPHYS;
    1217 
    1218         pPGM->pGstPaePdptR3    = NULL;
    1219 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    1220         pPGM->pGstPaePdptR0    = NIL_RTR0PTR;
    1221 #endif
    1222         pPGM->pGstPaePdptRC    = NIL_RTRCPTR;
    1223         for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.apGstPaePDsR3); i++)
    1224         {
    1225             pPGM->apGstPaePDsR3[i]             = NULL;
    1226 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    1227             pPGM->apGstPaePDsR0[i]             = NIL_RTR0PTR;
    1228 #endif
    1229             pPGM->apGstPaePDsRC[i]             = NIL_RTRCPTR;
    1230             pPGM->aGCPhysGstPaePDs[i]          = NIL_RTGCPHYS;
    1231             pPGM->aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
    1232         }
    1233 
    1234         pPGM->fA20Enabled      = true;
    1235     }
    1236 
    1237     pVM->pgm.s.enmHostMode      = SUPPAGINGMODE_INVALID;
    1238     pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */
    1239     pVM->pgm.s.GCPtrPrevRamRangeMapping = MM_HYPER_AREA_ADDRESS;
    1240 
    1241     rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,
    1242 #ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
    1243                             true
    1244 #else
    1245                             false
    1246 #endif
    1247                            );
    1248     AssertLogRelRCReturn(rc, rc);
    1249 
    1250 #if HC_ARCH_BITS == 64 || 1 /** @todo 4GB/32-bit: remove || 1 later and adjust the limit. */
    1251     rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
    1252 #else
    1253     rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE);
    1254 #endif
    1255     AssertLogRelRCReturn(rc, rc);
    1256     for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
    1257         pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
    1258 
    1259     /*
    1260      * Get the configured RAM size - to estimate saved state size.
    1261      */
    1262     uint64_t    cbRam;
    1263     rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    1264     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
    1265         cbRam = 0;
    1266     else if (RT_SUCCESS(rc))
    1267     {
    1268         if (cbRam < PAGE_SIZE)
    1269             cbRam = 0;
    1270         cbRam = RT_ALIGN_64(cbRam, PAGE_SIZE);
    1271     }
    1272     else
    1273     {
    1274         AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Rrc.\n", rc));
    1275         return rc;
    1276     }
    1277 
    1278     /*
    1279      * Register callbacks, string formatters and the saved state data unit.
    1280      */
    1281 #ifdef VBOX_STRICT
    1282     VMR3AtStateRegister(pVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
    1283 #endif
    1284     PGMRegisterStringFormatTypes();
    1285 
    1286     rc = SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
    1287                                pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
    1288                                NULL, pgmR3SaveExec, pgmR3SaveDone,
    1289                                NULL, pgmR3Load, NULL);
    1290     if (RT_FAILURE(rc))
    1291         return rc;
    1292 
    1293     /*
    1294      * Initialize the PGM critical section and flush the phys TLBs
    1295      */
    1296     rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSect, "PGM");
    1297     AssertRCReturn(rc, rc);
    1298 
    1299     PGMR3PhysChunkInvalidateTLB(pVM);
    1300     PGMPhysInvalidatePageR3MapTLB(pVM);
    1301     PGMPhysInvalidatePageR0MapTLB(pVM);
    1302     PGMPhysInvalidatePageGCMapTLB(pVM);
    1303 
    1304     /*
    1305      * For the time being we sport a full set of handy pages in addition to the base
    1306      * memory to simplify things.
    1307      */
    1308     rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
    1309     AssertRCReturn(rc, rc);
    1310 
    1311     /*
    1312      * Trees
    1313      */
    1314     rc = MMHyperAlloc(pVM, sizeof(PGMTREES), 0, MM_TAG_PGM, (void **)&pVM->pgm.s.pTreesR3);
    1315     if (RT_SUCCESS(rc))
    1316     {
    1317         pVM->pgm.s.pTreesR0 = MMHyperR3ToR0(pVM, pVM->pgm.s.pTreesR3);
    1318         pVM->pgm.s.pTreesRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pTreesR3);
    1319 
    1320         /*
    1321          * Alocate the zero page.
    1322          */
    1323         rc = MMHyperAlloc(pVM, PAGE_SIZE, PAGE_SIZE, MM_TAG_PGM, &pVM->pgm.s.pvZeroPgR3);
    1324     }
    1325     if (RT_SUCCESS(rc))
    1326     {
    1327         pVM->pgm.s.pvZeroPgRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pvZeroPgR3);
    1328         pVM->pgm.s.pvZeroPgR0 = MMHyperR3ToR0(pVM, pVM->pgm.s.pvZeroPgR3);
    1329         pVM->pgm.s.HCPhysZeroPg = MMR3HyperHCVirt2HCPhys(pVM, pVM->pgm.s.pvZeroPgR3);
    1330         AssertRelease(pVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS);
    1331 
    1332         /*
    1333          * Init the paging.
    1334          */
    1335         rc = pgmR3InitPaging(pVM);
    1336     }
    1337     if (RT_SUCCESS(rc))
    1338     {
    1339         /*
    1340          * Init the page pool.
    1341          */
    1342         rc = pgmR3PoolInit(pVM);
    1343     }
    1344     if (RT_SUCCESS(rc))
    1345     {
    1346         for (VMCPUID i = 0; i < pVM->cCpus; i++)
    1347         {
    1348             PVMCPU pVCpu = &pVM->aCpus[i];
    1349             rc = PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
    1350             if (RT_FAILURE(rc))
    1351                 break;
    1352         }
    1353     }
    1354 
    1355     if (RT_SUCCESS(rc))
    1356     {
    1357         /*
    1358          * Info & statistics
    1359          */
    1360         DBGFR3InfoRegisterInternal(pVM, "mode",
    1361                                    "Shows the current paging mode. "
    1362                                    "Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing's given.",
    1363                                    pgmR3InfoMode);
    1364         DBGFR3InfoRegisterInternal(pVM, "pgmcr3",
    1365                                    "Dumps all the entries in the top level paging table. No arguments.",
    1366                                    pgmR3InfoCr3);
    1367         DBGFR3InfoRegisterInternal(pVM, "phys",
    1368                                    "Dumps all the physical address ranges. No arguments.",
    1369                                    pgmR3PhysInfo);
    1370         DBGFR3InfoRegisterInternal(pVM, "handlers",
    1371                                    "Dumps physical, virtual and hyper virtual handlers. "
    1372                                    "Pass 'phys', 'virt', 'hyper' as argument if only one kind is wanted."
    1373                                    "Add 'nost' if the statistics are unwanted, use together with 'all' or explicit selection.",
    1374                                    pgmR3InfoHandlers);
    1375         DBGFR3InfoRegisterInternal(pVM, "mappings",
    1376                                    "Dumps guest mappings.",
    1377                                    pgmR3MapInfo);
    1378 
    1379         pgmR3InitStats(pVM);
    1380 
    1381 #ifdef VBOX_WITH_DEBUGGER
    1382         /*
    1383          * Debugger commands.
    1384          */
    1385         static bool s_fRegisteredCmds = false;
    1386         if (!s_fRegisteredCmds)
    1387         {
    1388             int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
    1389             if (RT_SUCCESS(rc))
    1390                 s_fRegisteredCmds = true;
    1391         }
    1392 #endif
    1393         return VINF_SUCCESS;
    1394     }
    1395 
    1396     /* Almost no cleanup necessary, MM frees all memory. */
    1397     PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
    1398 
    1399     return rc;
    1400 }
    1401 
    1402 
    1403 /**
    1404  * Initializes the per-VCPU PGM.
    1405  *
    1406  * @returns VBox status code.
    1407  * @param   pVM         The VM to operate on.
    1408  */
    1409 VMMR3DECL(int) PGMR3InitCPU(PVM pVM)
    1410 {
    1411     LogFlow(("PGMR3InitCPU\n"));
    1412     return VINF_SUCCESS;
    1413 }
    1414 
    1415 
    1416 /**
    1417  * Init paging.
    1418  *
    1419  * Since we need to check what mode the host is operating in before we can choose
    1420  * the right paging functions for the host we have to delay this until R0 has
    1421  * been initialized.
    1422  *
    1423  * @returns VBox status code.
    1424  * @param   pVM     VM handle.
    1425  */
    1426 static int pgmR3InitPaging(PVM pVM)
    1427 {
    1428     /*
    1429      * Force a recalculation of modes and switcher so everyone gets notified.
    1430      */
    1431     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    1432     {
    1433         PVMCPU pVCpu = &pVM->aCpus[i];
    1434 
    1435         pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
    1436         pVCpu->pgm.s.enmGuestMode  = PGMMODE_INVALID;
    1437     }
    1438 
    1439     pVM->pgm.s.enmHostMode   = SUPPAGINGMODE_INVALID;
    1440 
    1441     /*
    1442      * Allocate static mapping space for whatever the cr3 register
    1443      * points to and in the case of PAE mode to the 4 PDs.
    1444      */
    1445     int rc = MMR3HyperReserve(pVM, PAGE_SIZE * 5, "CR3 mapping", &pVM->pgm.s.GCPtrCR3Mapping);
    1446     if (RT_FAILURE(rc))
    1447     {
    1448         AssertMsgFailed(("Failed to reserve two pages for cr mapping in HMA, rc=%Rrc\n", rc));
    1449         return rc;
    1450     }
    1451     MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
    1452 
    1453     /*
    1454      * Allocate pages for the three possible intermediate contexts
    1455      * (AMD64, PAE and plain 32-Bit). We maintain all three contexts
    1456      * for the sake of simplicity. The AMD64 uses the PAE for the
    1457      * lower levels, making the total number of pages 11 (3 + 7 + 1).
    1458      *
     * We assume that two page tables will be enough for the core code
    1460      * mappings (HC virtual and identity).
    1461      */
    1462     pVM->pgm.s.pInterPD         = (PX86PD)MMR3PageAllocLow(pVM);    AssertReturn(pVM->pgm.s.pInterPD,         VERR_NO_PAGE_MEMORY);
    1463     pVM->pgm.s.apInterPTs[0]    = (PX86PT)MMR3PageAllocLow(pVM);    AssertReturn(pVM->pgm.s.apInterPTs[0],    VERR_NO_PAGE_MEMORY);
    1464     pVM->pgm.s.apInterPTs[1]    = (PX86PT)MMR3PageAllocLow(pVM);    AssertReturn(pVM->pgm.s.apInterPTs[1],    VERR_NO_PAGE_MEMORY);
    1465     pVM->pgm.s.apInterPaePTs[0] = (PX86PTPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePTs[0], VERR_NO_PAGE_MEMORY);
    1466     pVM->pgm.s.apInterPaePTs[1] = (PX86PTPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePTs[1], VERR_NO_PAGE_MEMORY);
    1467     pVM->pgm.s.apInterPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePDs[0], VERR_NO_PAGE_MEMORY);
    1468     pVM->pgm.s.apInterPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePDs[1], VERR_NO_PAGE_MEMORY);
    1469     pVM->pgm.s.apInterPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePDs[2], VERR_NO_PAGE_MEMORY);
    1470     pVM->pgm.s.apInterPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM);    AssertReturn(pVM->pgm.s.apInterPaePDs[3], VERR_NO_PAGE_MEMORY);
    1471     pVM->pgm.s.pInterPaePDPT    = (PX86PDPT)MMR3PageAllocLow(pVM);  AssertReturn(pVM->pgm.s.pInterPaePDPT,    VERR_NO_PAGE_MEMORY);
    1472     pVM->pgm.s.pInterPaePDPT64  = (PX86PDPT)MMR3PageAllocLow(pVM);  AssertReturn(pVM->pgm.s.pInterPaePDPT64,  VERR_NO_PAGE_MEMORY);
    1473     pVM->pgm.s.pInterPaePML4    = (PX86PML4)MMR3PageAllocLow(pVM);  AssertReturn(pVM->pgm.s.pInterPaePML4,    VERR_NO_PAGE_MEMORY);
    1474 
    1475     pVM->pgm.s.HCPhysInterPD = MMPage2Phys(pVM, pVM->pgm.s.pInterPD);
    1476     AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
    1477     pVM->pgm.s.HCPhysInterPaePDPT = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT);
    1478     AssertRelease(pVM->pgm.s.HCPhysInterPaePDPT != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPT & PAGE_OFFSET_MASK));
    1479     pVM->pgm.s.HCPhysInterPaePML4 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePML4);
    1480     AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) && pVM->pgm.s.HCPhysInterPaePML4 < 0xffffffff);
    1481 
    1482     /*
    1483      * Initialize the pages, setting up the PML4 and PDPT for repetitive 4GB action.
    1484      */
    1485     ASMMemZeroPage(pVM->pgm.s.pInterPD);
    1486     ASMMemZeroPage(pVM->pgm.s.apInterPTs[0]);
    1487     ASMMemZeroPage(pVM->pgm.s.apInterPTs[1]);
    1488 
    1489     ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[0]);
    1490     ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[1]);
    1491 
    1492     ASMMemZeroPage(pVM->pgm.s.pInterPaePDPT);
    1493     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apInterPaePDs); i++)
    1494     {
    1495         ASMMemZeroPage(pVM->pgm.s.apInterPaePDs[i]);
    1496         pVM->pgm.s.pInterPaePDPT->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT
    1497                                           | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[i]);
    1498     }
    1499 
    1500     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.pInterPaePDPT64->a); i++)
    1501     {
    1502         const unsigned iPD = i % RT_ELEMENTS(pVM->pgm.s.apInterPaePDs);
    1503         pVM->pgm.s.pInterPaePDPT64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
    1504                                             | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[iPD]);
    1505     }
    1506 
    1507     RTHCPHYS HCPhysInterPaePDPT64 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT64);
    1508     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.pInterPaePML4->a); i++)
    1509         pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
    1510                                          | HCPhysInterPaePDPT64;
    1511 
    1512     /*
    1513      * Initialize paging workers and mode from current host mode
    1514      * and the guest running in real mode.
    1515      */
    1516     pVM->pgm.s.enmHostMode = SUPR3GetPagingMode();
    1517     switch (pVM->pgm.s.enmHostMode)
    1518     {
    1519         case SUPPAGINGMODE_32_BIT:
    1520         case SUPPAGINGMODE_32_BIT_GLOBAL:
    1521         case SUPPAGINGMODE_PAE:
    1522         case SUPPAGINGMODE_PAE_GLOBAL:
    1523         case SUPPAGINGMODE_PAE_NX:
    1524         case SUPPAGINGMODE_PAE_GLOBAL_NX:
    1525             break;
    1526 
    1527         case SUPPAGINGMODE_AMD64:
    1528         case SUPPAGINGMODE_AMD64_GLOBAL:
    1529         case SUPPAGINGMODE_AMD64_NX:
    1530         case SUPPAGINGMODE_AMD64_GLOBAL_NX:
    1531 #ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
    1532             if (ARCH_BITS != 64)
    1533             {
    1534                 AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
    1535                 LogRel(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
    1536                 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    1537             }
    1538 #endif
    1539             break;
    1540         default:
    1541             AssertMsgFailed(("Host mode %d is not supported\n", pVM->pgm.s.enmHostMode));
    1542             return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    1543     }
    1544     rc = pgmR3ModeDataInit(pVM, false /* don't resolve GC and R0 syms yet */);
    1545     if (RT_SUCCESS(rc))
    1546     {
    1547         LogFlow(("pgmR3InitPaging: returns successfully\n"));
    1548 #if HC_ARCH_BITS == 64
    1549         LogRel(("Debug: HCPhysInterPD=%RHp HCPhysInterPaePDPT=%RHp HCPhysInterPaePML4=%RHp\n",
    1550                 pVM->pgm.s.HCPhysInterPD, pVM->pgm.s.HCPhysInterPaePDPT, pVM->pgm.s.HCPhysInterPaePML4));
    1551         LogRel(("Debug: apInterPTs={%RHp,%RHp} apInterPaePTs={%RHp,%RHp} apInterPaePDs={%RHp,%RHp,%RHp,%RHp} pInterPaePDPT64=%RHp\n",
    1552                 MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]),    MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]),
    1553                 MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]),
    1554                 MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
    1555                 MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT64)));
    1556 #endif
    1557 
    1558         return VINF_SUCCESS;
    1559     }
    1560 
    1561     LogFlow(("pgmR3InitPaging: returns %Rrc\n", rc));
    1562     return rc;
    1563 }
    1564 
    1565 
     1566 /**
     1567  * Initializes the PGM statistics: registers the release counters with STAM
     1568  * and, in VBOX_WITH_STATISTICS builds, the full set of debug counters and
     1569  * profiling samples (both the VM-wide PGM stats and the per-VCPU PGMCPU stats).
     1570  */
    1569 static void pgmR3InitStats(PVM pVM)
    1570 {
    1571     PPGM pPGM = &pVM->pgm.s;
    1572     int  rc;
    1573 
    1574     /* Common - misc variables */
    1575     STAM_REL_REG(pVM, &pPGM->cAllPages,                     STAMTYPE_U32,     "/PGM/Page/cAllPages",                STAMUNIT_OCCURENCES,     "The total number of pages.");
    1576     STAM_REL_REG(pVM, &pPGM->cPrivatePages,                 STAMTYPE_U32,     "/PGM/Page/cPrivatePages",            STAMUNIT_OCCURENCES,     "The number of private pages.");
    1577     STAM_REL_REG(pVM, &pPGM->cSharedPages,                  STAMTYPE_U32,     "/PGM/Page/cSharedPages",             STAMUNIT_OCCURENCES,     "The number of shared pages.");
    1578     STAM_REL_REG(pVM, &pPGM->cZeroPages,                    STAMTYPE_U32,     "/PGM/Page/cZeroPages",               STAMUNIT_OCCURENCES,     "The number of zero backed pages.");
    1579     STAM_REL_REG(pVM, &pPGM->cHandyPages,                   STAMTYPE_U32,     "/PGM/Page/cHandyPages",              STAMUNIT_OCCURENCES,     "The number of handy pages (not included in cAllPages).");
    1580     STAM_REL_REG(pVM, &pPGM->cRelocations,                  STAMTYPE_COUNTER, "/PGM/cRelocations",                  STAMUNIT_OCCURENCES,     "Number of hypervisor relocations.");
    1581     STAM_REL_REG(pVM, &pPGM->ChunkR3Map.c,                  STAMTYPE_U32,     "/PGM/ChunkR3Map/c",                  STAMUNIT_OCCURENCES,     "Number of mapped chunks.");
    1582     STAM_REL_REG(pVM, &pPGM->ChunkR3Map.cMax,               STAMTYPE_U32,     "/PGM/ChunkR3Map/cMax",               STAMUNIT_OCCURENCES,     "Maximum number of mapped chunks.");
    1583 
    1584 #ifdef VBOX_WITH_STATISTICS
    1585 
    1586 # define PGM_REG_COUNTER(a, b, c) \
    1587         rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b); \
    1588         AssertRC(rc);
    1589 
    1590 # define PGM_REG_COUNTER_BYTES(a, b, c) \
    1591         rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, c, b); \
    1592         AssertRC(rc);
    1593 
    1594 # define PGM_REG_PROFILE(a, b, c) \
    1595         rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b); \
    1596         AssertRC(rc);
    1597 
    1598     PGM_REG_COUNTER(&pPGM->StatR3DetectedConflicts,           "/PGM/R3/DetectedConflicts",          "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
    1599     PGM_REG_PROFILE(&pPGM->StatR3ResolveConflict,             "/PGM/R3/ResolveConflict",            "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
    1600     PGM_REG_COUNTER(&pPGM->StatR3PhysRead,                    "/PGM/R3/Phys/Read",                  "The number of times PGMPhysRead was called.");
    1601     PGM_REG_COUNTER_BYTES(&pPGM->StatR3PhysReadBytes,         "/PGM/R3/Phys/Read/Bytes",            "The number of bytes read by PGMPhysRead.");
    1602     PGM_REG_COUNTER(&pPGM->StatR3PhysWrite,                   "/PGM/R3/Phys/Write",                 "The number of times PGMPhysWrite was called.");
    1603     PGM_REG_COUNTER_BYTES(&pPGM->StatR3PhysWriteBytes,        "/PGM/R3/Phys/Write/Bytes",           "The number of bytes written by PGMPhysWrite.");
    1604     PGM_REG_COUNTER(&pPGM->StatR3PhysSimpleRead,              "/PGM/R3/Phys/Simple/Read",           "The number of times PGMPhysSimpleReadGCPtr was called.");
    1605     PGM_REG_COUNTER_BYTES(&pPGM->StatR3PhysSimpleReadBytes,   "/PGM/R3/Phys/Simple/Read/Bytes",     "The number of bytes read by PGMPhysSimpleReadGCPtr.");
    1606     PGM_REG_COUNTER(&pPGM->StatR3PhysSimpleWrite,             "/PGM/R3/Phys/Simple/Write",          "The number of times PGMPhysSimpleWriteGCPtr was called.");
    1607     PGM_REG_COUNTER_BYTES(&pPGM->StatR3PhysSimpleWriteBytes,  "/PGM/R3/Phys/Simple/Write/Bytes",    "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
    1608 
    1609     PGM_REG_COUNTER(&pPGM->StatRZChunkR3MapTlbHits,           "/PGM/ChunkR3Map/TlbHitsRZ",          "TLB hits.");
    1610     PGM_REG_COUNTER(&pPGM->StatRZChunkR3MapTlbMisses,         "/PGM/ChunkR3Map/TlbMissesRZ",        "TLB misses.");
    1611     PGM_REG_COUNTER(&pPGM->StatRZPageMapTlbHits,              "/PGM/RZ/Page/MapTlbHits",            "TLB hits.");
    1612     PGM_REG_COUNTER(&pPGM->StatRZPageMapTlbMisses,            "/PGM/RZ/Page/MapTlbMisses",          "TLB misses.");
    1613     PGM_REG_COUNTER(&pPGM->StatR3ChunkR3MapTlbHits,           "/PGM/ChunkR3Map/TlbHitsR3",          "TLB hits.");
    1614     PGM_REG_COUNTER(&pPGM->StatR3ChunkR3MapTlbMisses,         "/PGM/ChunkR3Map/TlbMissesR3",        "TLB misses.");
    1615     PGM_REG_COUNTER(&pPGM->StatR3PageMapTlbHits,              "/PGM/R3/Page/MapTlbHits",            "TLB hits.");
    1616     PGM_REG_COUNTER(&pPGM->StatR3PageMapTlbMisses,            "/PGM/R3/Page/MapTlbMisses",          "TLB misses.");
    1617 
    1618     PGM_REG_PROFILE(&pPGM->StatRZSyncCR3HandlerVirtualUpdate, "/PGM/RZ/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates.");
    1619     PGM_REG_PROFILE(&pPGM->StatRZSyncCR3HandlerVirtualReset,  "/PGM/RZ/SyncCR3/Handlers/VirtualReset",  "Profiling of the virtual handler resets.");
    1620     PGM_REG_PROFILE(&pPGM->StatR3SyncCR3HandlerVirtualUpdate, "/PGM/R3/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates.");
    1621     PGM_REG_PROFILE(&pPGM->StatR3SyncCR3HandlerVirtualReset,  "/PGM/R3/SyncCR3/Handlers/VirtualReset",  "Profiling of the virtual handler resets.");
    1622 
    1623     PGM_REG_COUNTER(&pPGM->StatRZPhysHandlerReset,            "/PGM/RZ/PhysHandlerReset",           "The number of times PGMHandlerPhysicalReset is called.");
    1624     PGM_REG_COUNTER(&pPGM->StatR3PhysHandlerReset,            "/PGM/R3/PhysHandlerReset",           "The number of times PGMHandlerPhysicalReset is called.");
    1625     PGM_REG_PROFILE(&pPGM->StatRZVirtHandlerSearchByPhys,     "/PGM/RZ/VirtHandlerSearchByPhys",    "Profiling of pgmHandlerVirtualFindByPhysAddr.");
    1626     PGM_REG_PROFILE(&pPGM->StatR3VirtHandlerSearchByPhys,     "/PGM/R3/VirtHandlerSearchByPhys",    "Profiling of pgmHandlerVirtualFindByPhysAddr.");
    1627 
    1628     PGM_REG_COUNTER(&pPGM->StatRZPageReplaceShared,           "/PGM/RZ/Page/ReplacedShared",        "Times a shared page was replaced.");
    1629     PGM_REG_COUNTER(&pPGM->StatRZPageReplaceZero,             "/PGM/RZ/Page/ReplacedZero",          "Times the zero page was replaced.");
    1630 /// @todo    PGM_REG_COUNTER(&pPGM->StatRZPageHandyAllocs,             "/PGM/RZ/Page/HandyAllocs",               "Number of times we've allocated more handy pages.");
    1631     PGM_REG_COUNTER(&pPGM->StatR3PageReplaceShared,           "/PGM/R3/Page/ReplacedShared",        "Times a shared page was replaced.");
    1632     PGM_REG_COUNTER(&pPGM->StatR3PageReplaceZero,             "/PGM/R3/Page/ReplacedZero",          "Times the zero page was replaced.");
    1633 /// @todo    PGM_REG_COUNTER(&pPGM->StatR3PageHandyAllocs,             "/PGM/R3/Page/HandyAllocs",               "Number of times we've allocated more handy pages.");
    1634 
    1635     PGM_REG_COUNTER(&pPGM->StatRZPhysRead,                    "/PGM/RZ/Phys/Read",                  "The number of times PGMPhysRead was called.");
    1636     PGM_REG_COUNTER_BYTES(&pPGM->StatRZPhysReadBytes,         "/PGM/RZ/Phys/Read/Bytes",            "The number of bytes read by PGMPhysRead.");
    1637     PGM_REG_COUNTER(&pPGM->StatRZPhysWrite,                   "/PGM/RZ/Phys/Write",                 "The number of times PGMPhysWrite was called.");
    1638     PGM_REG_COUNTER_BYTES(&pPGM->StatRZPhysWriteBytes,        "/PGM/RZ/Phys/Write/Bytes",           "The number of bytes written by PGMPhysWrite.");
    1639     PGM_REG_COUNTER(&pPGM->StatRZPhysSimpleRead,              "/PGM/RZ/Phys/Simple/Read",           "The number of times PGMPhysSimpleReadGCPtr was called.");
    1640     PGM_REG_COUNTER_BYTES(&pPGM->StatRZPhysSimpleReadBytes,   "/PGM/RZ/Phys/Simple/Read/Bytes",     "The number of bytes read by PGMPhysSimpleReadGCPtr.");
    1641     PGM_REG_COUNTER(&pPGM->StatRZPhysSimpleWrite,             "/PGM/RZ/Phys/Simple/Write",          "The number of times PGMPhysSimpleWriteGCPtr was called.");
    1642     PGM_REG_COUNTER_BYTES(&pPGM->StatRZPhysSimpleWriteBytes,  "/PGM/RZ/Phys/Simple/Write/Bytes",    "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
    1643 
    1644     /* GC only: */
    1645     PGM_REG_COUNTER(&pPGM->StatRCDynMapCacheHits,             "/PGM/RC/DynMapCache/Hits" ,          "Number of dynamic page mapping cache hits.");
    1646     PGM_REG_COUNTER(&pPGM->StatRCDynMapCacheMisses,           "/PGM/RC/DynMapCache/Misses" ,        "Number of dynamic page mapping cache misses.");
    1647     PGM_REG_COUNTER(&pPGM->StatRCInvlPgConflict,              "/PGM/RC/InvlPgConflict",             "Number of times PGMInvalidatePage() detected a mapping conflict.");
    1648     PGM_REG_COUNTER(&pPGM->StatRCInvlPgSyncMonCR3,            "/PGM/RC/InvlPgSyncMonitorCR3",       "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");
    1649 
    1650     PGM_REG_COUNTER(&pPGM->StatRCPhysRead,                    "/PGM/RC/Phys/Read",                  "The number of times PGMPhysRead was called.");
    1651     PGM_REG_COUNTER_BYTES(&pPGM->StatRCPhysReadBytes,         "/PGM/RC/Phys/Read/Bytes",            "The number of bytes read by PGMPhysRead.");
    1652     PGM_REG_COUNTER(&pPGM->StatRCPhysWrite,                   "/PGM/RC/Phys/Write",                 "The number of times PGMPhysWrite was called.");
    1653     PGM_REG_COUNTER_BYTES(&pPGM->StatRCPhysWriteBytes,        "/PGM/RC/Phys/Write/Bytes",           "The number of bytes written by PGMPhysWrite.");
    1654     PGM_REG_COUNTER(&pPGM->StatRCPhysSimpleRead,              "/PGM/RC/Phys/Simple/Read",           "The number of times PGMPhysSimpleReadGCPtr was called.");
    1655     PGM_REG_COUNTER_BYTES(&pPGM->StatRCPhysSimpleReadBytes,   "/PGM/RC/Phys/Simple/Read/Bytes",     "The number of bytes read by PGMPhysSimpleReadGCPtr.");
    1656     PGM_REG_COUNTER(&pPGM->StatRCPhysSimpleWrite,             "/PGM/RC/Phys/Simple/Write",          "The number of times PGMPhysSimpleWriteGCPtr was called.");
    1657     PGM_REG_COUNTER_BYTES(&pPGM->StatRCPhysSimpleWriteBytes,  "/PGM/RC/Phys/Simple/Write/Bytes",    "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
    1658 
    1659 # ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    1660     PGM_REG_COUNTER(&pPGM->StatTrackVirgin,                   "/PGM/Track/Virgin",                  "The number of first time shadowings");
    1661     PGM_REG_COUNTER(&pPGM->StatTrackAliased,                  "/PGM/Track/Aliased",                 "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
    1662     PGM_REG_COUNTER(&pPGM->StatTrackAliasedMany,              "/PGM/Track/AliasedMany",             "The number of times we're tracking using cRef2.");
    1663     PGM_REG_COUNTER(&pPGM->StatTrackAliasedLots,              "/PGM/Track/AliasedLots",             "The number of times we're hitting pages which has overflowed cRef2");
    1664     PGM_REG_COUNTER(&pPGM->StatTrackOverflows,                "/PGM/Track/Overflows",               "The number of times the extent list grows too long.");
    1665     PGM_REG_PROFILE(&pPGM->StatTrackDeref,                    "/PGM/Track/Deref",                   "Profiling of SyncPageWorkerTrackDeref (expensive).");
    1666 # endif
    1667 
    1668 # undef PGM_REG_COUNTER
    1669 # undef PGM_REG_PROFILE
    1670 #endif
    1671 
    1672     /*
    1673      * Note! The layout below matches the member layout exactly!
    1674      */
    1675 
    1676     /*
    1677      * Common - stats
    1678      */
    1679     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    1680     {
    1681         PVMCPU  pVCpu = &pVM->aCpus[i];
    1682         PPGMCPU pPGM  = &pVCpu->pgm.s;
    1683 
    1684 #define PGM_REG_COUNTER(a, b, c) \
    1685         rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
    1686         AssertRC(rc);
    1687 #define PGM_REG_PROFILE(a, b, c) \
    1688         rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
    1689         AssertRC(rc);
    1690 
    1691         PGM_REG_COUNTER(&pPGM->cGuestModeChanges, "/PGM/CPU%d/cGuestModeChanges",  "Number of guest mode changes.");
    1692 
    1693 #ifdef VBOX_WITH_STATISTICS
    1694 
    1695 # if 0 /* rarely useful; leave for debugging. */
    1696         for (unsigned j = 0; j < RT_ELEMENTS(pPGM->StatSyncPtPD); j++)
    1697             STAMR3RegisterF(pVM, &pPGM->StatSyncPtPD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
    1698                             "The number of SyncPT per PD n.", "/PGM/CPU%d/PDSyncPT/%04X", i, j);
    1699         for (unsigned j = 0; j < RT_ELEMENTS(pPGM->StatSyncPagePD); j++)
    1700             STAMR3RegisterF(pVM, &pPGM->StatSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
    1701                             "The number of SyncPage per PD n.", "/PGM/CPU%d/PDSyncPage/%04X", i, j);
    1702 # endif
    1703         /* R0 only: */
    1704         PGM_REG_COUNTER(&pPGM->StatR0DynMapMigrateInvlPg,         "/PGM/CPU%d/R0/DynMapMigrateInvlPg",        "invlpg count in PGMDynMapMigrateAutoSet.");
    1705         PGM_REG_PROFILE(&pPGM->StatR0DynMapGCPageInl,             "/PGM/CPU%d/R0/DynMapPageGCPageInl",        "Calls to pgmR0DynMapGCPageInlined.");
    1706         PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlHits,         "/PGM/CPU%d/R0/DynMapPageGCPageInl/Hits",   "Hash table lookup hits.");
    1707         PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlMisses,       "/PGM/CPU%d/R0/DynMapPageGCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");
    1708         PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlRamHits,      "/PGM/CPU%d/R0/DynMapPageGCPageInl/RamHits",   "1st ram range hits.");
    1709         PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlRamMisses,    "/PGM/CPU%d/R0/DynMapPageGCPageInl/RamMisses", "1st ram range misses, takes slow path.");
    1710         PGM_REG_PROFILE(&pPGM->StatR0DynMapHCPageInl,             "/PGM/CPU%d/R0/DynMapPageHCPageInl",        "Calls to pgmR0DynMapHCPageInlined.");
    1711         PGM_REG_COUNTER(&pPGM->StatR0DynMapHCPageInlHits,         "/PGM/CPU%d/R0/DynMapPageHCPageInl/Hits",   "Hash table lookup hits.");
    1712         PGM_REG_COUNTER(&pPGM->StatR0DynMapHCPageInlMisses,       "/PGM/CPU%d/R0/DynMapPageHCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");
    1713         PGM_REG_COUNTER(&pPGM->StatR0DynMapPage,                  "/PGM/CPU%d/R0/DynMapPage",                 "Calls to pgmR0DynMapPage");
    1714         PGM_REG_COUNTER(&pPGM->StatR0DynMapSetOptimize,           "/PGM/CPU%d/R0/DynMapPage/SetOptimize",     "Calls to pgmDynMapOptimizeAutoSet.");
    1715         PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchFlushes,      "/PGM/CPU%d/R0/DynMapPage/SetSearchFlushes","Set search restorting to subset flushes.");
    1716         PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchHits,         "/PGM/CPU%d/R0/DynMapPage/SetSearchHits",   "Set search hits.");
    1717         PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchMisses,       "/PGM/CPU%d/R0/DynMapPage/SetSearchMisses", "Set search misses.");
    1718         PGM_REG_PROFILE(&pPGM->StatR0DynMapHCPage,                "/PGM/CPU%d/R0/DynMapPage/HCPage",          "Calls to PGMDynMapHCPage (ring-0).");
    1719         PGM_REG_COUNTER(&pPGM->StatR0DynMapPageHits0,             "/PGM/CPU%d/R0/DynMapPage/Hits0",           "Hits at iPage+0");
    1720         PGM_REG_COUNTER(&pPGM->StatR0DynMapPageHits1,             "/PGM/CPU%d/R0/DynMapPage/Hits1",           "Hits at iPage+1");
    1721         PGM_REG_COUNTER(&pPGM->StatR0DynMapPageHits2,             "/PGM/CPU%d/R0/DynMapPage/Hits2",           "Hits at iPage+2");
    1722         PGM_REG_COUNTER(&pPGM->StatR0DynMapPageInvlPg,            "/PGM/CPU%d/R0/DynMapPage/InvlPg",          "invlpg count in pgmR0DynMapPageSlow.");
    1723         PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlow,              "/PGM/CPU%d/R0/DynMapPage/Slow",            "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
    1724         PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLoopHits,      "/PGM/CPU%d/R0/DynMapPage/SlowLoopHits" ,   "Hits in the loop path.");
    1725         PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLoopMisses,    "/PGM/CPU%d/R0/DynMapPage/SlowLoopMisses",  "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
    1726         //PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLostHits,      "/PGM/CPU%d/R0/DynMapPage/SlowLostHits",    "Lost hits.");
    1727         PGM_REG_COUNTER(&pPGM->StatR0DynMapSubsets,               "/PGM/CPU%d/R0/Subsets",                    "Times PGMDynMapPushAutoSubset was called.");
    1728         PGM_REG_COUNTER(&pPGM->StatR0DynMapPopFlushes,            "/PGM/CPU%d/R0/SubsetPopFlushes",           "Times PGMDynMapPopAutoSubset flushes the subset.");
    1729         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[0],           "/PGM/CPU%d/R0/SetSize000..09",              "00-09% filled");
    1730         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[1],           "/PGM/CPU%d/R0/SetSize010..19",              "10-19% filled");
    1731         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[2],           "/PGM/CPU%d/R0/SetSize020..29",              "20-29% filled");
    1732         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[3],           "/PGM/CPU%d/R0/SetSize030..39",              "30-39% filled");
    1733         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[4],           "/PGM/CPU%d/R0/SetSize040..49",              "40-49% filled");
    1734         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[5],           "/PGM/CPU%d/R0/SetSize050..59",              "50-59% filled");
    1735         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[6],           "/PGM/CPU%d/R0/SetSize060..69",              "60-69% filled");
    1736         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[7],           "/PGM/CPU%d/R0/SetSize070..79",              "70-79% filled");
    1737         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[8],           "/PGM/CPU%d/R0/SetSize080..89",              "80-89% filled");
    1738         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[9],           "/PGM/CPU%d/R0/SetSize090..99",              "90-99% filled");
    1739         PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[10],          "/PGM/CPU%d/R0/SetSize100",                 "100% filled");
    1740 
    1741         /* RZ only: */
    1742         PGM_REG_PROFILE(&pPGM->StatRZTrap0e,                      "/PGM/CPU%d/RZ/Trap0e",                     "Profiling of the PGMTrap0eHandler() body.");
    1743         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeCheckPageFault,    "/PGM/CPU%d/RZ/Trap0e/Time/CheckPageFault", "Profiling of checking for dirty/access emulation faults.");
    1744         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeSyncPT,            "/PGM/CPU%d/RZ/Trap0e/Time/SyncPT",         "Profiling of lazy page table syncing.");
    1745         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeMapping,           "/PGM/CPU%d/RZ/Trap0e/Time/Mapping",        "Profiling of checking virtual mappings.");
    1746         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeOutOfSync,         "/PGM/CPU%d/RZ/Trap0e/Time/OutOfSync",      "Profiling of out of sync page handling.");
    1747         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeHandlers,          "/PGM/CPU%d/RZ/Trap0e/Time/Handlers",       "Profiling of checking handlers.");
    1748         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2CSAM,             "/PGM/CPU%d/RZ/Trap0e/Time2/CSAM",              "Profiling of the Trap0eHandler body when the cause is CSAM.");
    1749         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2DirtyAndAccessed, "/PGM/CPU%d/RZ/Trap0e/Time2/DirtyAndAccessedBits", "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
    1750         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2GuestTrap,        "/PGM/CPU%d/RZ/Trap0e/Time2/GuestTrap",         "Profiling of the Trap0eHandler body when the cause is a guest trap.");
    1751         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2HndPhys,          "/PGM/CPU%d/RZ/Trap0e/Time2/HandlerPhysical",   "Profiling of the Trap0eHandler body when the cause is a physical handler.");
    1752         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2HndVirt,          "/PGM/CPU%d/RZ/Trap0e/Time2/HandlerVirtual",    "Profiling of the Trap0eHandler body when the cause is a virtual handler.");
    1753         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2HndUnhandled,     "/PGM/CPU%d/RZ/Trap0e/Time2/HandlerUnhandled",  "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
    1754         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2Misc,             "/PGM/CPU%d/RZ/Trap0e/Time2/Misc",              "Profiling of the Trap0eHandler body when the cause is not known.");
    1755         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2OutOfSync,        "/PGM/CPU%d/RZ/Trap0e/Time2/OutOfSync",         "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
    1756         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2OutOfSyncHndPhys, "/PGM/CPU%d/RZ/Trap0e/Time2/OutOfSyncHndPhys",  "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
    1757         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2OutOfSyncHndVirt, "/PGM/CPU%d/RZ/Trap0e/Time2/OutOfSyncHndVirt",  "Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page.");
    1758         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2OutOfSyncHndObs,  "/PGM/CPU%d/RZ/Trap0e/Time2/OutOfSyncObsHnd",   "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
    1759         PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2SyncPT,           "/PGM/CPU%d/RZ/Trap0e/Time2/SyncPT",            "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
    1760         PGM_REG_COUNTER(&pPGM->StatRZTrap0eConflicts,             "/PGM/CPU%d/RZ/Trap0e/Conflicts",               "The number of times #PF was caused by an undetected conflict.");
    1761         PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersMapping,       "/PGM/CPU%d/RZ/Trap0e/Handlers/Mapping",        "Number of traps due to access handlers in mappings.");
    1762         PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersOutOfSync,     "/PGM/CPU%d/RZ/Trap0e/Handlers/OutOfSync",      "Number of traps due to out-of-sync handled pages.");
    1763         PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersPhysical,      "/PGM/CPU%d/RZ/Trap0e/Handlers/Physical",       "Number of traps due to physical access handlers.");
    1764         PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersVirtual,       "/PGM/CPU%d/RZ/Trap0e/Handlers/Virtual",        "Number of traps due to virtual access handlers.");
    1765         PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersVirtualByPhys, "/PGM/CPU%d/RZ/Trap0e/Handlers/VirtualByPhys",  "Number of traps due to virtual access handlers by physical address.");
    1766         PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersVirtualUnmarked,"/PGM/CPU%d/RZ/Trap0e/Handlers/VirtualUnmarked","Number of traps due to virtual access handlers by virtual address (without proper physical flags).");
    1767         PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersUnhandled,     "/PGM/CPU%d/RZ/Trap0e/Handlers/Unhandled",      "Number of traps due to access outside range of monitored page(s).");
    1768         PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersInvalid,       "/PGM/CPU%d/RZ/Trap0e/Handlers/Invalid",        "Number of traps due to access to invalid physical memory.");
    1769         PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSNotPresentRead,      "/PGM/CPU%d/RZ/Trap0e/Err/User/NPRead",         "Number of user mode not present read page faults.");
    1770         PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSNotPresentWrite,     "/PGM/CPU%d/RZ/Trap0e/Err/User/NPWrite",        "Number of user mode not present write page faults.");
    1771         PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSWrite,               "/PGM/CPU%d/RZ/Trap0e/Err/User/Write",          "Number of user mode write page faults.");
    1772         PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSReserved,            "/PGM/CPU%d/RZ/Trap0e/Err/User/Reserved",       "Number of user mode reserved bit page faults.");
    1773         PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSNXE,                 "/PGM/CPU%d/RZ/Trap0e/Err/User/NXE",            "Number of user mode NXE page faults.");
    1774         PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSRead,                "/PGM/CPU%d/RZ/Trap0e/Err/User/Read",           "Number of user mode read page faults.");
    1775         PGM_REG_COUNTER(&pPGM->StatRZTrap0eSVNotPresentRead,      "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/NPRead",   "Number of supervisor mode not present read page faults.");
    1776         PGM_REG_COUNTER(&pPGM->StatRZTrap0eSVNotPresentWrite,     "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/NPWrite",  "Number of supervisor mode not present write page faults.");
    1777         PGM_REG_COUNTER(&pPGM->StatRZTrap0eSVWrite,               "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/Write",    "Number of supervisor mode write page faults.");
    1778         PGM_REG_COUNTER(&pPGM->StatRZTrap0eSVReserved,            "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/Reserved", "Number of supervisor mode reserved bit page faults.");
    1779         PGM_REG_COUNTER(&pPGM->StatRZTrap0eSNXE,                  "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/NXE",      "Number of supervisor mode NXE page faults.");
    1780         PGM_REG_COUNTER(&pPGM->StatRZTrap0eGuestPF,               "/PGM/CPU%d/RZ/Trap0e/GuestPF",                 "Number of real guest page faults.");
    1781         PGM_REG_COUNTER(&pPGM->StatRZTrap0eGuestPFUnh,            "/PGM/CPU%d/RZ/Trap0e/GuestPF/Unhandled",       "Number of real guest page faults from the 'unhandled' case.");
    1782         PGM_REG_COUNTER(&pPGM->StatRZTrap0eGuestPFMapping,        "/PGM/CPU%d/RZ/Trap0e/GuestPF/InMapping",       "Number of real guest page faults in a mapping.");
    1783         PGM_REG_COUNTER(&pPGM->StatRZTrap0eWPEmulInRZ,            "/PGM/CPU%d/RZ/Trap0e/WP/InRZ",                 "Number of guest page faults due to X86_CR0_WP emulation.");
    1784         PGM_REG_COUNTER(&pPGM->StatRZTrap0eWPEmulToR3,            "/PGM/CPU%d/RZ/Trap0e/WP/ToR3",                 "Number of guest page faults due to X86_CR0_WP emulation (forward to R3 for emulation).");
    1785 #if 0 /* rarely useful; leave for debugging. */
    1786         for (unsigned j = 0; j < RT_ELEMENTS(pPGM->StatRZTrap0ePD); j++)
    1787             STAMR3RegisterF(pVM, &pPGM->StatRZTrap0ePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
    1788                             "The number of traps in page directory n.", "/PGM/CPU%d/RZ/Trap0e/PD/%04X", i, j);
    1789 #endif
    1790         PGM_REG_COUNTER(&pPGM->StatRZGuestCR3WriteHandled,        "/PGM/CPU%d/RZ/CR3WriteHandled",                "The number of times the Guest CR3 change was successfully handled.");
    1791         PGM_REG_COUNTER(&pPGM->StatRZGuestCR3WriteUnhandled,      "/PGM/CPU%d/RZ/CR3WriteUnhandled",              "The number of times the Guest CR3 change was passed back to the recompiler.");
    1792         PGM_REG_COUNTER(&pPGM->StatRZGuestCR3WriteConflict,       "/PGM/CPU%d/RZ/CR3WriteConflict",               "The number of times the Guest CR3 monitoring detected a conflict.");
    1793         PGM_REG_COUNTER(&pPGM->StatRZGuestROMWriteHandled,        "/PGM/CPU%d/RZ/ROMWriteHandled",                "The number of times the Guest ROM change was successfully handled.");
    1794         PGM_REG_COUNTER(&pPGM->StatRZGuestROMWriteUnhandled,      "/PGM/CPU%d/RZ/ROMWriteUnhandled",              "The number of times the Guest ROM change was passed back to the recompiler.");
    1795 
    1796         /* HC only: */
    1797 
    1798         /* RZ & R3: */
    1799         PGM_REG_PROFILE(&pPGM->StatRZSyncCR3,                     "/PGM/CPU%d/RZ/SyncCR3",                        "Profiling of the PGMSyncCR3() body.");
    1800         PGM_REG_PROFILE(&pPGM->StatRZSyncCR3Handlers,             "/PGM/CPU%d/RZ/SyncCR3/Handlers",               "Profiling of the PGMSyncCR3() update handler section.");
    1801         PGM_REG_COUNTER(&pPGM->StatRZSyncCR3Global,               "/PGM/CPU%d/RZ/SyncCR3/Global",                 "The number of global CR3 syncs.");
    1802         PGM_REG_COUNTER(&pPGM->StatRZSyncCR3NotGlobal,            "/PGM/CPU%d/RZ/SyncCR3/NotGlobal",              "The number of non-global CR3 syncs.");
    1803         PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstCacheHit,          "/PGM/CPU%d/RZ/SyncCR3/DstChacheHit",           "The number of times we got some kind of a cache hit.");
    1804         PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstFreed,             "/PGM/CPU%d/RZ/SyncCR3/DstFreed",               "The number of times we've had to free a shadow entry.");
    1805         PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstFreedSrcNP,        "/PGM/CPU%d/RZ/SyncCR3/DstFreedSrcNP",          "The number of times we've had to free a shadow entry for which the source entry was not present.");
    1806         PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstNotPresent,        "/PGM/CPU%d/RZ/SyncCR3/DstNotPresent",          "The number of times we've encountered a not present shadow entry for a present guest entry.");
    1807         PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstSkippedGlobalPD,   "/PGM/CPU%d/RZ/SyncCR3/DstSkippedGlobalPD",     "The number of times a global page directory wasn't flushed.");
    1808         PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstSkippedGlobalPT,   "/PGM/CPU%d/RZ/SyncCR3/DstSkippedGlobalPT",     "The number of times a page table with only global entries wasn't flushed.");
    1809         PGM_REG_PROFILE(&pPGM->StatRZSyncPT,                      "/PGM/CPU%d/RZ/SyncPT",                         "Profiling of the pfnSyncPT() body.");
    1810         PGM_REG_COUNTER(&pPGM->StatRZSyncPTFailed,                "/PGM/CPU%d/RZ/SyncPT/Failed",                  "The number of times pfnSyncPT() failed.");
    1811         PGM_REG_COUNTER(&pPGM->StatRZSyncPT4K,                    "/PGM/CPU%d/RZ/SyncPT/4K",                      "Nr of 4K PT syncs");
    1812         PGM_REG_COUNTER(&pPGM->StatRZSyncPT4M,                    "/PGM/CPU%d/RZ/SyncPT/4M",                      "Nr of 4M PT syncs");
    1813         PGM_REG_COUNTER(&pPGM->StatRZSyncPagePDNAs,               "/PGM/CPU%d/RZ/SyncPagePDNAs",                  "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
    1814         PGM_REG_COUNTER(&pPGM->StatRZSyncPagePDOutOfSync,         "/PGM/CPU%d/RZ/SyncPagePDOutOfSync",            "The number of time we've encountered an out-of-sync PD in SyncPage.");
    1815         PGM_REG_COUNTER(&pPGM->StatRZAccessedPage,                "/PGM/CPU%d/RZ/AccessedPage",               "The number of pages marked not present for accessed bit emulation.");
    1816         PGM_REG_PROFILE(&pPGM->StatRZDirtyBitTracking,            "/PGM/CPU%d/RZ/DirtyPage",                  "Profiling the dirty bit tracking in CheckPageFault().");
    1817         PGM_REG_COUNTER(&pPGM->StatRZDirtyPage,                   "/PGM/CPU%d/RZ/DirtyPage/Mark",             "The number of pages marked read-only for dirty bit tracking.");
    1818         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageBig,                "/PGM/CPU%d/RZ/DirtyPage/MarkBig",          "The number of 4MB pages marked read-only for dirty bit tracking.");
    1819         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageSkipped,            "/PGM/CPU%d/RZ/DirtyPage/Skipped",          "The number of pages already dirty or readonly.");
    1820         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageTrap,               "/PGM/CPU%d/RZ/DirtyPage/Trap",             "The number of traps generated for dirty bit tracking.");
    1821         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageStale,              "/PGM/CPU%d/RZ/DirtyPage/Stale",            "The number of traps generated for dirty bit tracking (stale tlb entries).");
    1822         PGM_REG_COUNTER(&pPGM->StatRZDirtiedPage,                 "/PGM/CPU%d/RZ/DirtyPage/SetDirty",         "The number of pages marked dirty because of write accesses.");
    1823         PGM_REG_COUNTER(&pPGM->StatRZDirtyTrackRealPF,            "/PGM/CPU%d/RZ/DirtyPage/RealPF",           "The number of real pages faults during dirty bit tracking.");
    1824         PGM_REG_COUNTER(&pPGM->StatRZPageAlreadyDirty,            "/PGM/CPU%d/RZ/DirtyPage/AlreadySet",       "The number of pages already marked dirty because of write accesses.");
    1825         PGM_REG_PROFILE(&pPGM->StatRZInvalidatePage,              "/PGM/CPU%d/RZ/InvalidatePage",             "PGMInvalidatePage() profiling.");
    1826         PGM_REG_COUNTER(&pPGM->StatRZInvalidatePage4KBPages,      "/PGM/CPU%d/RZ/InvalidatePage/4KBPages",    "The number of times PGMInvalidatePage() was called for a 4KB page.");
    1827         PGM_REG_COUNTER(&pPGM->StatRZInvalidatePage4MBPages,      "/PGM/CPU%d/RZ/InvalidatePage/4MBPages",    "The number of times PGMInvalidatePage() was called for a 4MB page.");
    1828         PGM_REG_COUNTER(&pPGM->StatRZInvalidatePage4MBPagesSkip,  "/PGM/CPU%d/RZ/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
    1829         PGM_REG_COUNTER(&pPGM->StatRZInvalidatePagePDMappings,    "/PGM/CPU%d/RZ/InvalidatePage/PDMappings",  "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
    1830         PGM_REG_COUNTER(&pPGM->StatRZInvalidatePagePDNAs,         "/PGM/CPU%d/RZ/InvalidatePage/PDNAs",       "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
    1831         PGM_REG_COUNTER(&pPGM->StatRZInvalidatePagePDNPs,         "/PGM/CPU%d/RZ/InvalidatePage/PDNPs",       "The number of times PGMInvalidatePage() was called for a not present page directory.");
    1832         PGM_REG_COUNTER(&pPGM->StatRZInvalidatePagePDOutOfSync,   "/PGM/CPU%d/RZ/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
    1833         PGM_REG_COUNTER(&pPGM->StatRZInvalidatePageSkipped,       "/PGM/CPU%d/RZ/InvalidatePage/Skipped",     "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
    1834         PGM_REG_COUNTER(&pPGM->StatRZPageOutOfSyncSupervisor,     "/PGM/CPU%d/RZ/OutOfSync/SuperVisor",       "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
    1835         PGM_REG_COUNTER(&pPGM->StatRZPageOutOfSyncUser,           "/PGM/CPU%d/RZ/OutOfSync/User",             "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
    1836         PGM_REG_COUNTER(&pPGM->StatRZPageOutOfSyncSupervisorWrite,"/PGM/CPU%d/RZ/OutOfSync/SuperVisorWrite",  "Number of traps due to pages out of sync (RW) and times VerifyAccessSyncPage calls SyncPage.");
    1837         PGM_REG_COUNTER(&pPGM->StatRZPageOutOfSyncUserWrite,      "/PGM/CPU%d/RZ/OutOfSync/UserWrite",        "Number of traps due to pages out of sync (RW) and times VerifyAccessSyncPage calls SyncPage.");
    1838         PGM_REG_PROFILE(&pPGM->StatRZPrefetch,                    "/PGM/CPU%d/RZ/Prefetch",                   "PGMPrefetchPage profiling.");
    1839         PGM_REG_PROFILE(&pPGM->StatRZFlushTLB,                    "/PGM/CPU%d/RZ/FlushTLB",                   "Profiling of the PGMFlushTLB() body.");
    1840         PGM_REG_COUNTER(&pPGM->StatRZFlushTLBNewCR3,              "/PGM/CPU%d/RZ/FlushTLB/NewCR3",            "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
    1841         PGM_REG_COUNTER(&pPGM->StatRZFlushTLBNewCR3Global,        "/PGM/CPU%d/RZ/FlushTLB/NewCR3Global",      "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
    1842         PGM_REG_COUNTER(&pPGM->StatRZFlushTLBSameCR3,             "/PGM/CPU%d/RZ/FlushTLB/SameCR3",           "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
    1843         PGM_REG_COUNTER(&pPGM->StatRZFlushTLBSameCR3Global,       "/PGM/CPU%d/RZ/FlushTLB/SameCR3Global",     "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
    1844         PGM_REG_PROFILE(&pPGM->StatRZGstModifyPage,               "/PGM/CPU%d/RZ/GstModifyPage",              "Profiling of the PGMGstModifyPage() body.");
    1845 
    1846         PGM_REG_PROFILE(&pPGM->StatR3SyncCR3,                     "/PGM/CPU%d/R3/SyncCR3",                        "Profiling of the PGMSyncCR3() body.");
    1847         PGM_REG_PROFILE(&pPGM->StatR3SyncCR3Handlers,             "/PGM/CPU%d/R3/SyncCR3/Handlers",               "Profiling of the PGMSyncCR3() update handler section.");
    1848         PGM_REG_COUNTER(&pPGM->StatR3SyncCR3Global,               "/PGM/CPU%d/R3/SyncCR3/Global",                 "The number of global CR3 syncs.");
    1849         PGM_REG_COUNTER(&pPGM->StatR3SyncCR3NotGlobal,            "/PGM/CPU%d/R3/SyncCR3/NotGlobal",              "The number of non-global CR3 syncs.");
    1850         PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstCacheHit,          "/PGM/CPU%d/R3/SyncCR3/DstChacheHit",           "The number of times we got some kind of a cache hit.");
    1851         PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstFreed,             "/PGM/CPU%d/R3/SyncCR3/DstFreed",               "The number of times we've had to free a shadow entry.");
    1852         PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstFreedSrcNP,        "/PGM/CPU%d/R3/SyncCR3/DstFreedSrcNP",          "The number of times we've had to free a shadow entry for which the source entry was not present.");
    1853         PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstNotPresent,        "/PGM/CPU%d/R3/SyncCR3/DstNotPresent",          "The number of times we've encountered a not present shadow entry for a present guest entry.");
    1854         PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstSkippedGlobalPD,   "/PGM/CPU%d/R3/SyncCR3/DstSkippedGlobalPD",     "The number of times a global page directory wasn't flushed.");
    1855         PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstSkippedGlobalPT,   "/PGM/CPU%d/R3/SyncCR3/DstSkippedGlobalPT",     "The number of times a page table with only global entries wasn't flushed.");
    1856         PGM_REG_PROFILE(&pPGM->StatR3SyncPT,                      "/PGM/CPU%d/R3/SyncPT",                         "Profiling of the pfnSyncPT() body.");
    1857         PGM_REG_COUNTER(&pPGM->StatR3SyncPTFailed,                "/PGM/CPU%d/R3/SyncPT/Failed",                  "The number of times pfnSyncPT() failed.");
    1858         PGM_REG_COUNTER(&pPGM->StatR3SyncPT4K,                    "/PGM/CPU%d/R3/SyncPT/4K",                      "Nr of 4K PT syncs");
    1859         PGM_REG_COUNTER(&pPGM->StatR3SyncPT4M,                    "/PGM/CPU%d/R3/SyncPT/4M",                      "Nr of 4M PT syncs");
    1860         PGM_REG_COUNTER(&pPGM->StatR3SyncPagePDNAs,               "/PGM/CPU%d/R3/SyncPagePDNAs",                  "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
    1861         PGM_REG_COUNTER(&pPGM->StatR3SyncPagePDOutOfSync,         "/PGM/CPU%d/R3/SyncPagePDOutOfSync",            "The number of time we've encountered an out-of-sync PD in SyncPage.");
    1862         PGM_REG_COUNTER(&pPGM->StatR3AccessedPage,                "/PGM/CPU%d/R3/AccessedPage",               "The number of pages marked not present for accessed bit emulation.");
    1863         PGM_REG_PROFILE(&pPGM->StatR3DirtyBitTracking,            "/PGM/CPU%d/R3/DirtyPage",                  "Profiling the dirty bit tracking in CheckPageFault().");
    1864         PGM_REG_COUNTER(&pPGM->StatR3DirtyPage,                   "/PGM/CPU%d/R3/DirtyPage/Mark",             "The number of pages marked read-only for dirty bit tracking.");
    1865         PGM_REG_COUNTER(&pPGM->StatR3DirtyPageBig,                "/PGM/CPU%d/R3/DirtyPage/MarkBig",          "The number of 4MB pages marked read-only for dirty bit tracking.");
    1866         PGM_REG_COUNTER(&pPGM->StatR3DirtyPageSkipped,            "/PGM/CPU%d/R3/DirtyPage/Skipped",          "The number of pages already dirty or readonly.");
    1867         PGM_REG_COUNTER(&pPGM->StatR3DirtyPageTrap,               "/PGM/CPU%d/R3/DirtyPage/Trap",             "The number of traps generated for dirty bit tracking.");
    1868         PGM_REG_COUNTER(&pPGM->StatR3DirtiedPage,                 "/PGM/CPU%d/R3/DirtyPage/SetDirty",         "The number of pages marked dirty because of write accesses.");
    1869         PGM_REG_COUNTER(&pPGM->StatR3DirtyTrackRealPF,            "/PGM/CPU%d/R3/DirtyPage/RealPF",           "The number of real pages faults during dirty bit tracking.");
    1870         PGM_REG_COUNTER(&pPGM->StatR3PageAlreadyDirty,            "/PGM/CPU%d/R3/DirtyPage/AlreadySet",       "The number of pages already marked dirty because of write accesses.");
    1871         PGM_REG_PROFILE(&pPGM->StatR3InvalidatePage,              "/PGM/CPU%d/R3/InvalidatePage",             "PGMInvalidatePage() profiling.");
    1872         PGM_REG_COUNTER(&pPGM->StatR3InvalidatePage4KBPages,      "/PGM/CPU%d/R3/InvalidatePage/4KBPages",    "The number of times PGMInvalidatePage() was called for a 4KB page.");
    1873         PGM_REG_COUNTER(&pPGM->StatR3InvalidatePage4MBPages,      "/PGM/CPU%d/R3/InvalidatePage/4MBPages",    "The number of times PGMInvalidatePage() was called for a 4MB page.");
    1874         PGM_REG_COUNTER(&pPGM->StatR3InvalidatePage4MBPagesSkip,  "/PGM/CPU%d/R3/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
    1875         PGM_REG_COUNTER(&pPGM->StatR3InvalidatePagePDMappings,    "/PGM/CPU%d/R3/InvalidatePage/PDMappings",  "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
    1876         PGM_REG_COUNTER(&pPGM->StatR3InvalidatePagePDNAs,         "/PGM/CPU%d/R3/InvalidatePage/PDNAs",       "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
    1877         PGM_REG_COUNTER(&pPGM->StatR3InvalidatePagePDNPs,         "/PGM/CPU%d/R3/InvalidatePage/PDNPs",       "The number of times PGMInvalidatePage() was called for a not present page directory.");
    1878         PGM_REG_COUNTER(&pPGM->StatR3InvalidatePagePDOutOfSync,   "/PGM/CPU%d/R3/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
    1879         PGM_REG_COUNTER(&pPGM->StatR3InvalidatePageSkipped,       "/PGM/CPU%d/R3/InvalidatePage/Skipped",     "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
    1880         PGM_REG_COUNTER(&pPGM->StatR3PageOutOfSyncSupervisor,     "/PGM/CPU%d/R3/OutOfSync/SuperVisor",       "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
    1881         PGM_REG_COUNTER(&pPGM->StatR3PageOutOfSyncUser,           "/PGM/CPU%d/R3/OutOfSync/User",             "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
    1882         PGM_REG_PROFILE(&pPGM->StatR3Prefetch,                    "/PGM/CPU%d/R3/Prefetch",                   "PGMPrefetchPage profiling.");
    1883         PGM_REG_PROFILE(&pPGM->StatR3FlushTLB,                    "/PGM/CPU%d/R3/FlushTLB",                   "Profiling of the PGMFlushTLB() body.");
    1884         PGM_REG_COUNTER(&pPGM->StatR3FlushTLBNewCR3,              "/PGM/CPU%d/R3/FlushTLB/NewCR3",            "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
    1885         PGM_REG_COUNTER(&pPGM->StatR3FlushTLBNewCR3Global,        "/PGM/CPU%d/R3/FlushTLB/NewCR3Global",      "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
    1886         PGM_REG_COUNTER(&pPGM->StatR3FlushTLBSameCR3,             "/PGM/CPU%d/R3/FlushTLB/SameCR3",           "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
    1887         PGM_REG_COUNTER(&pPGM->StatR3FlushTLBSameCR3Global,       "/PGM/CPU%d/R3/FlushTLB/SameCR3Global",     "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
    1888         PGM_REG_PROFILE(&pPGM->StatR3GstModifyPage,               "/PGM/CPU%d/R3/GstModifyPage",              "Profiling of the PGMGstModifyPage() body.");
    1889 #endif /* VBOX_WITH_STATISTICS */
    1890 
    1891 #undef PGM_REG_PROFILE
    1892 #undef PGM_REG_COUNTER
    1893 
    1894     }
    1895 }
    1896 
    1897 
    1898 /**
    1899  * Init the PGM bits that rely on VMMR0 and MM to be fully initialized.
    1900  *
    1901  * The dynamic mapping area will also be allocated and initialized at this
    1902  * time. We could allocate it during PGMR3Init of course, but the mapping
    1903  * wouldn't be allocated at that time preventing us from setting up the
    1904  * page table entries with the dummy page.
    1905  *
    1906  * @returns VBox status code.
    1907  * @param   pVM     VM handle.
    1908  */
    1909 VMMR3DECL(int) PGMR3InitDynMap(PVM pVM)
    1910 {
    1911     RTGCPTR GCPtr;
    1912     int     rc;
    1913 
    1914     /*
    1915      * Reserve space for the dynamic mappings.
    1916      */
    1917     rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping", &GCPtr);
    1918     if (RT_SUCCESS(rc))
    1919         pVM->pgm.s.pbDynPageMapBaseGC = GCPtr;
    1920 
    1921     if (    RT_SUCCESS(rc)
    1922         &&  (pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT))
    1923     {
    1924         rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping not crossing", &GCPtr);
    1925         if (RT_SUCCESS(rc))
    1926             pVM->pgm.s.pbDynPageMapBaseGC = GCPtr;
    1927     }
    1928     if (RT_SUCCESS(rc))
    1929     {
    1930         AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT));
    1931         MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
    1932     }
    1933     return rc;
    1934 }
    1935 
    1936 
    1937 /**
    1938  * Ring-3 init finalizing.
    1939  *
    1940  * @returns VBox status code.
    1941  * @param   pVM         The VM handle.
    1942  */
    1943 VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
    1944 {
    1945     int rc;
    1946 
    1947     /*
    1948      * Reserve space for the dynamic mappings.
    1949      * Initialize the dynamic mapping pages with dummy pages to simply the cache.
    1950      */
    1951     /* get the pointer to the page table entries. */
    1952     PPGMMAPPING pMapping = pgmGetMapping(pVM, pVM->pgm.s.pbDynPageMapBaseGC);
    1953     AssertRelease(pMapping);
    1954     const uintptr_t off = pVM->pgm.s.pbDynPageMapBaseGC - pMapping->GCPtr;
    1955     const unsigned iPT = off >> X86_PD_SHIFT;
    1956     const unsigned iPG = (off >> X86_PT_SHIFT) & X86_PT_MASK;
    1957     pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTRC      + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);
    1958     pVM->pgm.s.paDynPageMapPaePTEsGC   = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
    1959 
    1960     /* init cache */
    1961     RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);
    1962     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); i++)
    1963         pVM->pgm.s.aHCPhysDynPageMapCache[i] = HCPhysDummy;
    1964 
    1965     for (unsigned i = 0; i < MM_HYPER_DYNAMIC_SIZE; i += PAGE_SIZE)
    1966     {
    1967         rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + i, HCPhysDummy, PAGE_SIZE, 0);
    1968         AssertRCReturn(rc, rc);
    1969     }
    1970 
    1971     /*
    1972      * Note that AMD uses all the 8 reserved bits for the address (so 40 bits in total);
    1973      * Intel only goes up to 36 bits, so we stick to 36 as well.
    1974      */
    1975     /** @todo How to test for the 40 bits support? Long mode seems to be the test criterium. */
    1976     uint32_t u32Dummy, u32Features;
    1977     CPUMGetGuestCpuId(VMMGetCpu(pVM), 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
    1978 
    1979     if (u32Features & X86_CPUID_FEATURE_EDX_PSE36)
    1980         pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(36) - 1;
    1981     else
    1982         pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1;
    1983 
    1984     /*
    1985      * Allocate memory if we're supposed to do that.
    1986      */
    1987     if (pVM->pgm.s.fRamPreAlloc)
    1988         rc = pgmR3PhysRamPreAllocate(pVM);
    1989 
    1990     LogRel(("PGMR3InitFinalize: 4 MB PSE mask %RGp\n", pVM->pgm.s.GCPhys4MBPSEMask));
    1991     return rc;
    1992 }
    1993 
    1994 
/**
 * Applies relocations to data and code managed by this component.
 *
 * This function will be called at init and whenever the VMM needs to relocate
 * itself inside the GC.
 *
 * @param   pVM         The VM.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("PGMR3Relocate %RGv to %RGv\n", pVM->pgm.s.GCPtrCR3Mapping, pVM->pgm.s.GCPtrCR3Mapping + offDelta));

    /*
     * Paging stuff.
     */
    pVM->pgm.s.GCPtrCR3Mapping += offDelta;

    /* Re-resolve the GC/R0 addresses of the paging mode data. */
    pgmR3ModeDataInit(pVM, true /* resolve GC/R0 symbols */);

    /* Shadow, guest and both mode switch & relocation for each VCPU. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU  pVCpu = &pVM->aCpus[i];

        pgmR3ModeDataSwitch(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);

        PGM_SHW_PFN(Relocate, pVCpu)(pVCpu, offDelta);
        PGM_GST_PFN(Relocate, pVCpu)(pVCpu, offDelta);
        PGM_BTH_PFN(Relocate, pVCpu)(pVCpu, offDelta);
    }

    /*
     * Trees.
     */
    pVM->pgm.s.pTreesRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pTreesR3);

    /*
     * Ram ranges.
     */
    if (pVM->pgm.s.pRamRangesR3)
    {
        /* Update the pSelfRC pointers and relink them. */
        for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
            if (!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING))
                pCur->pSelfRC = MMHyperCCToRC(pVM, pCur);
        pgmR3PhysRelinkRamRanges(pVM);
    }

    /*
     * Update the pSelfRC pointer of the MMIO2 ram ranges since they might not
     * be mapped and thus not included in the above exercise.
     */
    for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
        if (!(pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING))
            pCur->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pCur->RamRange);

    /*
     * Update the two page directories with all page table mappings.
     * (One or more of them have changed, that's why we're here.)
     */
    /* NOTE(review): the loop below dereferences pMappingsR3 without a NULL
       check - presumably the mappings list is never empty when relocation
       runs; confirm against the init order. */
    pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pMappingsR3);
    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur->pNextR3; pCur = pCur->pNextR3)
        pCur->pNextRC = MMHyperR3ToRC(pVM, pCur->pNextR3);

    /* Relocate GC addresses of Page Tables. */
    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    {
        for (RTHCUINT i = 0; i < pCur->cPTs; i++)
        {
            pCur->aPTs[i].pPTRC = MMHyperR3ToRC(pVM, pCur->aPTs[i].pPTR3);
            pCur->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pCur->aPTs[i].paPaePTsR3);
        }
    }

    /*
     * Dynamic page mapping area.
     */
    pVM->pgm.s.paDynPageMap32BitPTEsGC += offDelta;
    pVM->pgm.s.paDynPageMapPaePTEsGC += offDelta;
    pVM->pgm.s.pbDynPageMapBaseGC += offDelta;

    /*
     * The Zero page.
     */
    pVM->pgm.s.pvZeroPgR0 = MMHyperR3ToR0(pVM, pVM->pgm.s.pvZeroPgR3);
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    AssertRelease(pVM->pgm.s.pvZeroPgR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
#else
    AssertRelease(pVM->pgm.s.pvZeroPgR0 != NIL_RTR0PTR);
#endif

    /*
     * Physical and virtual handlers.
     */
    RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers,     true, pgmR3RelocatePhysHandler,      &offDelta);
    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->VirtHandlers,      true, pgmR3RelocateVirtHandler,      &offDelta);
    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3RelocateHyperVirtHandler, &offDelta);

    /*
     * The page pool.
     */
    pgmR3PoolRelocate(pVM);
}
    2099 
    2100 
    2101 /**
    2102  * Callback function for relocating a physical access handler.
    2103  *
    2104  * @returns 0 (continue enum)
    2105  * @param   pNode       Pointer to a PGMPHYSHANDLER node.
    2106  * @param   pvUser      Pointer to the offDelta. This is a pointer to the delta since we're
    2107  *                      not certain the delta will fit in a void pointer for all possible configs.
    2108  */
    2109 static DECLCALLBACK(int) pgmR3RelocatePhysHandler(PAVLROGCPHYSNODECORE pNode, void *pvUser)
    2110 {
    2111     PPGMPHYSHANDLER pHandler = (PPGMPHYSHANDLER)pNode;
    2112     RTGCINTPTR      offDelta = *(PRTGCINTPTR)pvUser;
    2113     if (pHandler->pfnHandlerRC)
    2114         pHandler->pfnHandlerRC += offDelta;
    2115     if (pHandler->pvUserRC >= 0x10000)
    2116         pHandler->pvUserRC += offDelta;
    2117     return 0;
    2118 }
    2119 
    2120 
    2121 /**
    2122  * Callback function for relocating a virtual access handler.
    2123  *
    2124  * @returns 0 (continue enum)
    2125  * @param   pNode       Pointer to a PGMVIRTHANDLER node.
    2126  * @param   pvUser      Pointer to the offDelta. This is a pointer to the delta since we're
    2127  *                      not certain the delta will fit in a void pointer for all possible configs.
    2128  */
    2129 static DECLCALLBACK(int) pgmR3RelocateVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser)
    2130 {
    2131     PPGMVIRTHANDLER pHandler = (PPGMVIRTHANDLER)pNode;
    2132     RTGCINTPTR      offDelta = *(PRTGCINTPTR)pvUser;
    2133     Assert(     pHandler->enmType == PGMVIRTHANDLERTYPE_ALL
    2134            ||   pHandler->enmType == PGMVIRTHANDLERTYPE_WRITE);
    2135     Assert(pHandler->pfnHandlerRC);
    2136     pHandler->pfnHandlerRC += offDelta;
    2137     return 0;
    2138 }
    2139 
    2140 
    2141 /**
    2142  * Callback function for relocating a virtual access handler for the hypervisor mapping.
    2143  *
    2144  * @returns 0 (continue enum)
    2145  * @param   pNode       Pointer to a PGMVIRTHANDLER node.
    2146  * @param   pvUser      Pointer to the offDelta. This is a pointer to the delta since we're
    2147  *                      not certain the delta will fit in a void pointer for all possible configs.
    2148  */
    2149 static DECLCALLBACK(int) pgmR3RelocateHyperVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser)
    2150 {
    2151     PPGMVIRTHANDLER pHandler = (PPGMVIRTHANDLER)pNode;
    2152     RTGCINTPTR      offDelta = *(PRTGCINTPTR)pvUser;
    2153     Assert(pHandler->enmType == PGMVIRTHANDLERTYPE_HYPERVISOR);
    2154     Assert(pHandler->pfnHandlerRC);
    2155     pHandler->pfnHandlerRC  += offDelta;
    2156     return 0;
    2157 }
    2158 
    2159 
/**
 * The VM is being reset.
 *
 * For the PGM component this means that any PD write monitors need to be
 * removed, every VCPU is forced back to real mode, the shadow page pool is
 * reset, and RAM/ROM pages are zeroed.
 *
 * @param   pVM     VM handle.
 */
VMMR3DECL(void) PGMR3Reset(PVM pVM)
{
    int rc;

    LogFlow(("PGMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    pgmLock(pVM);

    /*
     * Unfix any fixed mappings and disable CR3 monitoring.
     */
    pVM->pgm.s.fMappingsFixed    = false;
    pVM->pgm.s.GCPtrMappingFixed = 0;
    pVM->pgm.s.cbMappingFixed    = 0;

    /* Exit the guest paging mode before the pgm pool gets reset.
     * Important to clean up the amd64 case.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU  pVCpu = &pVM->aCpus[i];
        rc = PGM_GST_PFN(Exit, pVCpu)(pVCpu);
        AssertRC(rc);
    }

#ifdef DEBUG
    DBGFR3InfoLog(pVM, "mappings", NULL);
    DBGFR3InfoLog(pVM, "handlers", "all nostat");
#endif

    /*
     * Switch mode back to real mode. (before resetting the pgm pool!)
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU  pVCpu = &pVM->aCpus[i];

        rc = PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
        AssertRC(rc);

        STAM_REL_COUNTER_RESET(&pVCpu->pgm.s.cGuestModeChanges);
    }

    /*
     * Reset the shadow page pool.
     */
    pgmR3PoolReset(pVM);

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU  pVCpu = &pVM->aCpus[i];

        /*
         * Re-init other members.
         */
        pVCpu->pgm.s.fA20Enabled = true; /* the A20 gate is open again after reset */

        /*
         * Clear the FFs PGM owns.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    }

    /*
     * Reset (zero) RAM pages.
     */
    rc = pgmR3PhysRamReset(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Reset (zero) shadow ROM pages.
         */
        rc = pgmR3PhysRomReset(pVM);
    }

    pgmUnlock(pVM);
    /* No status is returned to the caller; a RAM/ROM reset failure is fatal. */
    AssertReleaseRC(rc);
}
    2249 
    2250 
    2251 #ifdef VBOX_STRICT
    2252 /**
    2253  * VM state change callback for clearing fNoMorePhysWrites after
    2254  * a snapshot has been created.
    2255  */
    2256 static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
    2257 {
    2258     if (enmState == VMSTATE_RUNNING)
    2259         pVM->pgm.s.fNoMorePhysWrites = false;
    2260 }
    2261 #endif
    2262 
    2263 
    2264 /**
    2265  * Terminates the PGM.
    2266  *
    2267  * @returns VBox status code.
    2268  * @param   pVM     Pointer to VM structure.
    2269  */
    2270 VMMR3DECL(int) PGMR3Term(PVM pVM)
    2271 {
    2272     PGMDeregisterStringFormatTypes();
    2273     return PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
    2274 }
    2275 
    2276 
    2277 /**
    2278  * Terminates the per-VCPU PGM.
    2279  *
    2280  * Termination means cleaning up and freeing all resources,
    2281  * the VM it self is at this point powered off or suspended.
    2282  *
    2283  * @returns VBox status code.
    2284  * @param   pVM         The VM to operate on.
    2285  */
    2286 VMMR3DECL(int) PGMR3TermCPU(PVM pVM)
    2287 {
    2288     return 0;
    2289 }
    2290122
    2291123
     
    2523355
    2524356
/** PGM fields to save/load.
 * NOTE: the order and set of entries define the saved state layout -
 * presumably changing them requires a saved state version bump; confirm
 * against the SSM unit registration before touching this table. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};
    2533 
/** Per-VCPU PGM fields to save/load.
 * NOTE: the order and set of entries define the saved state layout; see the
 * caution on s_aPGMFields before modifying. */
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
    2541 
    2542 /* For loading old saved states. (pre-smp) */
    2543 typedef struct
    2544 {
    2545     /** If set no conflict checks are required.  (boolean) */
    2546     bool                            fMappingsFixed;
    2547     /** Size of fixed mapping */
    2548     uint32_t                        cbMappingFixed;
    2549     /** Base address (GC) of fixed mapping */
    2550     RTGCPTR                         GCPtrMappingFixed;
    2551     /** A20 gate mask.
    2552      * Our current approach to A20 emulation is to let REM do it and don't bother
    2553      * anywhere else. The interesting Guests will be operating with it enabled anyway.
    2554      * But whould need arrise, we'll subject physical addresses to this mask. */
    2555     RTGCPHYS                        GCPhysA20Mask;
    2556     /** A20 gate state - boolean! */
    2557     bool                            fA20Enabled;
    2558     /** The guest paging mode. */
    2559     PGMMODE                         enmGuestMode;
    2560 } PGMOLD;
    2561 
    2562 static const SSMFIELD s_aPGMFields_Old[] =
    2563 {
    2564     SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    2565     SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    2566     SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    2567     SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    2568     SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    2569     SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    2570     SSMFIELD_ENTRY_TERM()
    2571 };
    2572 
    2573 
    2574357/**
    2575358 * Execute state save operation.
     
    33291112
    33301113/**
    3331  * Show paging mode.
    3332  *
    3333  * @param   pVM         VM Handle.
    3334  * @param   pHlp        The info helpers.
    3335  * @param   pszArgs     "all" (default), "guest", "shadow" or "host".
    3336  */
    3337 static DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
    3338 {
    3339     /* digest argument. */
    3340     bool fGuest, fShadow, fHost;
    3341     if (pszArgs)
    3342         pszArgs = RTStrStripL(pszArgs);
    3343     if (!pszArgs || !*pszArgs || strstr(pszArgs, "all"))
    3344         fShadow = fHost = fGuest = true;
    3345     else
    3346     {
    3347         fShadow = fHost = fGuest = false;
    3348         if (strstr(pszArgs, "guest"))
    3349             fGuest = true;
    3350         if (strstr(pszArgs, "shadow"))
    3351             fShadow = true;
    3352         if (strstr(pszArgs, "host"))
    3353             fHost = true;
    3354     }
    3355 
    3356     /** @todo SMP support! */
    3357     /* print info. */
    3358     if (fGuest)
    3359         pHlp->pfnPrintf(pHlp, "Guest paging mode:  %s, changed %RU64 times, A20 %s\n",
    3360                         PGMGetModeName(pVM->aCpus[0].pgm.s.enmGuestMode), pVM->aCpus[0].pgm.s.cGuestModeChanges.c,
    3361                         pVM->aCpus[0].pgm.s.fA20Enabled ? "enabled" : "disabled");
    3362     if (fShadow)
    3363         pHlp->pfnPrintf(pHlp, "Shadow paging mode: %s\n", PGMGetModeName(pVM->aCpus[0].pgm.s.enmShadowMode));
    3364     if (fHost)
    3365     {
    3366         const char *psz;
    3367         switch (pVM->pgm.s.enmHostMode)
    3368         {
    3369             case SUPPAGINGMODE_INVALID:             psz = "invalid"; break;
    3370             case SUPPAGINGMODE_32_BIT:              psz = "32-bit"; break;
    3371             case SUPPAGINGMODE_32_BIT_GLOBAL:       psz = "32-bit+G"; break;
    3372             case SUPPAGINGMODE_PAE:                 psz = "PAE"; break;
    3373             case SUPPAGINGMODE_PAE_GLOBAL:          psz = "PAE+G"; break;
    3374             case SUPPAGINGMODE_PAE_NX:              psz = "PAE+NX"; break;
    3375             case SUPPAGINGMODE_PAE_GLOBAL_NX:       psz = "PAE+G+NX"; break;
    3376             case SUPPAGINGMODE_AMD64:               psz = "AMD64"; break;
    3377             case SUPPAGINGMODE_AMD64_GLOBAL:        psz = "AMD64+G"; break;
    3378             case SUPPAGINGMODE_AMD64_NX:            psz = "AMD64+NX"; break;
    3379             case SUPPAGINGMODE_AMD64_GLOBAL_NX:     psz = "AMD64+G+NX"; break;
    3380             default:                                psz = "unknown"; break;
    3381         }
    3382         pHlp->pfnPrintf(pHlp, "Host paging mode:   %s\n", psz);
    3383     }
    3384 }
    3385 
    3386 
    3387 /**
    3388  * Dump registered MMIO ranges to the log.
    3389  *
    3390  * @param   pVM         VM Handle.
    3391  * @param   pHlp        The info helpers.
    3392  * @param   pszArgs     Arguments, ignored.
    3393  */
    3394 static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
    3395 {
    3396     NOREF(pszArgs);
    3397     pHlp->pfnPrintf(pHlp,
    3398                     "RAM ranges (pVM=%p)\n"
    3399                     "%.*s %.*s\n",
    3400                     pVM,
    3401                     sizeof(RTGCPHYS) * 4 + 1, "GC Phys Range                    ",
    3402                     sizeof(RTHCPTR) * 2,      "pvHC            ");
    3403 
    3404     for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
    3405         pHlp->pfnPrintf(pHlp,
    3406                         "%RGp-%RGp %RHv %s\n",
    3407                         pCur->GCPhys,
    3408                         pCur->GCPhysLast,
    3409                         pCur->pvR3,
    3410                         pCur->pszDesc);
    3411 }
    3412 
/**
 * Dump the page directory to the log.
 *
 * Walks the guest's 32-bit page directory (CR3) and prints one line per
 * present entry.
 *
 * @param   pVM         VM Handle.
 * @param   pHlp        The info helpers.
 * @param   pszArgs     Arguments, ignored.
 */
static DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    /** @todo SMP support!! */
    PVMCPU pVCpu = &pVM->aCpus[0];   /* only VCPU 0 is dumped */

/** @todo fix this! Convert the PGMR3DumpHierarchyHC functions to do guest stuff. */
    /* Big pages supported? */
    const bool fPSE  = !!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);

    /* Global pages supported? */
    const bool fPGE = !!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE);

    NOREF(pszArgs);

    /*
     * Get page directory addresses.
     */
    PX86PD     pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
    Assert(pPDSrc);
    Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);

    /*
     * Iterate the page directory.
     */
    for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
    {
        X86PDE PdeSrc = pPDSrc->a[iPD];
        if (PdeSrc.n.u1Present)
        {
            /* 4MB (big) page entries are only meaningful when CR4.PSE is set. */
            if (PdeSrc.b.u1Size && fPSE)
                pHlp->pfnPrintf(pHlp,
                                "%04X - %RGp P=%d U=%d RW=%d G=%d - BIG\n",
                                iPD,
                                pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeSrc),
                                PdeSrc.b.u1Present, PdeSrc.b.u1User, PdeSrc.b.u1Write, PdeSrc.b.u1Global && fPGE);
            else
                pHlp->pfnPrintf(pHlp,
                                "%04X - %RGp P=%d U=%d RW=%d [G=%d]\n",
                                iPD,
                                (RTGCPHYS)(PdeSrc.u & X86_PDE_PG_MASK),
                                PdeSrc.n.u1Present, PdeSrc.n.u1User, PdeSrc.n.u1Write, PdeSrc.b.u1Global && fPGE);
        }
    }
}
    3464 
    3465 
    3466 /**
    3467  * Service a VMMCALLRING3_PGM_LOCK call.
     1114 * Registers the saved state callbacks with SSM.
    34681115 *
    34691116 * @returns VBox status code.
    3470  * @param   pVM     The VM handle.
    3471  */
    3472 VMMR3DECL(int) PGMR3LockCall(PVM pVM)
    3473 {
    3474     int rc = PDMR3CritSectEnterEx(&pVM->pgm.s.CritSect, true /* fHostCall */);
    3475     AssertRC(rc);
    3476     return rc;
    3477 }
    3478 
    3479 
    3480 /**
    3481  * Converts a PGMMODE value to a PGM_TYPE_* \#define.
    3482  *
    3483  * @returns PGM_TYPE_*.
    3484  * @param   pgmMode     The mode value to convert.
    3485  */
    3486 DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
    3487 {
    3488     switch (pgmMode)
    3489     {
    3490         case PGMMODE_REAL:      return PGM_TYPE_REAL;
    3491         case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
    3492         case PGMMODE_32_BIT:    return PGM_TYPE_32BIT;
    3493         case PGMMODE_PAE:
    3494         case PGMMODE_PAE_NX:    return PGM_TYPE_PAE;
    3495         case PGMMODE_AMD64:
    3496         case PGMMODE_AMD64_NX:  return PGM_TYPE_AMD64;
    3497         case PGMMODE_NESTED:    return PGM_TYPE_NESTED;
    3498         case PGMMODE_EPT:       return PGM_TYPE_EPT;
    3499         default:
    3500             AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
    3501     }
    3502 }
    3503 
    3504 
    3505 /**
    3506  * Gets the index into the paging mode data array of a SHW+GST mode.
    3507  *
    3508  * @returns PGM::paPagingData index.
    3509  * @param   uShwType      The shadow paging mode type.
    3510  * @param   uGstType      The guest paging mode type.
    3511  */
    3512 DECLINLINE(unsigned) pgmModeDataIndex(unsigned uShwType, unsigned uGstType)
    3513 {
    3514     Assert(uShwType >= PGM_TYPE_32BIT && uShwType <= PGM_TYPE_MAX);
    3515     Assert(uGstType >= PGM_TYPE_REAL  && uGstType <= PGM_TYPE_AMD64);
    3516     return (uShwType - PGM_TYPE_32BIT) * (PGM_TYPE_AMD64 - PGM_TYPE_REAL + 1)
    3517          + (uGstType - PGM_TYPE_REAL);
    3518 }
    3519 
    3520 
    3521 /**
    3522  * Gets the index into the paging mode data array of a SHW+GST mode.
    3523  *
    3524  * @returns PGM::paPagingData index.
    3525  * @param   enmShw      The shadow paging mode.
    3526  * @param   enmGst      The guest paging mode.
    3527  */
    3528 DECLINLINE(unsigned) pgmModeDataIndexByMode(PGMMODE enmShw, PGMMODE enmGst)
    3529 {
    3530     Assert(enmShw >= PGMMODE_32_BIT && enmShw <= PGMMODE_MAX);
    3531     Assert(enmGst > PGMMODE_INVALID && enmGst < PGMMODE_MAX);
    3532     return pgmModeDataIndex(pgmModeToType(enmShw), pgmModeToType(enmGst));
    3533 }
    3534 
    3535 
    3536 /**
    3537  * Calculates the max data index.
    3538  * @returns The number of entries in the paging data array.
    3539  */
    3540 DECLINLINE(unsigned) pgmModeDataMaxIndex(void)
    3541 {
    3542     return pgmModeDataIndex(PGM_TYPE_MAX, PGM_TYPE_AMD64) + 1;
    3543 }
    3544 
    3545 
    3546 /**
    3547  * Initializes the paging mode data kept in PGM::paModeData.
    3548  *
    3549  * @param   pVM             The VM handle.
    3550  * @param   fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
    3551  *                          This is used early in the init process to avoid trouble with PDM
    3552  *                          not being initialized yet.
    3553  */
    3554 static int pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0)
    3555 {
    3556     PPGMMODEDATA pModeData;
    3557     int rc;
    3558 
    3559     /*
    3560      * Allocate the array on the first call.
    3561      */
    3562     if (!pVM->pgm.s.paModeData)
    3563     {
    3564         pVM->pgm.s.paModeData = (PPGMMODEDATA)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMMODEDATA) * pgmModeDataMaxIndex());
    3565         AssertReturn(pVM->pgm.s.paModeData, VERR_NO_MEMORY);
    3566     }
    3567 
    3568     /*
    3569      * Initialize the array entries.
    3570      */
    3571     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGM_TYPE_REAL)];
    3572     pModeData->uShwType = PGM_TYPE_32BIT;
    3573     pModeData->uGstType = PGM_TYPE_REAL;
    3574     rc = PGM_SHW_NAME_32BIT(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3575     rc = PGM_GST_NAME_REAL(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3576     rc = PGM_BTH_NAME_32BIT_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3577 
    3578     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGMMODE_PROTECTED)];
    3579     pModeData->uShwType = PGM_TYPE_32BIT;
    3580     pModeData->uGstType = PGM_TYPE_PROT;
    3581     rc = PGM_SHW_NAME_32BIT(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3582     rc = PGM_GST_NAME_PROT(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3583     rc = PGM_BTH_NAME_32BIT_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3584 
    3585     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGM_TYPE_32BIT)];
    3586     pModeData->uShwType = PGM_TYPE_32BIT;
    3587     pModeData->uGstType = PGM_TYPE_32BIT;
    3588     rc = PGM_SHW_NAME_32BIT(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3589     rc = PGM_GST_NAME_32BIT(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3590     rc = PGM_BTH_NAME_32BIT_32BIT(InitData)(pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3591 
    3592     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_REAL)];
    3593     pModeData->uShwType = PGM_TYPE_PAE;
    3594     pModeData->uGstType = PGM_TYPE_REAL;
    3595     rc = PGM_SHW_NAME_PAE(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3596     rc = PGM_GST_NAME_REAL(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3597     rc = PGM_BTH_NAME_PAE_REAL(InitData)(   pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3598 
    3599     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_PROT)];
    3600     pModeData->uShwType = PGM_TYPE_PAE;
    3601     pModeData->uGstType = PGM_TYPE_PROT;
    3602     rc = PGM_SHW_NAME_PAE(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3603     rc = PGM_GST_NAME_PROT(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3604     rc = PGM_BTH_NAME_PAE_PROT(InitData)(   pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3605 
    3606     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_32BIT)];
    3607     pModeData->uShwType = PGM_TYPE_PAE;
    3608     pModeData->uGstType = PGM_TYPE_32BIT;
    3609     rc = PGM_SHW_NAME_PAE(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3610     rc = PGM_GST_NAME_32BIT(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3611     rc = PGM_BTH_NAME_PAE_32BIT(InitData)(  pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3612 
    3613     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_PAE)];
    3614     pModeData->uShwType = PGM_TYPE_PAE;
    3615     pModeData->uGstType = PGM_TYPE_PAE;
    3616     rc = PGM_SHW_NAME_PAE(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3617     rc = PGM_GST_NAME_PAE(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3618     rc = PGM_BTH_NAME_PAE_PAE(InitData)(    pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3619 
    3620 #ifdef VBOX_WITH_64_BITS_GUESTS
    3621     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_AMD64)];
    3622     pModeData->uShwType = PGM_TYPE_AMD64;
    3623     pModeData->uGstType = PGM_TYPE_AMD64;
    3624     rc = PGM_SHW_NAME_AMD64(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3625     rc = PGM_GST_NAME_AMD64(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3626     rc = PGM_BTH_NAME_AMD64_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3627 #endif
    3628 
    3629     /* The nested paging mode. */
    3630     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, PGM_TYPE_REAL)];
    3631     pModeData->uShwType = PGM_TYPE_NESTED;
    3632     pModeData->uGstType = PGM_TYPE_REAL;
    3633     rc = PGM_GST_NAME_REAL(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3634     rc = PGM_BTH_NAME_NESTED_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3635 
    3636     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, PGMMODE_PROTECTED)];
    3637     pModeData->uShwType = PGM_TYPE_NESTED;
    3638     pModeData->uGstType = PGM_TYPE_PROT;
    3639     rc = PGM_GST_NAME_PROT(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3640     rc = PGM_BTH_NAME_NESTED_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3641 
    3642     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, PGM_TYPE_32BIT)];
    3643     pModeData->uShwType = PGM_TYPE_NESTED;
    3644     pModeData->uGstType = PGM_TYPE_32BIT;
    3645     rc = PGM_GST_NAME_32BIT(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3646     rc = PGM_BTH_NAME_NESTED_32BIT(InitData)(pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3647 
    3648     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, PGM_TYPE_PAE)];
    3649     pModeData->uShwType = PGM_TYPE_NESTED;
    3650     pModeData->uGstType = PGM_TYPE_PAE;
    3651     rc = PGM_GST_NAME_PAE(InitData)(         pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3652     rc = PGM_BTH_NAME_NESTED_PAE(InitData)(  pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3653 
    3654 #ifdef VBOX_WITH_64_BITS_GUESTS
    3655     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, PGM_TYPE_AMD64)];
    3656     pModeData->uShwType = PGM_TYPE_NESTED;
    3657     pModeData->uGstType = PGM_TYPE_AMD64;
    3658     rc = PGM_GST_NAME_AMD64(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3659     rc = PGM_BTH_NAME_NESTED_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3660 #endif
    3661 
    3662     /* The shadow part of the nested callback mode depends on the host paging mode (AMD-V only). */
    3663     switch (pVM->pgm.s.enmHostMode)
    3664     {
    3665 #if HC_ARCH_BITS == 32
    3666     case SUPPAGINGMODE_32_BIT:
    3667     case SUPPAGINGMODE_32_BIT_GLOBAL:
    3668         for (unsigned i = PGM_TYPE_REAL; i <= PGM_TYPE_PAE; i++)
    3669         {
    3670             pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, i)];
    3671             rc = PGM_SHW_NAME_32BIT(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3672         }
    3673 # ifdef VBOX_WITH_64_BITS_GUESTS
    3674         pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, PGM_TYPE_AMD64)];
    3675         rc = PGM_SHW_NAME_AMD64(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3676 # endif
    3677         break;
    3678 
    3679     case SUPPAGINGMODE_PAE:
    3680     case SUPPAGINGMODE_PAE_NX:
    3681     case SUPPAGINGMODE_PAE_GLOBAL:
    3682     case SUPPAGINGMODE_PAE_GLOBAL_NX:
    3683         for (unsigned i = PGM_TYPE_REAL; i <= PGM_TYPE_PAE; i++)
    3684         {
    3685             pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, i)];
    3686             rc = PGM_SHW_NAME_PAE(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3687         }
    3688 # ifdef VBOX_WITH_64_BITS_GUESTS
    3689         pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, PGM_TYPE_AMD64)];
    3690         rc = PGM_SHW_NAME_AMD64(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3691 # endif
    3692         break;
    3693 #endif /* HC_ARCH_BITS == 32 */
    3694 
    3695 #if HC_ARCH_BITS == 64 || defined(RT_OS_DARWIN)
    3696     case SUPPAGINGMODE_AMD64:
    3697     case SUPPAGINGMODE_AMD64_GLOBAL:
    3698     case SUPPAGINGMODE_AMD64_NX:
    3699     case SUPPAGINGMODE_AMD64_GLOBAL_NX:
    3700 # ifdef VBOX_WITH_64_BITS_GUESTS
    3701         for (unsigned i = PGM_TYPE_REAL; i <= PGM_TYPE_AMD64; i++)
    3702 # else
    3703         for (unsigned i = PGM_TYPE_REAL; i <= PGM_TYPE_PAE; i++)
    3704 # endif
    3705         {
    3706             pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_NESTED, i)];
    3707             rc = PGM_SHW_NAME_AMD64(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3708         }
    3709         break;
    3710 #endif /* HC_ARCH_BITS == 64 || RT_OS_DARWIN */
    3711 
    3712     default:
    3713         AssertFailed();
    3714         break;
    3715     }
    3716 
    3717     /* Extended paging (EPT) / Intel VT-x */
    3718     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_EPT, PGM_TYPE_REAL)];
    3719     pModeData->uShwType = PGM_TYPE_EPT;
    3720     pModeData->uGstType = PGM_TYPE_REAL;
    3721     rc = PGM_SHW_NAME_EPT(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3722     rc = PGM_GST_NAME_REAL(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3723     rc = PGM_BTH_NAME_EPT_REAL(InitData)(   pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3724 
    3725     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_EPT, PGM_TYPE_PROT)];
    3726     pModeData->uShwType = PGM_TYPE_EPT;
    3727     pModeData->uGstType = PGM_TYPE_PROT;
    3728     rc = PGM_SHW_NAME_EPT(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3729     rc = PGM_GST_NAME_PROT(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3730     rc = PGM_BTH_NAME_EPT_PROT(InitData)(   pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3731 
    3732     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_EPT, PGM_TYPE_32BIT)];
    3733     pModeData->uShwType = PGM_TYPE_EPT;
    3734     pModeData->uGstType = PGM_TYPE_32BIT;
    3735     rc = PGM_SHW_NAME_EPT(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3736     rc = PGM_GST_NAME_32BIT(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3737     rc = PGM_BTH_NAME_EPT_32BIT(InitData)(  pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3738 
    3739     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_EPT, PGM_TYPE_PAE)];
    3740     pModeData->uShwType = PGM_TYPE_EPT;
    3741     pModeData->uGstType = PGM_TYPE_PAE;
    3742     rc = PGM_SHW_NAME_EPT(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3743     rc = PGM_GST_NAME_PAE(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3744     rc = PGM_BTH_NAME_EPT_PAE(InitData)(    pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3745 
    3746 #ifdef VBOX_WITH_64_BITS_GUESTS
    3747     pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_EPT, PGM_TYPE_AMD64)];
    3748     pModeData->uShwType = PGM_TYPE_EPT;
    3749     pModeData->uGstType = PGM_TYPE_AMD64;
    3750     rc = PGM_SHW_NAME_EPT(InitData)(        pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3751     rc = PGM_GST_NAME_AMD64(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3752     rc = PGM_BTH_NAME_EPT_AMD64(InitData)(  pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    3753 #endif
    3754     return VINF_SUCCESS;
    3755 }
    3756 
    3757 
/**
 * Switch to different (or relocated in the relocate case) mode data.
 *
 * Copies the per-mode worker function pointers (ring-3, ring-0 and RC) from
 * the selected PGMMODEDATA entry into the VCPU's PGM state.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU to operate on.
 * @param   enmShw      The shadow paging mode.
 * @param   enmGst      The guest paging mode.
 */
static void pgmR3ModeDataSwitch(PVM pVM, PVMCPU pVCpu, PGMMODE enmShw, PGMMODE enmGst)
{
    PPGMMODEDATA pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndexByMode(enmShw, enmGst)];

    /* Sanity: the entry must have been initialized for exactly these modes. */
    Assert(pModeData->uGstType == pgmModeToType(enmGst));
    Assert(pModeData->uShwType == pgmModeToType(enmShw));

    /* shadow */
    pVCpu->pgm.s.pfnR3ShwRelocate             = pModeData->pfnR3ShwRelocate;
    pVCpu->pgm.s.pfnR3ShwExit                 = pModeData->pfnR3ShwExit;
    pVCpu->pgm.s.pfnR3ShwGetPage              = pModeData->pfnR3ShwGetPage;
    Assert(pVCpu->pgm.s.pfnR3ShwGetPage);
    pVCpu->pgm.s.pfnR3ShwModifyPage           = pModeData->pfnR3ShwModifyPage;

    pVCpu->pgm.s.pfnRCShwGetPage              = pModeData->pfnRCShwGetPage;
    pVCpu->pgm.s.pfnRCShwModifyPage           = pModeData->pfnRCShwModifyPage;

    pVCpu->pgm.s.pfnR0ShwGetPage              = pModeData->pfnR0ShwGetPage;
    pVCpu->pgm.s.pfnR0ShwModifyPage           = pModeData->pfnR0ShwModifyPage;


    /* guest */
    pVCpu->pgm.s.pfnR3GstRelocate             = pModeData->pfnR3GstRelocate;
    pVCpu->pgm.s.pfnR3GstExit                 = pModeData->pfnR3GstExit;
    pVCpu->pgm.s.pfnR3GstGetPage              = pModeData->pfnR3GstGetPage;
    Assert(pVCpu->pgm.s.pfnR3GstGetPage);
    pVCpu->pgm.s.pfnR3GstModifyPage           = pModeData->pfnR3GstModifyPage;
    pVCpu->pgm.s.pfnR3GstGetPDE               = pModeData->pfnR3GstGetPDE;
    pVCpu->pgm.s.pfnRCGstGetPage              = pModeData->pfnRCGstGetPage;
    pVCpu->pgm.s.pfnRCGstModifyPage           = pModeData->pfnRCGstModifyPage;
    pVCpu->pgm.s.pfnRCGstGetPDE               = pModeData->pfnRCGstGetPDE;
    pVCpu->pgm.s.pfnR0GstGetPage              = pModeData->pfnR0GstGetPage;
    pVCpu->pgm.s.pfnR0GstModifyPage           = pModeData->pfnR0GstModifyPage;
    pVCpu->pgm.s.pfnR0GstGetPDE               = pModeData->pfnR0GstGetPDE;

    /* both */
    pVCpu->pgm.s.pfnR3BthRelocate             = pModeData->pfnR3BthRelocate;
    pVCpu->pgm.s.pfnR3BthInvalidatePage       = pModeData->pfnR3BthInvalidatePage;
    pVCpu->pgm.s.pfnR3BthSyncCR3              = pModeData->pfnR3BthSyncCR3;
    Assert(pVCpu->pgm.s.pfnR3BthSyncCR3);
    pVCpu->pgm.s.pfnR3BthSyncPage             = pModeData->pfnR3BthSyncPage;
    pVCpu->pgm.s.pfnR3BthPrefetchPage         = pModeData->pfnR3BthPrefetchPage;
    pVCpu->pgm.s.pfnR3BthVerifyAccessSyncPage = pModeData->pfnR3BthVerifyAccessSyncPage;
#ifdef VBOX_STRICT
    pVCpu->pgm.s.pfnR3BthAssertCR3            = pModeData->pfnR3BthAssertCR3;
#endif
    pVCpu->pgm.s.pfnR3BthMapCR3               = pModeData->pfnR3BthMapCR3;
    pVCpu->pgm.s.pfnR3BthUnmapCR3             = pModeData->pfnR3BthUnmapCR3;

    pVCpu->pgm.s.pfnRCBthTrap0eHandler        = pModeData->pfnRCBthTrap0eHandler;
    pVCpu->pgm.s.pfnRCBthInvalidatePage       = pModeData->pfnRCBthInvalidatePage;
    pVCpu->pgm.s.pfnRCBthSyncCR3              = pModeData->pfnRCBthSyncCR3;
    pVCpu->pgm.s.pfnRCBthSyncPage             = pModeData->pfnRCBthSyncPage;
    pVCpu->pgm.s.pfnRCBthPrefetchPage         = pModeData->pfnRCBthPrefetchPage;
    pVCpu->pgm.s.pfnRCBthVerifyAccessSyncPage = pModeData->pfnRCBthVerifyAccessSyncPage;
#ifdef VBOX_STRICT
    pVCpu->pgm.s.pfnRCBthAssertCR3            = pModeData->pfnRCBthAssertCR3;
#endif
    pVCpu->pgm.s.pfnRCBthMapCR3               = pModeData->pfnRCBthMapCR3;
    pVCpu->pgm.s.pfnRCBthUnmapCR3             = pModeData->pfnRCBthUnmapCR3;

    pVCpu->pgm.s.pfnR0BthTrap0eHandler        = pModeData->pfnR0BthTrap0eHandler;
    pVCpu->pgm.s.pfnR0BthInvalidatePage       = pModeData->pfnR0BthInvalidatePage;
    pVCpu->pgm.s.pfnR0BthSyncCR3              = pModeData->pfnR0BthSyncCR3;
    pVCpu->pgm.s.pfnR0BthSyncPage             = pModeData->pfnR0BthSyncPage;
    pVCpu->pgm.s.pfnR0BthPrefetchPage         = pModeData->pfnR0BthPrefetchPage;
    pVCpu->pgm.s.pfnR0BthVerifyAccessSyncPage = pModeData->pfnR0BthVerifyAccessSyncPage;
#ifdef VBOX_STRICT
    pVCpu->pgm.s.pfnR0BthAssertCR3            = pModeData->pfnR0BthAssertCR3;
#endif
    pVCpu->pgm.s.pfnR0BthMapCR3               = pModeData->pfnR0BthMapCR3;
    pVCpu->pgm.s.pfnR0BthUnmapCR3             = pModeData->pfnR0BthUnmapCR3;
}
    3839 
    3840 
    3841 /**
    3842  * Calculates the shadow paging mode.
    3843  *
    3844  * @returns The shadow paging mode.
    3845  * @param   pVM             VM handle.
    3846  * @param   enmGuestMode    The guest mode.
    3847  * @param   enmHostMode     The host mode.
    3848  * @param   enmShadowMode   The current shadow mode.
    3849  * @param   penmSwitcher    Where to store the switcher to use.
    3850  *                          VMMSWITCHER_INVALID means no change.
    3851  */
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher)
{
    VMMSWITCHER enmSwitcher = VMMSWITCHER_INVALID;
    switch (enmGuestMode)
    {
        /*
         * When switching to real or protected mode we don't change
         * anything since it's likely that we'll switch back pretty soon.
         *
         * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
         * and is supposed to determine which shadow paging and switcher to
         * use during init.
         */
        case PGMMODE_REAL:
        case PGMMODE_PROTECTED:
            if (    enmShadowMode != PGMMODE_INVALID
                && !HWACCMIsEnabled(pVM) /* always switch in hwaccm mode! */)
                break; /* (no change) */

            /* Pick the shadow mode that mirrors the host paging mode. */
            switch (enmHostMode)
            {
                case SUPPAGINGMODE_32_BIT:
                case SUPPAGINGMODE_32_BIT_GLOBAL:
                    enmShadowMode = PGMMODE_32_BIT;
                    enmSwitcher = VMMSWITCHER_32_TO_32;
                    break;

                case SUPPAGINGMODE_PAE:
                case SUPPAGINGMODE_PAE_NX:
                case SUPPAGINGMODE_PAE_GLOBAL:
                case SUPPAGINGMODE_PAE_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
#ifdef DEBUG_bird
                    /* Developer override: force a 32-bit shadow via env. var. */
                    if (RTEnvExist("VBOX_32BIT"))
                    {
                        enmShadowMode = PGMMODE_32_BIT;
                        enmSwitcher = VMMSWITCHER_PAE_TO_32;
                    }
#endif
                    break;

                case SUPPAGINGMODE_AMD64:
                case SUPPAGINGMODE_AMD64_GLOBAL:
                case SUPPAGINGMODE_AMD64_NX:
                case SUPPAGINGMODE_AMD64_GLOBAL_NX:
                    /* AMD64 host: a PAE shadow is used for 32-bit style guests. */
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
#ifdef DEBUG_bird
                    if (RTEnvExist("VBOX_32BIT"))
                    {
                        enmShadowMode = PGMMODE_32_BIT;
                        enmSwitcher = VMMSWITCHER_AMD64_TO_32;
                    }
#endif
                    break;

                default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
            }
            break;

        case PGMMODE_32_BIT:
            switch (enmHostMode)
            {
                case SUPPAGINGMODE_32_BIT:
                case SUPPAGINGMODE_32_BIT_GLOBAL:
                    enmShadowMode = PGMMODE_32_BIT;
                    enmSwitcher = VMMSWITCHER_32_TO_32;
                    break;

                case SUPPAGINGMODE_PAE:
                case SUPPAGINGMODE_PAE_NX:
                case SUPPAGINGMODE_PAE_GLOBAL:
                case SUPPAGINGMODE_PAE_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
#ifdef DEBUG_bird
                    if (RTEnvExist("VBOX_32BIT"))
                    {
                        enmShadowMode = PGMMODE_32_BIT;
                        enmSwitcher = VMMSWITCHER_PAE_TO_32;
                    }
#endif
                    break;

                case SUPPAGINGMODE_AMD64:
                case SUPPAGINGMODE_AMD64_GLOBAL:
                case SUPPAGINGMODE_AMD64_NX:
                case SUPPAGINGMODE_AMD64_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
#ifdef DEBUG_bird
                    if (RTEnvExist("VBOX_32BIT"))
                    {
                        enmShadowMode = PGMMODE_32_BIT;
                        enmSwitcher = VMMSWITCHER_AMD64_TO_32;
                    }
#endif
                    break;

                default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
            }
            break;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
            /* A PAE guest always gets a PAE shadow, whatever the host runs. */
            switch (enmHostMode)
            {
                case SUPPAGINGMODE_32_BIT:
                case SUPPAGINGMODE_32_BIT_GLOBAL:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_32_TO_PAE;
                    break;

                case SUPPAGINGMODE_PAE:
                case SUPPAGINGMODE_PAE_NX:
                case SUPPAGINGMODE_PAE_GLOBAL:
                case SUPPAGINGMODE_PAE_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
                    break;

                case SUPPAGINGMODE_AMD64:
                case SUPPAGINGMODE_AMD64_GLOBAL:
                case SUPPAGINGMODE_AMD64_NX:
                case SUPPAGINGMODE_AMD64_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
                    break;

                default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
            }
            break;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            /* A long mode guest always gets an AMD64 shadow. */
            switch (enmHostMode)
            {
                case SUPPAGINGMODE_32_BIT:
                case SUPPAGINGMODE_32_BIT_GLOBAL:
                    enmShadowMode = PGMMODE_AMD64;
                    enmSwitcher = VMMSWITCHER_32_TO_AMD64;
                    break;

                case SUPPAGINGMODE_PAE:
                case SUPPAGINGMODE_PAE_NX:
                case SUPPAGINGMODE_PAE_GLOBAL:
                case SUPPAGINGMODE_PAE_GLOBAL_NX:
                    enmShadowMode = PGMMODE_AMD64;
                    enmSwitcher = VMMSWITCHER_PAE_TO_AMD64;
                    break;

                case SUPPAGINGMODE_AMD64:
                case SUPPAGINGMODE_AMD64_GLOBAL:
                case SUPPAGINGMODE_AMD64_NX:
                case SUPPAGINGMODE_AMD64_GLOBAL_NX:
                    enmShadowMode = PGMMODE_AMD64;
                    enmSwitcher = VMMSWITCHER_AMD64_TO_AMD64;
                    break;

                default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
            }
            break;


        default:
            AssertReleaseMsgFailed(("enmGuestMode=%d\n", enmGuestMode));
            return PGMMODE_INVALID;
    }
    /* Override the shadow mode if nested paging is active; the hardware
       dictates the paging mode in that case. */
    if (HWACCMIsNestedPagingActive(pVM))
        enmShadowMode = HWACCMGetShwPagingMode(pVM);

    *penmSwitcher = enmSwitcher;
    return enmShadowMode;
}
    4028 
    4029 
/**
 * Performs the actual mode change.
 * This is called by PGMChangeMode and pgmR3InitPaging().
 *
 * The sequence is strictly ordered: select switcher, exit the old shadow and
 * guest modes, switch the mode data, enter the new shadow mode, then enter
 * the new guest and shadow+guest (both) modes.
 *
 * @returns VBox status code. May suspend or power off the VM on error, but this
 *          will trigger using FFs and not status codes.
 *
 * @param   pVM             VM handle.
 * @param   pVCpu           The VMCPU to operate on.
 * @param   enmGuestMode    The new guest mode. This is assumed to be different from
 *                          the current mode.
 */
VMMR3DECL(int) PGMR3ChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
{
    Log(("PGMR3ChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
    STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);

    /*
     * Calc the shadow mode and switcher.
     */
    VMMSWITCHER enmSwitcher;
    PGMMODE     enmShadowMode = pgmR3CalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode, &enmSwitcher);
    if (enmSwitcher != VMMSWITCHER_INVALID)
    {
        /*
         * Select new switcher.
         */
        int rc = VMMR3SelectSwitcher(pVM, enmSwitcher);
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("VMMR3SelectSwitcher(%d) -> %Rrc\n", enmSwitcher, rc));
            return rc;
        }
    }

    /*
     * Exit old mode(s).
     */
    /* shadow */
    if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
    {
        LogFlow(("PGMR3ChangeMode: Shadow mode: %s -> %s\n",  PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
        if (PGM_SHW_PFN(Exit, pVCpu)) /* not every shadow mode has an Exit callback */
        {
            int rc = PGM_SHW_PFN(Exit, pVCpu)(pVCpu);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc));
                return rc;
            }
        }

    }
    else
        LogFlow(("PGMR3ChangeMode: Shadow mode remains: %s\n",  PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));

    /* guest */
    if (PGM_GST_PFN(Exit, pVCpu))
    {
        int rc = PGM_GST_PFN(Exit, pVCpu)(pVCpu);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc));
            return rc;
        }
    }

    /*
     * Load new paging mode data.
     */
    pgmR3ModeDataSwitch(pVM, pVCpu, enmShadowMode, enmGuestMode);

    /*
     * Enter new shadow mode (if changed).
     */
    if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
    {
        int rc;
        pVCpu->pgm.s.enmShadowMode = enmShadowMode;
        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
                rc = PGM_SHW_NAME_32BIT(Enter)(pVCpu);
                break;
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
                rc = PGM_SHW_NAME_PAE(Enter)(pVCpu);
                break;
            case PGMMODE_AMD64:
            case PGMMODE_AMD64_NX:
                rc = PGM_SHW_NAME_AMD64(Enter)(pVCpu);
                break;
            case PGMMODE_NESTED:
                rc = PGM_SHW_NAME_NESTED(Enter)(pVCpu);
                break;
            case PGMMODE_EPT:
                rc = PGM_SHW_NAME_EPT(Enter)(pVCpu);
                break;
            case PGMMODE_REAL:
            case PGMMODE_PROTECTED:
            default:
                /* Real/protected are never valid *shadow* modes. */
                AssertReleaseMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
                return VERR_INTERNAL_ERROR;
        }
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Entering enmShadowMode=%d failed: %Rrc\n", enmShadowMode, rc));
            /* Invalidate so the next attempt performs a full re-entry. */
            pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
            return rc;
        }
    }

    /*
     * Always flag the necessary updates
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);

    /*
     * Enter the new guest and shadow+guest modes.
     */
    /* rc collects the guest mode Enter status, rc2 the shadow+guest (both) Enter status. */
    int rc = -1;
    int rc2 = -1;
    RTGCPHYS GCPhysCR3 = NIL_RTGCPHYS;
    pVCpu->pgm.s.enmGuestMode = enmGuestMode;
    switch (enmGuestMode)
    {
        case PGMMODE_REAL:
            /* No guest paging: CR3 is not applicable. */
            rc = PGM_GST_NAME_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
            switch (pVCpu->pgm.s.enmShadowMode)
            {
                case PGMMODE_32_BIT:
                    rc2 = PGM_BTH_NAME_32BIT_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
                    break;
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    rc2 = PGM_BTH_NAME_PAE_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
                    break;
                case PGMMODE_NESTED:
                    rc2 = PGM_BTH_NAME_NESTED_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
                    break;
                case PGMMODE_EPT:
                    rc2 = PGM_BTH_NAME_EPT_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
                    break;
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    AssertMsgFailed(("Should use PAE shadow mode!\n"));
                default: AssertFailed(); break;
            }
            break;

        case PGMMODE_PROTECTED:
            /* Protected mode without paging: CR3 still not applicable. */
            rc = PGM_GST_NAME_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
            switch (pVCpu->pgm.s.enmShadowMode)
            {
                case PGMMODE_32_BIT:
                    rc2 = PGM_BTH_NAME_32BIT_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
                    break;
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    rc2 = PGM_BTH_NAME_PAE_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
                    break;
                case PGMMODE_NESTED:
                    rc2 = PGM_BTH_NAME_NESTED_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
                    break;
                case PGMMODE_EPT:
                    rc2 = PGM_BTH_NAME_EPT_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
                    break;
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    AssertMsgFailed(("Should use PAE shadow mode!\n"));
                default: AssertFailed(); break;
            }
            break;

        case PGMMODE_32_BIT:
            /* Legacy 32-bit paging: mask CR3 down to the page directory address. */
            GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
            rc = PGM_GST_NAME_32BIT(Enter)(pVCpu, GCPhysCR3);
            switch (pVCpu->pgm.s.enmShadowMode)
            {
                case PGMMODE_32_BIT:
                    rc2 = PGM_BTH_NAME_32BIT_32BIT(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    rc2 = PGM_BTH_NAME_PAE_32BIT(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_NESTED:
                    rc2 = PGM_BTH_NAME_NESTED_32BIT(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_EPT:
                    rc2 = PGM_BTH_NAME_EPT_32BIT(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    AssertMsgFailed(("Should use PAE shadow mode!\n"));
                default: AssertFailed(); break;
            }
            break;

        case PGMMODE_PAE_NX:
        case PGMMODE_PAE:
        {
            uint32_t u32Dummy, u32Features;

            /* Refuse PAE if the configured guest CPUID does not advertise it. */
            CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
            if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
                return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
                                         N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (General/Advanced)"));

            /* PAE CR3 points to the 32-byte aligned PDPT. */
            GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
            rc = PGM_GST_NAME_PAE(Enter)(pVCpu, GCPhysCR3);
            switch (pVCpu->pgm.s.enmShadowMode)
            {
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    rc2 = PGM_BTH_NAME_PAE_PAE(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_NESTED:
                    rc2 = PGM_BTH_NAME_NESTED_PAE(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_EPT:
                    rc2 = PGM_BTH_NAME_EPT_PAE(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_32_BIT:
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    AssertMsgFailed(("Should use PAE shadow mode!\n"));
                default: AssertFailed(); break;
            }
            break;
        }

#ifdef VBOX_WITH_64_BITS_GUESTS
        case PGMMODE_AMD64_NX:
        case PGMMODE_AMD64:
            GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & UINT64_C(0xfffffffffffff000); /** @todo define this mask! */
            rc = PGM_GST_NAME_AMD64(Enter)(pVCpu, GCPhysCR3);
            switch (pVCpu->pgm.s.enmShadowMode)
            {
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    rc2 = PGM_BTH_NAME_AMD64_AMD64(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_NESTED:
                    rc2 = PGM_BTH_NAME_NESTED_AMD64(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_EPT:
                    rc2 = PGM_BTH_NAME_EPT_AMD64(Enter)(pVCpu, GCPhysCR3);
                    break;
                case PGMMODE_32_BIT:
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    AssertMsgFailed(("Should use AMD64 shadow mode!\n"));
                default: AssertFailed(); break;
            }
            break;
#endif

        default:
            AssertReleaseMsgFailed(("enmGuestMode=%d\n", enmGuestMode));
            rc = VERR_NOT_IMPLEMENTED;
            break;
    }

    /* status codes. */
    AssertRC(rc);
    AssertRC(rc2);
    if (RT_SUCCESS(rc))
    {
        /* The combined-mode status takes precedence once the guest Enter succeeded. */
        rc = rc2;
        if (RT_SUCCESS(rc)) /* no informational status codes. */
            rc = VINF_SUCCESS;
    }

    /* Notify HWACCM as well. */
    HWACCMR3PagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
    return rc;
}
    4308 
    4309 /**
    4310  * Release the pgm lock if owned by the current VCPU
    4311  *
    4312  * @param   pVM         The VM to operate on.
    4313  */
    4314 VMMR3DECL(void) PGMR3ReleaseOwnedLocks(PVM pVM)
    4315 {
    4316     while (PDMCritSectIsOwner(&pVM->pgm.s.CritSect))
    4317         PDMCritSectLeave(&pVM->pgm.s.CritSect);
    4318 }
    4319 
/**
 * Called by pgmPoolFlushAllInt prior to flushing the pool.
 *
 * Unmaps the guest CR3 and leaves the current shadow paging mode so that
 * the pool pages referenced by them can be flushed safely.  The order
 * matters: CR3 must be unmapped before the shadow mode is exited.
 *
 * @returns VBox status code, fully asserted.  The status of the shadow
 *          mode Exit call is what gets returned.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU to operate on.
 */
int pgmR3ExitShadowModeBeforePoolFlush(PVM pVM, PVMCPU pVCpu)
{
    /* Unmap the old CR3 value before flushing everything. */
    int rc = PGM_BTH_PFN(UnmapCR3, pVCpu)(pVCpu);
    AssertRC(rc);

    /* Exit the current shadow paging mode as well; nested paging and EPT use a root CR3 which will get flushed here. */
    rc = PGM_SHW_PFN(Exit, pVCpu)(pVCpu);
    AssertRC(rc);
    /* Exiting must have released the shadow CR3 page. */
    Assert(pVCpu->pgm.s.pShwPageCR3R3 == NULL);
    return rc;
}
    4339 
    4340 
/**
 * Called by pgmPoolFlushAllInt after flushing the pool.
 *
 * Re-enters the shadow mode that was exited by
 * pgmR3ExitShadowModeBeforePoolFlush by forcing a full mode change.
 *
 * @returns VBox status code, fully asserted.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU to operate on.
 */
int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu)
{
    /* Invalidate the current shadow mode so PGMR3ChangeMode performs a real
       re-entry even though the guest mode is unchanged. */
    pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
    int rc = PGMR3ChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu));
    /* The mode change always schedules a CR3 resync. */
    Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    AssertRCReturn(rc, rc);
    AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);

    /* A fresh shadow CR3 page must have been allocated by the re-entry. */
    Assert(pVCpu->pgm.s.pShwPageCR3R3 != NULL);
    AssertMsg(   pVCpu->pgm.s.enmShadowMode >= PGMMODE_NESTED
              || CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu),
              ("%RHp != %RHp %s\n", (RTHCPHYS)CPUMGetHyperCR3(pVCpu), PGMGetHyperCR3(pVCpu), PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
    return rc;
}
    4362 
    4363 
    4364 /**
    4365  * Dumps a PAE shadow page table.
    4366  *
    4367  * @returns VBox status code (VINF_SUCCESS).
    4368  * @param   pVM         The VM handle.
    4369  * @param   pPT         Pointer to the page table.
    4370  * @param   u64Address  The virtual address of the page table starts.
    4371  * @param   fLongMode   Set if this a long mode table; clear if it's a legacy mode table.
    4372  * @param   cMaxDepth   The maxium depth.
    4373  * @param   pHlp        Pointer to the output functions.
    4374  */
    4375 static int  pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
    4376 {
    4377     for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
    4378     {
    4379         X86PTEPAE Pte = pPT->a[i];
    4380         if (Pte.n.u1Present)
    4381         {
    4382             pHlp->pfnPrintf(pHlp,
    4383                             fLongMode       /*P R  S  A  D  G  WT CD AT NX 4M a p ?  */
    4384                             ? "%016llx 3    | P %c %c %c %c %c %s %s %s %s 4K %c%c%c  %016llx\n"
    4385                             :  "%08llx 2   |  P %c %c %c %c %c %s %s %s %s 4K %c%c%c  %016llx\n",
    4386                             u64Address + ((uint64_t)i << X86_PT_PAE_SHIFT),
    4387                             Pte.n.u1Write       ? 'W'  : 'R',
    4388                             Pte.n.u1User        ? 'U'  : 'S',
    4389                             Pte.n.u1Accessed    ? 'A'  : '-',
    4390                             Pte.n.u1Dirty       ? 'D'  : '-',
    4391                             Pte.n.u1Global      ? 'G'  : '-',
    4392                             Pte.n.u1WriteThru   ? "WT" : "--",
    4393                             Pte.n.u1CacheDisable? "CD" : "--",
    4394                             Pte.n.u1PAT         ? "AT" : "--",
    4395                             Pte.n.u1NoExecute   ? "NX" : "--",
    4396                             Pte.u & PGM_PTFLAGS_TRACK_DIRTY   ? 'd' : '-',
    4397                             Pte.u & RT_BIT(10)                   ? '1' : '0',
    4398                             Pte.u & PGM_PTFLAGS_CSAM_VALIDATED? 'v' : '-',
    4399                             Pte.u & X86_PTE_PAE_PG_MASK);
    4400         }
    4401     }
    4402     return VINF_SUCCESS;
    4403 }
    4404 
    4405 
/**
 * Dumps a PAE shadow page directory table.
 *
 * Prints each present PDE (big pages inline, 4K PDEs with a recursive dump
 * of the page table below when cMaxDepth allows) and returns the worst
 * status encountered.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param   pVM         The VM handle.
 * @param   HCPhys      The physical address of the page directory table.
 * @param   u64Address  The virtual address of the page table starts.
 * @param   cr4         The CR4, PSE is currently used.
 * @param   fLongMode   Set if this a long mode table; clear if it's a legacy mode table.
 * @param   cMaxDepth   The maxium depth.
 * @param   pHlp        Pointer to the output functions.
 */
static int  pgmR3DumpHierarchyHCPaePD(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
    PX86PDPAE pPD = (PX86PDPAE)MMPagePhys2Page(pVM, HCPhys);
    if (!pPD)
    {
        pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory at HCPhys=%RHp was not found in the page pool!\n",
                        fLongMode ? 16 : 8, u64Address, HCPhys);
        return VERR_INVALID_PARAMETER;
    }
    /* Big (2/4 MB) pages are always available in long mode; legacy mode needs CR4.PSE. */
    const bool fBigPagesSupported = fLongMode || !!(cr4 & X86_CR4_PSE);

    int rc = VINF_SUCCESS;
    for (unsigned i = 0; i < RT_ELEMENTS(pPD->a); i++)
    {
        X86PDEPAE Pde = pPD->a[i];
        if (Pde.n.u1Present)
        {
            if (fBigPagesSupported && Pde.b.u1Size)
                /* Big page entry: print it and stop, no page table below it. */
                pHlp->pfnPrintf(pHlp,
                                fLongMode       /*P R  S  A  D  G  WT CD AT NX 4M a p ?  */
                                ? "%016llx 2   |  P %c %c %c %c %c %s %s %s %s 4M %c%c%c  %016llx\n"
                                :  "%08llx 1  |   P %c %c %c %c %c %s %s %s %s 4M %c%c%c  %016llx\n",
                                u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT),
                                Pde.b.u1Write       ? 'W'  : 'R',
                                Pde.b.u1User        ? 'U'  : 'S',
                                Pde.b.u1Accessed    ? 'A'  : '-',
                                Pde.b.u1Dirty       ? 'D'  : '-',
                                Pde.b.u1Global      ? 'G'  : '-',
                                Pde.b.u1WriteThru   ? "WT" : "--",
                                Pde.b.u1CacheDisable? "CD" : "--",
                                Pde.b.u1PAT         ? "AT" : "--",
                                Pde.b.u1NoExecute   ? "NX" : "--",
                                Pde.u & RT_BIT_64(9)                ? '1' : '0',
                                Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
                                Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                Pde.u & X86_PDE_PAE_PG_MASK);
            else
            {
                pHlp->pfnPrintf(pHlp,
                                fLongMode       /*P R  S  A  D  G  WT CD AT NX 4M a p ?  */
                                ? "%016llx 2   |  P %c %c %c %c %c %s %s .. %s 4K %c%c%c  %016llx\n"
                                :  "%08llx 1  |   P %c %c %c %c %c %s %s .. %s 4K %c%c%c  %016llx\n",
                                u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT),
                                Pde.n.u1Write       ? 'W'  : 'R',
                                Pde.n.u1User        ? 'U'  : 'S',
                                Pde.n.u1Accessed    ? 'A'  : '-',
                                Pde.n.u1Reserved0   ? '?'  : '.', /* ignored */
                                Pde.n.u1Reserved1   ? '?'  : '.', /* ignored */
                                Pde.n.u1WriteThru   ? "WT" : "--",
                                Pde.n.u1CacheDisable? "CD" : "--",
                                Pde.n.u1NoExecute   ? "NX" : "--",
                                Pde.u & RT_BIT_64(9)                ? '1' : '0',
                                Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
                                Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                Pde.u & X86_PDE_PAE_PG_MASK);
                if (cMaxDepth >= 1)
                {
                    /** @todo what about using the page pool for mapping PTs? */
                    uint64_t    u64AddressPT = u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT);
                    RTHCPHYS    HCPhysPT     = Pde.u & X86_PDE_PAE_PG_MASK;
                    PX86PTPAE   pPT          = NULL;
                    if (!(Pde.u & PGM_PDFLAGS_MAPPING))
                        pPT = (PX86PTPAE)MMPagePhys2Page(pVM, HCPhysPT);
                    else
                    {
                        /* The PT belongs to a hypervisor mapping rather than the pool;
                           search the mapping list for it and sanity-check the PDE. */
                        for (PPGMMAPPING pMap = pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)
                        {
                            uint64_t off = u64AddressPT - pMap->GCPtr;
                            if (off < pMap->cb)
                            {
                                const int iPDE = (uint32_t)(off >> X86_PD_SHIFT);
                                const int iSub = (int)((off >> X86_PD_PAE_SHIFT) & 1); /* MSC is a pain sometimes */
                                if ((iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0) != HCPhysPT)
                                    pHlp->pfnPrintf(pHlp, "%0*llx error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
                                                    fLongMode ? 16 : 8, u64AddressPT, iPDE,
                                                    iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0, HCPhysPT);
                                pPT = &pMap->aPTs[iPDE].paPaePTsR3[iSub];
                            }
                        }
                    }
                    /* Recurse into the page table; keep the first/worst failure in rc. */
                    int rc2 = VERR_INVALID_PARAMETER;
                    if (pPT)
                        rc2 = pgmR3DumpHierarchyHCPaePT(pVM, pPT, u64AddressPT, fLongMode, cMaxDepth - 1, pHlp);
                    else
                        pHlp->pfnPrintf(pHlp, "%0*llx error! Page table at HCPhys=%RHp was not found in the page pool!\n",
                                        fLongMode ? 16 : 8, u64AddressPT, HCPhysPT);
                    if (rc2 < rc && RT_SUCCESS(rc))
                        rc = rc2;
                }
            }
        }
    }
    return rc;
}
    4512 
    4513 
    4514 /**
    4515  * Dumps a PAE shadow page directory pointer table.
    4516  *
    4517  * @returns VBox status code (VINF_SUCCESS).
    4518  * @param   pVM         The VM handle.
    4519  * @param   HCPhys      The physical address of the page directory pointer table.
    4520  * @param   u64Address  The virtual address of the page table starts.
    4521  * @param   cr4         The CR4, PSE is currently used.
    4522  * @param   fLongMode   Set if this a long mode table; clear if it's a legacy mode table.
    4523  * @param   cMaxDepth   The maxium depth.
    4524  * @param   pHlp        Pointer to the output functions.
    4525  */
    4526 static int  pgmR3DumpHierarchyHCPaePDPT(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
    4527 {
    4528     PX86PDPT pPDPT = (PX86PDPT)MMPagePhys2Page(pVM, HCPhys);
    4529     if (!pPDPT)
    4530     {
    4531         pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory pointer table at HCPhys=%RHp was not found in the page pool!\n",
    4532                         fLongMode ? 16 : 8, u64Address, HCPhys);
    4533         return VERR_INVALID_PARAMETER;
    4534     }
    4535 
    4536     int rc = VINF_SUCCESS;
    4537     const unsigned c = fLongMode ? RT_ELEMENTS(pPDPT->a) : X86_PG_PAE_PDPE_ENTRIES;
    4538     for (unsigned i = 0; i < c; i++)
    4539     {
    4540         X86PDPE Pdpe = pPDPT->a[i];
    4541         if (Pdpe.n.u1Present)
    4542         {
    4543             if (fLongMode)
    4544                 pHlp->pfnPrintf(pHlp,         /*P R  S  A  D  G  WT CD AT NX 4M a p ?  */
    4545                                 "%016llx 1  |   P %c %c %c %c %c %s %s %s %s .. %c%c%c  %016llx\n",
    4546                                 u64Address + ((uint64_t)i << X86_PDPT_SHIFT),
    4547                                 Pdpe.lm.u1Write       ? 'W'  : 'R',
    4548                                 Pdpe.lm.u1User        ? 'U'  : 'S',
    4549                                 Pdpe.lm.u1Accessed    ? 'A'  : '-',
    4550                                 Pdpe.lm.u3Reserved & 1? '?'  : '.', /* ignored */
    4551                                 Pdpe.lm.u3Reserved & 4? '!'  : '.', /* mbz */
    4552                                 Pdpe.lm.u1WriteThru   ? "WT" : "--",
    4553                                 Pdpe.lm.u1CacheDisable? "CD" : "--",
    4554                                 Pdpe.lm.u3Reserved & 2? "!"  : "..",/* mbz */
    4555                                 Pdpe.lm.u1NoExecute   ? "NX" : "--",
    4556                                 Pdpe.u & RT_BIT(9)                   ? '1' : '0',
    4557                                 Pdpe.u & PGM_PLXFLAGS_PERMANENT   ? 'p' : '-',
    4558                                 Pdpe.u & RT_BIT(11)                  ? '1' : '0',
    4559                                 Pdpe.u & X86_PDPE_PG_MASK);
    4560             else
    4561                 pHlp->pfnPrintf(pHlp,      /*P             G  WT CD AT NX 4M a p ?  */
    4562                                 "%08x 0 |    P             %c %s %s %s %s .. %c%c%c  %016llx\n",
    4563                                 i << X86_PDPT_SHIFT,
    4564                                 Pdpe.n.u4Reserved & 1? '!'  : '.', /* mbz */
    4565                                 Pdpe.n.u4Reserved & 4? '!'  : '.', /* mbz */
    4566                                 Pdpe.n.u1WriteThru   ? "WT" : "--",
    4567                                 Pdpe.n.u1CacheDisable? "CD" : "--",
    4568                                 Pdpe.n.u4Reserved & 2? "!"  : "..",/* mbz */
    4569                                 Pdpe.u & RT_BIT(9)                   ? '1' : '0',
    4570                                 Pdpe.u & PGM_PLXFLAGS_PERMANENT   ? 'p' : '-',
    4571                                 Pdpe.u & RT_BIT(11)                  ? '1' : '0',
    4572                                 Pdpe.u & X86_PDPE_PG_MASK);
    4573             if (cMaxDepth >= 1)
    4574             {
    4575                 int rc2 = pgmR3DumpHierarchyHCPaePD(pVM, Pdpe.u & X86_PDPE_PG_MASK, u64Address + ((uint64_t)i << X86_PDPT_SHIFT),
    4576                                                     cr4, fLongMode, cMaxDepth - 1, pHlp);
    4577                 if (rc2 < rc && RT_SUCCESS(rc))
    4578                     rc = rc2;
    4579             }
    4580         }
    4581     }
    4582     return rc;
    4583 }
    4584 
    4585 
    4586 /**
    4587  * Dumps a 32-bit shadow page table.
    4588  *
    4589  * @returns VBox status code (VINF_SUCCESS).
    4590  * @param   pVM         The VM handle.
    4591  * @param   HCPhys      The physical address of the table.
    4592  * @param   cr4         The CR4, PSE is currently used.
    4593  * @param   cMaxDepth   The maxium depth.
    4594  * @param   pHlp        Pointer to the output functions.
    4595  */
    4596 static int pgmR3DumpHierarchyHcPaePML4(PVM pVM, RTHCPHYS HCPhys, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
    4597 {
    4598     PX86PML4 pPML4 = (PX86PML4)MMPagePhys2Page(pVM, HCPhys);
    4599     if (!pPML4)
    4600     {
    4601         pHlp->pfnPrintf(pHlp, "Page map level 4 at HCPhys=%RHp was not found in the page pool!\n", HCPhys);
    4602         return VERR_INVALID_PARAMETER;
    4603     }
    4604 
    4605     int rc = VINF_SUCCESS;
    4606     for (unsigned i = 0; i < RT_ELEMENTS(pPML4->a); i++)
    4607     {
    4608         X86PML4E Pml4e = pPML4->a[i];
    4609         if (Pml4e.n.u1Present)
    4610         {
    4611             uint64_t u64Address = ((uint64_t)i << X86_PML4_SHIFT) | (((uint64_t)i >> (X86_PML4_SHIFT - X86_PDPT_SHIFT - 1)) * 0xffff000000000000ULL);
    4612             pHlp->pfnPrintf(pHlp,         /*P R  S  A  D  G  WT CD AT NX 4M a p ?  */
    4613                             "%016llx 0 |    P %c %c %c %c %c %s %s %s %s .. %c%c%c  %016llx\n",
    4614                             u64Address,
    4615                             Pml4e.n.u1Write       ? 'W'  : 'R',
    4616                             Pml4e.n.u1User        ? 'U'  : 'S',
    4617                             Pml4e.n.u1Accessed    ? 'A'  : '-',
    4618                             Pml4e.n.u3Reserved & 1? '?'  : '.', /* ignored */
    4619                             Pml4e.n.u3Reserved & 4? '!'  : '.', /* mbz */
    4620                             Pml4e.n.u1WriteThru   ? "WT" : "--",
    4621                             Pml4e.n.u1CacheDisable? "CD" : "--",
    4622                             Pml4e.n.u3Reserved & 2? "!"  : "..",/* mbz */
    4623                             Pml4e.n.u1NoExecute   ? "NX" : "--",
    4624                             Pml4e.u & RT_BIT(9)                   ? '1' : '0',
    4625                             Pml4e.u & PGM_PLXFLAGS_PERMANENT   ? 'p' : '-',
    4626                             Pml4e.u & RT_BIT(11)                  ? '1' : '0',
    4627                             Pml4e.u & X86_PML4E_PG_MASK);
    4628 
    4629             if (cMaxDepth >= 1)
    4630             {
    4631                 int rc2 = pgmR3DumpHierarchyHCPaePDPT(pVM, Pml4e.u & X86_PML4E_PG_MASK, u64Address, cr4, true, cMaxDepth - 1, pHlp);
    4632                 if (rc2 < rc && RT_SUCCESS(rc))
    4633                     rc = rc2;
    4634             }
    4635         }
    4636     }
    4637     return rc;
    4638 }
    4639 
    4640 
    4641 /**
    4642  * Dumps a 32-bit shadow page table.
    4643  *
    4644  * @returns VBox status code (VINF_SUCCESS).
    4645  * @param   pVM         The VM handle.
    4646  * @param   pPT         Pointer to the page table.
    4647  * @param   u32Address  The virtual address this table starts at.
    4648  * @param   pHlp        Pointer to the output functions.
    4649  */
    4650 int  pgmR3DumpHierarchyHC32BitPT(PVM pVM, PX86PT pPT, uint32_t u32Address, PCDBGFINFOHLP pHlp)
    4651 {
    4652     for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
    4653     {
    4654         X86PTE Pte = pPT->a[i];
    4655         if (Pte.n.u1Present)
    4656         {
    4657             pHlp->pfnPrintf(pHlp,      /*P R  S  A  D  G  WT CD AT NX 4M a m d  */
    4658                             "%08x 1  |   P %c %c %c %c %c %s %s %s .. 4K %c%c%c  %08x\n",
    4659                             u32Address + (i << X86_PT_SHIFT),
    4660                             Pte.n.u1Write       ? 'W'  : 'R',
    4661                             Pte.n.u1User        ? 'U'  : 'S',
    4662                             Pte.n.u1Accessed    ? 'A'  : '-',
    4663                             Pte.n.u1Dirty       ? 'D'  : '-',
    4664                             Pte.n.u1Global      ? 'G'  : '-',
    4665                             Pte.n.u1WriteThru   ? "WT" : "--",
    4666                             Pte.n.u1CacheDisable? "CD" : "--",
    4667                             Pte.n.u1PAT         ? "AT" : "--",
    4668                             Pte.u & PGM_PTFLAGS_TRACK_DIRTY     ? 'd' : '-',
    4669                             Pte.u & RT_BIT(10)                     ? '1' : '0',
    4670                             Pte.u & PGM_PTFLAGS_CSAM_VALIDATED  ? 'v' : '-',
    4671                             Pte.u & X86_PDE_PG_MASK);
    4672         }
    4673     }
    4674     return VINF_SUCCESS;
    4675 }
    4676 
    4677 
    4678 /**
    4679  * Dumps a 32-bit shadow page directory and page tables.
    4680  *
    4681  * @returns VBox status code (VINF_SUCCESS).
    4682  * @param   pVM         The VM handle.
    4683  * @param   cr3         The root of the hierarchy.
    4684  * @param   cr4         The CR4, PSE is currently used.
    4685  * @param   cMaxDepth   How deep into the hierarchy the dumper should go.
    4686  * @param   pHlp        Pointer to the output functions.
    4687  */
    4688 int  pgmR3DumpHierarchyHC32BitPD(PVM pVM, uint32_t cr3, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
    4689 {
    4690     PX86PD pPD = (PX86PD)MMPagePhys2Page(pVM, cr3 & X86_CR3_PAGE_MASK);
    4691     if (!pPD)
    4692     {
    4693         pHlp->pfnPrintf(pHlp, "Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK);
    4694         return VERR_INVALID_PARAMETER;
    4695     }
    4696 
    4697     int rc = VINF_SUCCESS;
    4698     for (unsigned i = 0; i < RT_ELEMENTS(pPD->a); i++)
    4699     {
    4700         X86PDE Pde = pPD->a[i];
    4701         if (Pde.n.u1Present)
    4702         {
    4703             const uint32_t u32Address = i << X86_PD_SHIFT;
    4704             if ((cr4 & X86_CR4_PSE) && Pde.b.u1Size)
    4705                 pHlp->pfnPrintf(pHlp,      /*P R  S  A  D  G  WT CD AT NX 4M a m d  */
    4706                                 "%08x 0 |    P %c %c %c %c %c %s %s %s .. 4M %c%c%c  %08x\n",
    4707                                 u32Address,
    4708                                 Pde.b.u1Write       ? 'W'  : 'R',
    4709                                 Pde.b.u1User        ? 'U'  : 'S',
    4710                                 Pde.b.u1Accessed    ? 'A'  : '-',
    4711                                 Pde.b.u1Dirty       ? 'D'  : '-',
    4712                                 Pde.b.u1Global      ? 'G'  : '-',
    4713                                 Pde.b.u1WriteThru   ? "WT" : "--",
    4714                                 Pde.b.u1CacheDisable? "CD" : "--",
    4715                                 Pde.b.u1PAT         ? "AT" : "--",
    4716                                 Pde.u & RT_BIT_64(9)                ? '1' : '0',
    4717                                 Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
    4718                                 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
    4719                                 Pde.u & X86_PDE4M_PG_MASK);
    4720             else
    4721             {
    4722                 pHlp->pfnPrintf(pHlp,      /*P R  S  A  D  G  WT CD AT NX 4M a m d  */
    4723                                 "%08x 0 |    P %c %c %c %c %c %s %s .. .. 4K %c%c%c  %08x\n",
    4724                                 u32Address,
    4725                                 Pde.n.u1Write       ? 'W'  : 'R',
    4726                                 Pde.n.u1User        ? 'U'  : 'S',
    4727                                 Pde.n.u1Accessed    ? 'A'  : '-',
    4728                                 Pde.n.u1Reserved0   ? '?'  : '.', /* ignored */
    4729                                 Pde.n.u1Reserved1   ? '?'  : '.', /* ignored */
    4730                                 Pde.n.u1WriteThru   ? "WT" : "--",
    4731                                 Pde.n.u1CacheDisable? "CD" : "--",
    4732                                 Pde.u & RT_BIT_64(9)                ? '1' : '0',
    4733                                 Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
    4734                                 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
    4735                                 Pde.u & X86_PDE_PG_MASK);
    4736                 if (cMaxDepth >= 1)
    4737                 {
    4738                     /** @todo what about using the page pool for mapping PTs? */
    4739                     RTHCPHYS HCPhys = Pde.u & X86_PDE_PG_MASK;
    4740                     PX86PT pPT = NULL;
    4741                     if (!(Pde.u & PGM_PDFLAGS_MAPPING))
    4742                         pPT = (PX86PT)MMPagePhys2Page(pVM, HCPhys);
    4743                     else
    4744                     {
    4745                         for (PPGMMAPPING pMap = pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)
    4746                             if (u32Address - pMap->GCPtr < pMap->cb)
    4747                             {
    4748                                 int iPDE = (u32Address - pMap->GCPtr) >> X86_PD_SHIFT;
    4749                                 if (pMap->aPTs[iPDE].HCPhysPT != HCPhys)
    4750                                     pHlp->pfnPrintf(pHlp, "%08x error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
    4751                                                     u32Address, iPDE, pMap->aPTs[iPDE].HCPhysPT, HCPhys);
    4752                                 pPT = pMap->aPTs[iPDE].pPTR3;
    4753                             }
    4754                     }
    4755                     int rc2 = VERR_INVALID_PARAMETER;
    4756                     if (pPT)
    4757                         rc2 = pgmR3DumpHierarchyHC32BitPT(pVM, pPT, u32Address, pHlp);
    4758                     else
    4759                         pHlp->pfnPrintf(pHlp, "%08x error! Page table at %#x was not found in the page pool!\n", u32Address, HCPhys);
    4760                     if (rc2 < rc && RT_SUCCESS(rc))
    4761                         rc = rc2;
    4762                 }
    4763             }
    4764         }
    4765     }
    4766 
    4767     return rc;
    4768 }
    4769 
    4770 
    4771 /**
    4772  * Dumps a 32-bit shadow page table.
    4773  *
    4774  * @returns VBox status code (VINF_SUCCESS).
    4775  * @param   pVM         The VM handle.
    4776  * @param   pPT         Pointer to the page table.
    4777  * @param   u32Address  The virtual address this table starts at.
    4778  * @param   PhysSearch  Address to search for.
    4779  */
    4780 int pgmR3DumpHierarchyGC32BitPT(PVM pVM, PX86PT pPT, uint32_t u32Address, RTGCPHYS PhysSearch)
    4781 {
    4782     for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
    4783     {
    4784         X86PTE Pte = pPT->a[i];
    4785         if (Pte.n.u1Present)
    4786         {
    4787             Log((           /*P R  S  A  D  G  WT CD AT NX 4M a m d  */
    4788                  "%08x 1  |   P %c %c %c %c %c %s %s %s .. 4K %c%c%c  %08x\n",
    4789                  u32Address + (i << X86_PT_SHIFT),
    4790                  Pte.n.u1Write       ? 'W'  : 'R',
    4791                  Pte.n.u1User        ? 'U'  : 'S',
    4792                  Pte.n.u1Accessed    ? 'A'  : '-',
    4793                  Pte.n.u1Dirty       ? 'D'  : '-',
    4794                  Pte.n.u1Global      ? 'G'  : '-',
    4795                  Pte.n.u1WriteThru   ? "WT" : "--",
    4796                  Pte.n.u1CacheDisable? "CD" : "--",
    4797                  Pte.n.u1PAT         ? "AT" : "--",
    4798                  Pte.u & PGM_PTFLAGS_TRACK_DIRTY     ? 'd' : '-',
    4799                  Pte.u & RT_BIT(10)                     ? '1' : '0',
    4800                  Pte.u & PGM_PTFLAGS_CSAM_VALIDATED  ? 'v' : '-',
    4801                  Pte.u & X86_PDE_PG_MASK));
    4802 
    4803             if ((Pte.u & X86_PDE_PG_MASK) == PhysSearch)
    4804             {
    4805                 uint64_t fPageShw = 0;
    4806                 RTHCPHYS pPhysHC = 0;
    4807 
    4808                 /** @todo SMP support!! */
    4809                 PGMShwGetPage(&pVM->aCpus[0], (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), &fPageShw, &pPhysHC);
    4810                 Log(("Found %RGp at %RGv -> flags=%llx\n", PhysSearch, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), fPageShw));
    4811             }
    4812         }
    4813     }
    4814     return VINF_SUCCESS;
    4815 }
    4816 
    4817 
    4818 /**
    4819  * Dumps a 32-bit guest page directory and page tables.
    4820  *
    4821  * @returns VBox status code (VINF_SUCCESS).
    4822  * @param   pVM         The VM handle.
    4823  * @param   cr3         The root of the hierarchy.
    4824  * @param   cr4         The CR4, PSE is currently used.
    4825  * @param   PhysSearch  Address to search for.
    4826  */
    4827 VMMR3DECL(int) PGMR3DumpHierarchyGC(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPHYS PhysSearch)
    4828 {
    4829     bool fLongMode = false;
    4830     const unsigned cch = fLongMode ? 16 : 8; NOREF(cch);
    4831     PX86PD pPD = 0;
    4832 
    4833     int rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAGE_MASK, &pPD);
    4834     if (RT_FAILURE(rc) || !pPD)
    4835     {
    4836         Log(("Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK));
    4837         return VERR_INVALID_PARAMETER;
    4838     }
    4839 
    4840     Log(("cr3=%08x cr4=%08x%s\n"
    4841          "%-*s        P - Present\n"
    4842          "%-*s        | R/W - Read (0) / Write (1)\n"
    4843          "%-*s        | | U/S - User (1) / Supervisor (0)\n"
    4844          "%-*s        | | | A - Accessed\n"
    4845          "%-*s        | | | | D - Dirty\n"
    4846          "%-*s        | | | | | G - Global\n"
    4847          "%-*s        | | | | | | WT - Write thru\n"
    4848          "%-*s        | | | | | | |  CD - Cache disable\n"
    4849          "%-*s        | | | | | | |  |  AT - Attribute table (PAT)\n"
    4850          "%-*s        | | | | | | |  |  |  NX - No execute (K8)\n"
    4851          "%-*s        | | | | | | |  |  |  |  4K/4M/2M - Page size.\n"
    4852          "%-*s        | | | | | | |  |  |  |  |  AVL - a=allocated; m=mapping; d=track dirty;\n"
    4853          "%-*s        | | | | | | |  |  |  |  |  |     p=permanent; v=validated;\n"
    4854          "%-*s Level  | | | | | | |  |  |  |  |  |    Page\n"
    4855        /* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
    4856                       - W U - - - -- -- -- -- -- 010 */
    4857          , cr3, cr4, fLongMode ? " Long Mode" : "",
    4858          cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
    4859          cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "Address"));
    4860 
    4861     for (unsigned i = 0; i < RT_ELEMENTS(pPD->a); i++)
    4862     {
    4863         X86PDE Pde = pPD->a[i];
    4864         if (Pde.n.u1Present)
    4865         {
    4866             const uint32_t u32Address = i << X86_PD_SHIFT;
    4867 
    4868             if ((cr4 & X86_CR4_PSE) && Pde.b.u1Size)
    4869                 Log((           /*P R  S  A  D  G  WT CD AT NX 4M a m d  */
    4870                      "%08x 0 |    P %c %c %c %c %c %s %s %s .. 4M %c%c%c  %08x\n",
    4871                      u32Address,
    4872                      Pde.b.u1Write       ? 'W'  : 'R',
    4873                      Pde.b.u1User        ? 'U'  : 'S',
    4874                      Pde.b.u1Accessed    ? 'A'  : '-',
    4875                      Pde.b.u1Dirty       ? 'D'  : '-',
    4876                      Pde.b.u1Global      ? 'G'  : '-',
    4877                      Pde.b.u1WriteThru   ? "WT" : "--",
    4878                      Pde.b.u1CacheDisable? "CD" : "--",
    4879                      Pde.b.u1PAT         ? "AT" : "--",
    4880                      Pde.u & RT_BIT(9)      ? '1' : '0',
    4881                      Pde.u & RT_BIT(10)     ? '1' : '0',
    4882                      Pde.u & RT_BIT(11)     ? '1' : '0',
    4883                      pgmGstGet4MBPhysPage(&pVM->pgm.s, Pde)));
    4884             /** @todo PhysSearch */
    4885             else
    4886             {
    4887                 Log((           /*P R  S  A  D  G  WT CD AT NX 4M a m d  */
    4888                      "%08x 0 |    P %c %c %c %c %c %s %s .. .. 4K %c%c%c  %08x\n",
    4889                      u32Address,
    4890                      Pde.n.u1Write       ? 'W'  : 'R',
    4891                      Pde.n.u1User        ? 'U'  : 'S',
    4892                      Pde.n.u1Accessed    ? 'A'  : '-',
    4893                      Pde.n.u1Reserved0   ? '?'  : '.', /* ignored */
    4894                      Pde.n.u1Reserved1   ? '?'  : '.', /* ignored */
    4895                      Pde.n.u1WriteThru   ? "WT" : "--",
    4896                      Pde.n.u1CacheDisable? "CD" : "--",
    4897                      Pde.u & RT_BIT(9)      ? '1' : '0',
    4898                      Pde.u & RT_BIT(10)     ? '1' : '0',
    4899                      Pde.u & RT_BIT(11)     ? '1' : '0',
    4900                      Pde.u & X86_PDE_PG_MASK));
    4901                 ////if (cMaxDepth >= 1)
    4902                 {
    4903                     /** @todo what about using the page pool for mapping PTs? */
    4904                     RTGCPHYS GCPhys = Pde.u & X86_PDE_PG_MASK;
    4905                     PX86PT pPT = NULL;
    4906 
    4907                     rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pPT);
    4908 
    4909                     int rc2 = VERR_INVALID_PARAMETER;
    4910                     if (pPT)
    4911                         rc2 = pgmR3DumpHierarchyGC32BitPT(pVM, pPT, u32Address, PhysSearch);
    4912                     else
    4913                         Log(("%08x error! Page table at %#x was not found in the page pool!\n", u32Address, GCPhys));
    4914                     if (rc2 < rc && RT_SUCCESS(rc))
    4915                         rc = rc2;
    4916                 }
    4917             }
    4918         }
    4919     }
    4920 
    4921     return rc;
    4922 }
    4923 
    4924 
    4925 /**
    4926  * Dumps a page table hierarchy use only physical addresses and cr4/lm flags.
    4927  *
    4928  * @returns VBox status code (VINF_SUCCESS).
    4929  * @param   pVM         The VM handle.
    4930  * @param   cr3         The root of the hierarchy.
    4931  * @param   cr4         The cr4, only PAE and PSE is currently used.
    4932  * @param   fLongMode   Set if long mode, false if not long mode.
    4933  * @param   cMaxDepth   Number of levels to dump.
    4934  * @param   pHlp        Pointer to the output functions.
    4935  */
    4936 VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint64_t cr3, uint64_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
    4937 {
    4938     if (!pHlp)
    4939         pHlp = DBGFR3InfoLogHlp();
    4940     if (!cMaxDepth)
    4941         return VINF_SUCCESS;
    4942     const unsigned cch = fLongMode ? 16 : 8;
    4943     pHlp->pfnPrintf(pHlp,
    4944                     "cr3=%08x cr4=%08x%s\n"
    4945                     "%-*s        P - Present\n"
    4946                     "%-*s        | R/W - Read (0) / Write (1)\n"
    4947                     "%-*s        | | U/S - User (1) / Supervisor (0)\n"
    4948                     "%-*s        | | | A - Accessed\n"
    4949                     "%-*s        | | | | D - Dirty\n"
    4950                     "%-*s        | | | | | G - Global\n"
    4951                     "%-*s        | | | | | | WT - Write thru\n"
    4952                     "%-*s        | | | | | | |  CD - Cache disable\n"
    4953                     "%-*s        | | | | | | |  |  AT - Attribute table (PAT)\n"
    4954                     "%-*s        | | | | | | |  |  |  NX - No execute (K8)\n"
    4955                     "%-*s        | | | | | | |  |  |  |  4K/4M/2M - Page size.\n"
    4956                     "%-*s        | | | | | | |  |  |  |  |  AVL - a=allocated; m=mapping; d=track dirty;\n"
    4957                     "%-*s        | | | | | | |  |  |  |  |  |     p=permanent; v=validated;\n"
    4958                     "%-*s Level  | | | | | | |  |  |  |  |  |    Page\n"
    4959                   /* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
    4960                                  - W U - - - -- -- -- -- -- 010 */
    4961                     , cr3, cr4, fLongMode ? " Long Mode" : "",
    4962                     cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
    4963                     cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "Address");
    4964     if (cr4 & X86_CR4_PAE)
    4965     {
    4966         if (fLongMode)
    4967             return pgmR3DumpHierarchyHcPaePML4(pVM, cr3 & X86_CR3_PAGE_MASK, cr4, cMaxDepth, pHlp);
    4968         return pgmR3DumpHierarchyHCPaePDPT(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, 0, cr4, false, cMaxDepth, pHlp);
    4969     }
    4970     return pgmR3DumpHierarchyHC32BitPD(pVM, cr3 & X86_CR3_PAGE_MASK, cr4, cMaxDepth, pHlp);
    4971 }
    4972 
    4973 #ifdef VBOX_WITH_DEBUGGER
    4974 
    4975 /**
    4976  * The '.pgmram' command.
    4977  *
    4978  * @returns VBox status.
    4979  * @param   pCmd        Pointer to the command descriptor (as registered).
    4980  * @param   pCmdHlp     Pointer to command helper functions.
    4981  * @param   pVM         Pointer to the current VM (if any).
    4982  * @param   paArgs      Pointer to (readonly) array of arguments.
    4983  * @param   cArgs       Number of arguments in the array.
    4984  */
    4985 static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
    4986 {
    4987     /*
    4988      * Validate input.
    4989      */
    4990     if (!pVM)
    4991         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
    4992     if (!pVM->pgm.s.pRamRangesRC)
    4993         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Sorry, no Ram is registered.\n");
    4994 
    4995     /*
    4996      * Dump the ranges.
    4997      */
    4998     int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "From     - To (incl) pvHC\n");
    4999     PPGMRAMRANGE pRam;
    5000     for (pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
    5001     {
    5002         rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL,
    5003             "%RGp - %RGp  %p\n",
    5004             pRam->GCPhys, pRam->GCPhysLast, pRam->pvR3);
    5005         if (RT_FAILURE(rc))
    5006             return rc;
    5007     }
    5008 
    5009     return VINF_SUCCESS;
    5010 }
    5011 
    5012 
    5013 /**
    5014  * The '.pgmmap' command.
    5015  *
    5016  * @returns VBox status.
    5017  * @param   pCmd        Pointer to the command descriptor (as registered).
    5018  * @param   pCmdHlp     Pointer to command helper functions.
    5019  * @param   pVM         Pointer to the current VM (if any).
    5020  * @param   paArgs      Pointer to (readonly) array of arguments.
    5021  * @param   cArgs       Number of arguments in the array.
    5022  */
    5023 static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
    5024 {
    5025     /*
    5026      * Validate input.
    5027      */
    5028     if (!pVM)
    5029         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
    5030     if (!pVM->pgm.s.pMappingsR3)
    5031         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Sorry, no mappings are registered.\n");
    5032 
    5033     /*
    5034      * Print message about the fixedness of the mappings.
    5035      */
    5036     int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, pVM->pgm.s.fMappingsFixed ? "The mappings are FIXED.\n" : "The mappings are FLOATING.\n");
    5037     if (RT_FAILURE(rc))
    5038         return rc;
    5039 
    5040     /*
    5041      * Dump the ranges.
    5042      */
    5043     PPGMMAPPING pCur;
    5044     for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    5045     {
    5046         rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL,
    5047             "%08x - %08x %s\n",
    5048             pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
    5049         if (RT_FAILURE(rc))
    5050             return rc;
    5051     }
    5052 
    5053     return VINF_SUCCESS;
    5054 }
    5055 
    5056 
    5057 /**
    5058  * The '.pgmerror' and '.pgmerroroff' commands.
    5059  *
    5060  * @returns VBox status.
    5061  * @param   pCmd        Pointer to the command descriptor (as registered).
    5062  * @param   pCmdHlp     Pointer to command helper functions.
    5063  * @param   pVM         Pointer to the current VM (if any).
    5064  * @param   paArgs      Pointer to (readonly) array of arguments.
    5065  * @param   cArgs       Number of arguments in the array.
    5066  */
    5067 static DECLCALLBACK(int)  pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
    5068 {
    5069     /*
    5070      * Validate input.
    5071      */
    5072     if (!pVM)
    5073         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
    5074     AssertReturn(cArgs == 0 || (cArgs == 1 && paArgs[0].enmType == DBGCVAR_TYPE_STRING),
    5075                  pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: Hit bug in the parser.\n"));
    5076 
    5077     if (!cArgs)
    5078     {
    5079         /*
    5080          * Print the list of error injection locations with status.
    5081          */
    5082         pCmdHlp->pfnPrintf(pCmdHlp, NULL, "PGM error inject locations:\n");
    5083         pCmdHlp->pfnPrintf(pCmdHlp, NULL, "  handy - %RTbool\n", pVM->pgm.s.fErrInjHandyPages);
    5084     }
    5085     else
    5086     {
    5087 
    5088         /*
    5089          * String switch on where to inject the error.
    5090          */
    5091         bool const  fNewState = !strcmp(pCmd->pszCmd, "pgmerror");
    5092         const char *pszWhere = paArgs[0].u.pszString;
    5093         if (!strcmp(pszWhere, "handy"))
    5094             ASMAtomicWriteBool(&pVM->pgm.s.fErrInjHandyPages, fNewState);
    5095         else
    5096             return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: Invalid 'where' value: %s.\n", pszWhere);
    5097         pCmdHlp->pfnPrintf(pCmdHlp, NULL, "done\n");
    5098     }
    5099     return VINF_SUCCESS;
    5100 }
    5101 
    5102 
    5103 /**
    5104  * The '.pgmsync' command.
    5105  *
    5106  * @returns VBox status.
    5107  * @param   pCmd        Pointer to the command descriptor (as registered).
    5108  * @param   pCmdHlp     Pointer to command helper functions.
    5109  * @param   pVM         Pointer to the current VM (if any).
    5110  * @param   paArgs      Pointer to (readonly) array of arguments.
    5111  * @param   cArgs       Number of arguments in the array.
    5112  */
    5113 static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
    5114 {
    5115     /** @todo SMP support */
    5116     PVMCPU pVCpu = &pVM->aCpus[0];
    5117 
    5118     /*
    5119      * Validate input.
    5120      */
    5121     if (!pVM)
    5122         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
    5123 
    5124     /*
    5125      * Force page directory sync.
    5126      */
    5127     VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    5128 
    5129     int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Forcing page directory sync.\n");
    5130     if (RT_FAILURE(rc))
    5131         return rc;
    5132 
    5133     return VINF_SUCCESS;
    5134 }
    5135 
    5136 
    5137 #ifdef VBOX_STRICT
    5138 /**
    5139  * The '.pgmassertcr3' command.
    5140  *
    5141  * @returns VBox status.
    5142  * @param   pCmd        Pointer to the command descriptor (as registered).
    5143  * @param   pCmdHlp     Pointer to command helper functions.
    5144  * @param   pVM         Pointer to the current VM (if any).
    5145  * @param   paArgs      Pointer to (readonly) array of arguments.
    5146  * @param   cArgs       Number of arguments in the array.
    5147  */
    5148 static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
    5149 {
    5150     /** @todo SMP support!! */
    5151     PVMCPU pVCpu = &pVM->aCpus[0];
    5152 
    5153     /*
    5154      * Validate input.
    5155      */
    5156     if (!pVM)
    5157         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
    5158 
    5159     int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Checking shadow CR3 page tables for consistency.\n");
    5160     if (RT_FAILURE(rc))
    5161         return rc;
    5162 
    5163     PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
    5164 
    5165     return VINF_SUCCESS;
    5166 }
    5167 #endif /* VBOX_STRICT */
    5168 
    5169 
    5170 /**
    5171  * The '.pgmsyncalways' command.
    5172  *
    5173  * @returns VBox status.
    5174  * @param   pCmd        Pointer to the command descriptor (as registered).
    5175  * @param   pCmdHlp     Pointer to command helper functions.
    5176  * @param   pVM         Pointer to the current VM (if any).
    5177  * @param   paArgs      Pointer to (readonly) array of arguments.
    5178  * @param   cArgs       Number of arguments in the array.
    5179  */
    5180 static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
    5181 {
    5182     /** @todo SMP support!! */
    5183     PVMCPU pVCpu = &pVM->aCpus[0];
    5184 
    5185     /*
    5186      * Validate input.
    5187      */
    5188     if (!pVM)
    5189         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
    5190 
    5191     /*
    5192      * Force page directory sync.
    5193      */
    5194     if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS)
    5195     {
    5196         ASMAtomicAndU32(&pVCpu->pgm.s.fSyncFlags, ~PGM_SYNC_ALWAYS);
    5197         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Disabled permanent forced page directory syncing.\n");
    5198     }
    5199     else
    5200     {
    5201         ASMAtomicOrU32(&pVCpu->pgm.s.fSyncFlags, PGM_SYNC_ALWAYS);
    5202         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    5203         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Enabled permanent forced page directory syncing.\n");
    5204     }
    5205 }
    5206 
    5207 
    5208 /**
    5209  * The '.pgmsyncalways' command.
    5210  *
    5211  * @returns VBox status.
    5212  * @param   pCmd        Pointer to the command descriptor (as registered).
    5213  * @param   pCmdHlp     Pointer to command helper functions.
    5214  * @param   pVM         Pointer to the current VM (if any).
    5215  * @param   paArgs      Pointer to (readonly) array of arguments.
    5216  * @param   cArgs       Number of arguments in the array.
    5217  */
    5218 static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
    5219 {
    5220     /*
    5221      * Validate input.
    5222      */
    5223     if (!pVM)
    5224         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
    5225     if (    cArgs < 1
    5226         ||  cArgs > 2
    5227         ||  paArgs[0].enmType != DBGCVAR_TYPE_STRING
    5228         ||  (   cArgs > 1
    5229              && paArgs[1].enmType != DBGCVAR_TYPE_STRING))
    5230         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: parser error, invalid arguments.\n");
    5231     if (    cArgs >= 2
    5232         &&  strcmp(paArgs[1].u.pszString, "nozero"))
    5233         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: Invalid 2nd argument '%s', must be 'nozero'.\n", paArgs[1].u.pszString);
    5234     bool fIncZeroPgs = cArgs < 2;
    5235 
    5236     /*
    5237      * Open the output file and get the ram parameters.
    5238      */
    5239     RTFILE hFile;
    5240     int rc = RTFileOpen(&hFile, paArgs[0].u.pszString, RTFILE_O_WRITE | RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_WRITE);
    5241     if (RT_FAILURE(rc))
    5242         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileOpen(,'%s',) -> %Rrc.\n", paArgs[0].u.pszString, rc);
    5243 
    5244     uint32_t cbRamHole = 0;
    5245     CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
    5246     uint64_t cbRam     = 0;
    5247     CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
    5248     RTGCPHYS GCPhysEnd = cbRam + cbRamHole;
    5249 
    5250     /*
    5251      * Dump the physical memory, page by page.
    5252      */
    5253     RTGCPHYS    GCPhys = 0;
    5254     char        abZeroPg[PAGE_SIZE];
    5255     RT_ZERO(abZeroPg);
    5256 
    5257     pgmLock(pVM);
    5258     for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
    5259           pRam && pRam->GCPhys < GCPhysEnd && RT_SUCCESS(rc);
    5260           pRam = pRam->pNextR3)
    5261     {
    5262         /* fill the gap */
    5263         if (pRam->GCPhys > GCPhys && fIncZeroPgs)
    5264         {
    5265             while (pRam->GCPhys > GCPhys && RT_SUCCESS(rc))
    5266             {
    5267                 rc = RTFileWrite(hFile, abZeroPg, PAGE_SIZE, NULL);
    5268                 GCPhys += PAGE_SIZE;
    5269             }
    5270         }
    5271 
    5272         PCPGMPAGE pPage = &pRam->aPages[0];
    5273         while (GCPhys < pRam->GCPhysLast && RT_SUCCESS(rc))
    5274         {
    5275             if (PGM_PAGE_IS_ZERO(pPage))
    5276             {
    5277                 if (fIncZeroPgs)
    5278                 {
    5279                     rc = RTFileWrite(hFile, abZeroPg, PAGE_SIZE, NULL);
    5280                     if (RT_FAILURE(rc))
    5281                         pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
    5282                 }
    5283             }
    5284             else
    5285             {
    5286                 switch (PGM_PAGE_GET_TYPE(pPage))
    5287                 {
    5288                     case PGMPAGETYPE_RAM:
    5289                     case PGMPAGETYPE_ROM_SHADOW: /* trouble?? */
    5290                     case PGMPAGETYPE_ROM:
    5291                     case PGMPAGETYPE_MMIO2:
    5292                     {
    5293                         void const     *pvPage;
    5294                         PGMPAGEMAPLOCK  Lock;
    5295                         rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvPage, &Lock);
    5296                         if (RT_SUCCESS(rc))
    5297                         {
    5298                             rc = RTFileWrite(hFile, pvPage, PAGE_SIZE, NULL);
    5299                             PGMPhysReleasePageMappingLock(pVM, &Lock);
    5300                             if (RT_FAILURE(rc))
    5301                                 pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
    5302                         }
    5303                         else
    5304                             pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: PGMPhysGCPhys2CCPtrReadOnly -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
    5305                         break;
    5306                     }
    5307 
    5308                     default:
    5309                         AssertFailed();
    5310                     case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
    5311                     case PGMPAGETYPE_MMIO:
    5312                         if (fIncZeroPgs)
    5313                         {
    5314                             rc = RTFileWrite(hFile, abZeroPg, PAGE_SIZE, NULL);
    5315                             if (RT_FAILURE(rc))
    5316                                 pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
    5317                         }
    5318                         break;
    5319                 }
    5320             }
    5321 
    5322 
    5323             /* advance */
    5324             GCPhys += PAGE_SIZE;
    5325             pPage++;
    5326         }
    5327     }
    5328     pgmUnlock(pVM);
    5329 
    5330     RTFileClose(hFile);
    5331     if (RT_SUCCESS(rc))
    5332         return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Successfully saved physical memory to '%s'.\n", paArgs[0].u.pszString);
    5333     return VINF_SUCCESS;
    5334 }
    5335 
    5336 #endif /* VBOX_WITH_DEBUGGER */
    5337 
    5338 /**
    5339  * pvUser argument of the pgmR3CheckIntegrity*Node callbacks.
    5340  */
    5341 typedef struct PGMCHECKINTARGS
    5342 {
    5343     bool                    fLeftToRight;    /**< true: left-to-right; false: right-to-left. */
    5344     PPGMPHYSHANDLER         pPrevPhys;
    5345     PPGMVIRTHANDLER         pPrevVirt;
    5346     PPGMPHYS2VIRTHANDLER    pPrevPhys2Virt;
    5347     PVM                     pVM;
    5348 } PGMCHECKINTARGS, *PPGMCHECKINTARGS;
    5349 
    5350 /**
    5351  * Validate a node in the physical handler tree.
    5352  *
    5353  * @returns 0 on if ok, other wise 1.
    5354  * @param   pNode       The handler node.
    5355  * @param   pvUser      pVM.
    5356  */
    5357 static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
    5358 {
    5359     PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
    5360     PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)pNode;
    5361     AssertReleaseReturn(!((uintptr_t)pCur & 7), 1);
    5362     AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
    5363     AssertReleaseMsg(   !pArgs->pPrevPhys
    5364                      || (pArgs->fLeftToRight ? pArgs->pPrevPhys->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys->Core.KeyLast > pCur->Core.Key),
    5365                      ("pPrevPhys=%p %RGp-%RGp %s\n"
    5366                       "     pCur=%p %RGp-%RGp %s\n",
    5367                       pArgs->pPrevPhys, pArgs->pPrevPhys->Core.Key, pArgs->pPrevPhys->Core.KeyLast, pArgs->pPrevPhys->pszDesc,
    5368                       pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
    5369     pArgs->pPrevPhys = pCur;
    5370     return 0;
    5371 }
    5372 
    5373 
    5374 /**
    5375  * Validate a node in the virtual handler tree.
    5376  *
    5377  * @returns 0 on if ok, other wise 1.
    5378  * @param   pNode       The handler node.
    5379  * @param   pvUser      pVM.
    5380  */
    5381 static DECLCALLBACK(int) pgmR3CheckIntegrityVirtHandlerNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
    5382 {
    5383     PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
    5384     PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    5385     AssertReleaseReturn(!((uintptr_t)pCur & 7), 1);
    5386     AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGv-%RGv %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
    5387     AssertReleaseMsg(   !pArgs->pPrevVirt
    5388                      || (pArgs->fLeftToRight ? pArgs->pPrevVirt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevVirt->Core.KeyLast > pCur->Core.Key),
    5389                      ("pPrevVirt=%p %RGv-%RGv %s\n"
    5390                       "     pCur=%p %RGv-%RGv %s\n",
    5391                       pArgs->pPrevVirt, pArgs->pPrevVirt->Core.Key, pArgs->pPrevVirt->Core.KeyLast, pArgs->pPrevVirt->pszDesc,
    5392                       pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
    5393     for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    5394     {
    5395         AssertReleaseMsg(pCur->aPhysToVirt[iPage].offVirtHandler == -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage]),
    5396                          ("pCur=%p %RGv-%RGv %s\n"
    5397                           "iPage=%d offVirtHandle=%#x expected %#x\n",
    5398                           pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc,
    5399                           iPage, pCur->aPhysToVirt[iPage].offVirtHandler, -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage])));
    5400     }
    5401     pArgs->pPrevVirt = pCur;
    5402     return 0;
    5403 }
    5404 
    5405 
    5406 /**
    5407  * Validate a node in the virtual handler tree.
    5408  *
    5409  * @returns 0 on if ok, other wise 1.
    5410  * @param   pNode       The handler node.
    5411  * @param   pvUser      pVM.
    5412  */
    5413 static DECLCALLBACK(int) pgmR3CheckIntegrityPhysToVirtHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
    5414 {
    5415     PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
    5416     PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
    5417     AssertReleaseMsgReturn(!((uintptr_t)pCur & 3),      ("\n"), 1);
    5418     AssertReleaseMsgReturn(!(pCur->offVirtHandler & 3), ("\n"), 1);
    5419     AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp\n", pCur, pCur->Core.Key, pCur->Core.KeyLast));
    5420     AssertReleaseMsg(   !pArgs->pPrevPhys2Virt
    5421                      || (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
    5422                      ("pPrevPhys2Virt=%p %RGp-%RGp\n"
    5423                       "          pCur=%p %RGp-%RGp\n",
    5424                       pArgs->pPrevPhys2Virt, pArgs->pPrevPhys2Virt->Core.Key, pArgs->pPrevPhys2Virt->Core.KeyLast,
    5425                       pCur, pCur->Core.Key, pCur->Core.KeyLast));
    5426     AssertReleaseMsg(   !pArgs->pPrevPhys2Virt
    5427                      || (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
    5428                      ("pPrevPhys2Virt=%p %RGp-%RGp\n"
    5429                       "          pCur=%p %RGp-%RGp\n",
    5430                       pArgs->pPrevPhys2Virt, pArgs->pPrevPhys2Virt->Core.Key, pArgs->pPrevPhys2Virt->Core.KeyLast,
    5431                       pCur, pCur->Core.Key, pCur->Core.KeyLast));
    5432     AssertReleaseMsg((pCur->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD),
    5433                      ("pCur=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    5434                       pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias));
    5435     if (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
    5436     {
    5437         PPGMPHYS2VIRTHANDLER pCur2 = pCur;
    5438         for (;;)
    5439         {
    5440             pCur2 = (PPGMPHYS2VIRTHANDLER)((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
    5441             AssertReleaseMsg(pCur2 != pCur,
    5442                              (" pCur=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    5443                               pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias));
    5444             AssertReleaseMsg((pCur2->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == PGMPHYS2VIRTHANDLER_IN_TREE,
    5445                              (" pCur=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
    5446                               "pCur2=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    5447                               pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
    5448                               pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
    5449             AssertReleaseMsg((pCur2->Core.Key ^ pCur->Core.Key) < PAGE_SIZE,
    5450                              (" pCur=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
    5451                               "pCur2=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    5452                               pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
    5453                               pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
    5454             AssertReleaseMsg((pCur2->Core.KeyLast ^ pCur->Core.KeyLast) < PAGE_SIZE,
    5455                              (" pCur=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
    5456                               "pCur2=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    5457                               pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
    5458                               pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
    5459             if (!(pCur2->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
    5460                 break;
    5461         }
    5462     }
    5463 
    5464     pArgs->pPrevPhys2Virt = pCur;
    5465     return 0;
    5466 }
    5467 
    5468 
    5469 /**
    5470  * Perform an integrity check on the PGM component.
    5471  *
    5472  * @returns VINF_SUCCESS if everything is fine.
    5473  * @returns VBox error status after asserting on integrity breach.
    5474  * @param   pVM     The VM handle.
    5475  */
    5476 VMMR3DECL(int) PGMR3CheckIntegrity(PVM pVM)
    5477 {
    5478     AssertReleaseReturn(pVM->pgm.s.offVM, VERR_INTERNAL_ERROR);
    5479 
    5480     /*
    5481      * Check the trees.
    5482      */
    5483     int cErrors = 0;
    5484     const static PGMCHECKINTARGS s_LeftToRight = { true, NULL, NULL, NULL, pVM };
    5485     const static PGMCHECKINTARGS s_RightToLeft = { false, NULL, NULL, NULL, pVM };
    5486     PGMCHECKINTARGS Args = s_LeftToRight;
    5487     cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers,       true,  pgmR3CheckIntegrityPhysHandlerNode, &Args);
    5488     Args = s_RightToLeft;
    5489     cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers,       false, pgmR3CheckIntegrityPhysHandlerNode, &Args);
    5490     Args = s_LeftToRight;
    5491     cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers,       true,  pgmR3CheckIntegrityVirtHandlerNode, &Args);
    5492     Args = s_RightToLeft;
    5493     cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers,       false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
    5494     Args = s_LeftToRight;
    5495     cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers,  true,  pgmR3CheckIntegrityVirtHandlerNode, &Args);
    5496     Args = s_RightToLeft;
    5497     cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers,  false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
    5498     Args = s_LeftToRight;
    5499     cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, true,  pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
    5500     Args = s_RightToLeft;
    5501     cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, false, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
    5502 
    5503     return !cErrors ? VINF_SUCCESS : VERR_INTERNAL_ERROR;
    5504 }
    5505 
    5506 
     1117 * @param   pVM     Pointer to VM structure.
     1118 * @param   cbRam   The RAM size.
     1119 */
int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
{
    /* Register the "pgm" saved state unit with SSM, wiring up the live-save
       (prep/exec/vote), save (exec/done) and load callbacks that were moved
       into PGMSavedState.cpp.  The size hint is RAM plus the PGM state. */
    return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
                                 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
                                 NULL, pgmR3SaveExec, pgmR3SaveDone,
                                 NULL, pgmR3Load, NULL);
}
     1127
     1128
     1129
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette