VirtualBox

Changeset 18665 in vbox


Ignore:
Timestamp:
Apr 2, 2009 7:44:18 PM (16 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
45592
Message:

VMM: Clean out the VBOX_WITH_NEW_PHYS_CODE #ifdefs. (part 1)

Location:
trunk/src/VBox/VMM
Files:
1 deleted
12 edited

Legend:

Unmodified
Added
Removed
  • TabularUnified trunk/src/VBox/VMM/MM.cpp

    r18607 r18665  
    412412     * Setup the base ram (PGM).
    413413     */
    414 #ifdef VBOX_WITH_NEW_PHYS_CODE
    415414    if (cbRam > offRamHole)
    416415    {
     
    427426        return VM_SET_ERROR(pVM, VERR_NOT_IMPLEMENTED, "TODO: RamPreAlloc");
    428427    }
    429 #else
    430     rc = PGMR3PhysRegisterRam(pVM, 0, cbRam, "Base RAM");
    431     if (RT_SUCCESS(rc))
    432     {
    433         /*
    434          * Allocate the first chunk, as we'll map ROM ranges there.
    435          * If requested, allocated the rest too.
    436          */
    437         RTGCPHYS GCPhys = (RTGCPHYS)0;
    438         rc = PGM3PhysGrowRange(pVM, &GCPhys);
    439         if (RT_SUCCESS(rc) && fPreAlloc)
    440             for (GCPhys = PGM_DYNAMIC_CHUNK_SIZE;
    441                  GCPhys < cbRam && RT_SUCCESS(rc);
    442                  GCPhys += PGM_DYNAMIC_CHUNK_SIZE)
    443                 rc = PGM3PhysGrowRange(pVM, &GCPhys);
    444     }
    445 #endif
    446 
    447 #ifdef VBOX_WITH_NEW_PHYS_CODE
     428
    448429    /*
    449430     * Enabled mmR3UpdateReservation here since we don't want the
     
    452433    pVM->mm.s.fDoneMMR3InitPaging = true;
    453434    AssertMsg(pVM->mm.s.cBasePages == cBasePages || RT_FAILURE(rc), ("%RX64 != %RX64\n", pVM->mm.s.cBasePages, cBasePages));
    454 #endif
    455435
    456436    LogFlow(("MMR3InitPaging: returns %Rrc\n", rc));
     
    536516 * Reset notification.
    537517 *
    538  * MM will reload shadow ROMs into RAM at this point and make
    539  * the ROM writable.
    540  *
    541518 * @param   pVM             The VM handle.
    542519 */
    543520VMMR3DECL(void) MMR3Reset(PVM pVM)
    544521{
    545 #ifndef VBOX_WITH_NEW_PHYS_CODE
    546     mmR3PhysRomReset(pVM);
    547 #endif
     522    /* nothing to do anylonger. */
    548523}
    549524
  • TabularUnified trunk/src/VBox/VMM/MMInternal.h

    r17513 r18665  
    565565
    566566
    567 #ifndef VBOX_WITH_NEW_PHYS_CODE
    568 /**
    569  * A registered Rom range.
    570  *
    571  * This is used to track ROM registrations both for debug reasons
    572  * and for resetting shadow ROM at reset.
    573  *
    574  * This is allocated of the MMR3Heap and thus only accessibel from ring-3.
    575  */
    576 typedef struct MMROMRANGE
    577 {
    578     /** Pointer to the next */
    579     struct MMROMRANGE      *pNext;
    580     /** Address of the range. */
    581     RTGCPHYS                GCPhys;
    582     /** Size of the range. */
    583     uint32_t                cbRange;
    584     /** Shadow ROM? */
    585     bool                    fShadow;
    586     /** Is the shadow ROM currently wriable? */
    587     bool                    fWritable;
    588     /** The address of the virgin ROM image for shadow ROM. */
    589     const void             *pvBinary;
    590     /** The address of the guest RAM that's shadowing the ROM. (lazy bird) */
    591     void                   *pvCopy;
    592     /** The ROM description. */
    593     const char             *pszDesc;
    594 } MMROMRANGE;
    595 /** Pointer to a ROM range. */
    596 typedef MMROMRANGE *PMMROMRANGE;
    597 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    598 
    599 
    600567/**
    601568 * Hypervisor memory mapping type.
     
    771738    /** Padding. */
    772739    uint32_t                    u32Padding0;
    773 
    774 #ifndef VBOX_WITH_NEW_PHYS_CODE
    775     /** The head of the ROM ranges. */
    776     R3PTRTYPE(PMMROMRANGE)      pRomHead;
    777 #endif
    778740} MM;
    779741/** Pointer to MM Data (part of VM). */
     
    811773
    812774const char *mmR3GetTagName(MMTAG enmTag);
    813 
    814 #ifndef VBOX_WITH_NEW_PHYS_CODE
    815 void mmR3PhysRomReset(PVM pVM);
    816 #endif
    817775
    818776/**
  • TabularUnified trunk/src/VBox/VMM/Makefile.kmk

    r17294 r18665  
    7171        MMHyper.cpp \
    7272        MMPagePool.cpp \
    73         MMPhys.cpp \
    7473        PDM.cpp \
    7574        PDMDevice.cpp \
  • TabularUnified trunk/src/VBox/VMM/PDMDevHlp.cpp

    r18645 r18665  
    354354             pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, GCPhysStart, cbRange, pvBinary, fFlags, pszDesc, pszDesc));
    355355
    356 #ifdef VBOX_WITH_NEW_PHYS_CODE
    357356    int rc = PGMR3PhysRomRegister(pDevIns->Internal.s.pVMR3, pDevIns, GCPhysStart, cbRange, pvBinary, fFlags, pszDesc);
    358 #else
    359     int rc = MMR3PhysRomRegister(pDevIns->Internal.s.pVMR3, pDevIns, GCPhysStart, cbRange, pvBinary,
    360                                  !!(fFlags & PGMPHYS_ROM_FLAGS_SHADOWED), pszDesc);
    361 #endif
    362357
    363358    LogFlow(("pdmR3DevHlp_ROMRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc));
     
    20392034             pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, GCPhys, pvBuf, cbRead));
    20402035
    2041 #ifdef VBOX_WITH_NEW_PHYS_CODE
    20422036#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
    20432037    if (!VM_IS_EMT(pVM)) /** @todo not true for SMP. oh joy! */
     
    20542048    else
    20552049        rc = PGMR3PhysReadExternal(pVM, GCPhys, pvBuf, cbRead);
    2056 #else
    2057     PGMPhysRead(pVM, GCPhys, pvBuf, cbRead);
    2058     int rc = VINF_SUCCESS;
    2059 #endif
     2050
    20602051    Log(("pdmR3DevHlp_PhysRead: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc));
    20612052    return rc;
     
    20712062             pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, GCPhys, pvBuf, cbWrite));
    20722063
    2073 #ifdef VBOX_WITH_NEW_PHYS_CODE
    20742064#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
    20752065    if (!VM_IS_EMT(pVM)) /** @todo not true for SMP. oh joy! */
     
    20862076    else
    20872077        rc = PGMR3PhysWriteExternal(pVM, GCPhys, pvBuf, cbWrite);
    2088 #else
    2089     PGMPhysWrite(pVM, GCPhys, pvBuf, cbWrite);
    2090     int rc = VINF_SUCCESS;
    2091 #endif
     2078
    20922079    Log(("pdmR3DevHlp_PhysWrite: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc));
    20932080    return rc;
     
    21042091    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
    21052092
    2106 #ifdef VBOX_WITH_NEW_PHYS_CODE
    21072093#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
    21082094    if (!VM_IS_EMT(pVM)) /** @todo not true for SMP. oh joy! */
     
    21132099    }
    21142100#endif
    2115 #endif
    21162101
    21172102    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, ppv, pLock);
     
    21312116    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
    21322117
    2133 #ifdef VBOX_WITH_NEW_PHYS_CODE
    21342118#if defined(VBOX_STRICT) && defined(PDM_DEVHLP_DEADLOCK_DETECTION)
    21352119    if (!VM_IS_EMT(pVM)) /** @todo not true for SMP. oh joy! */
     
    21392123        AssertMsg(cLocks == 0, ("cLocks=%u %s\n", cLocks, szNames));
    21402124    }
    2141 #endif
    21422125#endif
    21432126
     
    25572540             pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, GCPhysStart, cbRange, enmProt));
    25582541
    2559 #ifdef VBOX_WITH_NEW_PHYS_CODE
    25602542    int rc = PGMR3PhysRomProtect(pDevIns->Internal.s.pVMR3, GCPhysStart, cbRange, enmProt);
    2561 #else
    2562     int rc = MMR3PhysRomProtect(pDevIns->Internal.s.pVMR3, GCPhysStart, cbRange);
    2563 #endif
    25642543
    25652544    LogFlow(("pdmR3DevHlp_ROMProtectShadow: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc));
  • TabularUnified trunk/src/VBox/VMM/PGM.cpp

    r18645 r18665  
    610610*******************************************************************************/
    611611/** Saved state data unit version. */
    612 #ifdef VBOX_WITH_NEW_PHYS_CODE
    613 # define PGM_SAVED_STATE_VERSION                7
    614 #else
    615 # define PGM_SAVED_STATE_VERSION                6
    616 #endif
     612#define PGM_SAVED_STATE_VERSION                 7
    617613/** Saved state data unit version. */
    618614#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE   6
     
    12291225    rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    12301226    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
    1231         cbRam = pVM->pgm.s.cbRamSize = 0;
     1227        cbRam = 0;
    12321228    else if (RT_SUCCESS(rc))
    12331229    {
     
    12351231            cbRam = 0;
    12361232        cbRam = RT_ALIGN_64(cbRam, PAGE_SIZE);
    1237         pVM->pgm.s.cbRamSize = (RTUINT)cbRam; /* pointless legacy, remove after enabling the new phys code. */
    12381233    }
    12391234    else
     
    12681263    PGMPhysInvalidatePageGCMapTLB(pVM);
    12691264
    1270 #ifdef VBOX_WITH_NEW_PHYS_CODE
    12711265    /*
    12721266     * For the time being we sport a full set of handy pages in addition to the base
    12731267     * memory to simplify things.
    12741268     */
    1275     rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages));
     1269    rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
    12761270    AssertRCReturn(rc, rc);
    1277 #endif
    12781271
    12791272    /*
     
    15791572    STAM_REG(pVM, &pPGM->StatR3GuestPDWrite,                STAMTYPE_COUNTER, "/PGM/R3/PDWrite",                    STAMUNIT_OCCURENCES,     "The total number of times pgmHCGuestPDWriteHandler() was called.");
    15801573    STAM_REG(pVM, &pPGM->StatR3GuestPDWriteConflict,        STAMTYPE_COUNTER, "/PGM/R3/PDWriteConflict",            STAMUNIT_OCCURENCES,     "The number of times pgmHCGuestPDWriteHandler() detected a conflict.");
    1581 #ifndef VBOX_WITH_NEW_PHYS_CODE
    1582     STAM_REG(pVM, &pPGM->StatR3DynRamTotal,                 STAMTYPE_COUNTER, "/PGM/DynAlloc/TotalAlloc",           STAMUNIT_MEGABYTES,      "Allocated MBs of guest ram.");
    1583     STAM_REG(pVM, &pPGM->StatR3DynRamGrow,                  STAMTYPE_COUNTER, "/PGM/DynAlloc/Grow",                 STAMUNIT_OCCURENCES,     "Nr of pgmr3PhysGrowRange calls.");
    1584 #endif
    15851574
    15861575    /* R0 only: */
     
    21012090    if (RT_SUCCESS(rc))
    21022091    {
    2103 #ifdef VBOX_WITH_NEW_PHYS_CODE
    21042092        /*
    21052093         * Reset (zero) shadow ROM pages.
    21062094         */
    21072095        rc = pgmR3PhysRomReset(pVM);
    2108 #endif
    21092096        if (RT_SUCCESS(rc))
    21102097        {
     
    21632150}
    21642151
    2165 #ifdef VBOX_WITH_NEW_PHYS_CODE
    21662152
    21672153/**
     
    22562242    SSMFIELD_ENTRY_TERM()
    22572243};
    2258 #endif /* VBOX_WITH_NEW_PHYS_CODE */
    22592244
    22602245
     
    22742259     * Lock PGM and set the no-more-writes indicator.
    22752260     */
    2276 #ifdef VBOX_WITH_NEW_PHYS_CODE
    22772261    pgmLock(pVM);
    2278 #endif
    22792262    pVM->pgm.s.fNoMorePhysWrites = true;
    22802263
     
    22822265     * Save basic data (required / unaffected by relocation).
    22832266     */
    2284 #ifdef VBOX_WITH_NEW_PHYS_CODE
    22852267    SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
    2286 #else
    2287     SSMR3PutBool(  pSSM, pPGM->fMappingsFixed);
    2288     SSMR3PutGCPtr( pSSM, pPGM->GCPtrMappingFixed);
    2289     SSMR3PutU32(   pSSM, pPGM->cbMappingFixed);
    2290     SSMR3PutUInt(  pSSM, pPGM->cbRamSize);
    2291     SSMR3PutGCPhys(pSSM, pPGM->GCPhysA20Mask);
    2292     SSMR3PutUInt(  pSSM, pPGM->fA20Enabled);
    2293     SSMR3PutUInt(  pSSM, pPGM->fSyncFlags);
    2294     SSMR3PutUInt(  pSSM, pPGM->enmGuestMode);
    2295     SSMR3PutU32(   pSSM, ~0);      /* Separator. */
    2296 #endif
    22972268
    22982269    /*
     
    23232294        SSMR3PutGCPhys(pSSM,    pRam->cb);
    23242295        SSMR3PutU8(pSSM,        !!pRam->pvR3);      /* Boolean indicating memory or not. */
    2325 #ifdef VBOX_WITH_NEW_PHYS_CODE
    23262296        SSMR3PutStrZ(pSSM,      pRam->pszDesc);     /* This is the best unique id we have... */
    23272297
     
    23542324        if (RT_FAILURE(rc))
    23552325            break;
    2356 
    2357 #else  /* !VBOX_WITH_NEW_PHYS_CODE */
    2358         /* Flags. */
    2359         const unsigned cPages = pRam->cb >> PAGE_SHIFT;
    2360         for (unsigned iPage = 0; iPage < cPages; iPage++)
    2361             SSMR3PutU16(pSSM, (uint16_t)(pRam->aPages[iPage].HCPhys & ~X86_PTE_PAE_PG_MASK)); /** @todo PAGE FLAGS */
    2362 
    2363         /* Any memory associated with the range. */
    2364         if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    2365         {
    2366             for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
    2367             {
    2368                 if (pRam->paChunkR3Ptrs[iChunk])
    2369                 {
    2370                     SSMR3PutU8(pSSM, 1);    /* chunk present */
    2371                     SSMR3PutMem(pSSM, (void *)pRam->paChunkR3Ptrs[iChunk], PGM_DYNAMIC_CHUNK_SIZE);
    2372                 }
    2373                 else
    2374                     SSMR3PutU8(pSSM, 0);    /* no chunk present */
    2375             }
    2376         }
    2377         else if (pRam->pvR3)
    2378         {
    2379             rc = SSMR3PutMem(pSSM, pRam->pvR3, pRam->cb);
    2380             if (RT_FAILURE(rc))
    2381             {
    2382                 Log(("pgmR3Save: SSMR3PutMem(, %p, %#x) -> %Rrc\n", pRam->pvR3, pRam->cb, rc));
    2383                 return rc;
    2384             }
    2385         }
    2386 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    2387     }
    2388 
    2389 #ifdef VBOX_WITH_NEW_PHYS_CODE
     2326    }
     2327
    23902328    pgmUnlock(pVM);
    2391 #endif
    23922329    return SSMR3PutU32(pSSM, ~0); /* terminator. */
    23932330}
    23942331
    2395 
    2396 #ifdef VBOX_WITH_NEW_PHYS_CODE
    23972332
    23982333/**
     
    25522487}
    25532488
    2554 #endif  /* VBOX_WITH_NEW_PHYS_CODE */
    25552489
    25562490/**
     
    25722506     * Load basic data (required / unaffected by relocation).
    25732507     */
    2574 #ifdef VBOX_WITH_NEW_PHYS_CODE
    25752508    if (u32Version >= PGM_SAVED_STATE_VERSION)
    25762509    {
     
    25792512    }
    25802513    else
    2581 #endif
    25822514    {
    25832515        SSMR3GetBool(pSSM,      &pPGM->fMappingsFixed);
     
    25852517        SSMR3GetU32(pSSM,       &pPGM->cbMappingFixed);
    25862518
    2587         RTUINT cbRamSize;
    2588         rc = SSMR3GetU32(pSSM, &cbRamSize);
     2519        uint32_t cbRamSizeIgnored;
     2520        rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
    25892521        if (RT_FAILURE(rc))
    25902522            return rc;
    2591         AssertLogRelMsgReturn(cbRamSize == pPGM->cbRamSize, ("%#x != %#x\n", cbRamSize, pPGM->cbRamSize),
    2592                               VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH);
    25932523        SSMR3GetGCPhys(pSSM,    &pPGM->GCPhysA20Mask);
    25942524
     
    27012631        char szDesc[256];
    27022632        szDesc[0] = '\0';
    2703 #ifdef VBOX_WITH_NEW_PHYS_CODE
    27042633        if (u32Version >= PGM_SAVED_STATE_VERSION)
    27052634        {
     
    27082637                return rc;
    27092638        }
    2710 #endif
    27112639
    27122640        /*
     
    27202648                 || GCPhysLast != pRam->GCPhysLast
    27212649                 || cb         != pRam->cb
    2722 #ifdef VBOX_WITH_NEW_PHYS_CODE
    2723                  ||  (szDesc[0] && strcmp(szDesc, pRam->pszDesc))
    2724 #else
    2725                  ||  fHaveBits  != !!pRam->pvR3
    2726 #endif
    2727                  )
    2728 #ifdef VBOX_WITH_NEW_PHYS_CODE
     2650                 ||  (szDesc[0] && strcmp(szDesc, pRam->pszDesc)) )
    27292651                /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
    27302652            &&  (   u32Version != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
     
    27342656                 || pRam->GCPhys     <  GCPhys
    27352657                 || !fHaveBits)
    2736 #endif
    27372658           )
    27382659        {
     
    27492670                AssertFailedReturn(VERR_SSM_LOAD_CONFIG_MISMATCH);
    27502671
    2751 #ifdef VBOX_WITH_NEW_PHYS_CODE
    2752             if (u32Version > PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
    2753                 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
    2754             else
    2755 #else
    2756             {
    2757                 RTGCPHYS cPages = ((GCPhysLast - GCPhys) + 1) >> PAGE_SHIFT;
    2758                 while (cPages-- > 0)
    2759                 {
    2760                     uint16_t u16Ignore;
    2761                     SSMR3GetU16(pSSM, &u16Ignore);
    2762                 }
    2763             }
    2764 #endif
     2672            AssertMsgFailed(("debug skipping not implemented, sorry\n"));
    27652673            continue;
    27662674        }
    27672675
    27682676        uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
    2769 
    2770 #ifdef VBOX_WITH_NEW_PHYS_CODE
    27712677        if (u32Version >= PGM_SAVED_STATE_VERSION)
    27722678        {
     
    29022808            }
    29032809        }
    2904 
    2905 #else  /* !VBOX_WITH_NEW_PHYS_CODE */
    2906         /* Flags. */
    2907         for (uint32_t iPage = 0; iPage < cPages; iPage++)
    2908         {
    2909             uint16_t    u16 = 0;
    2910             SSMR3GetU16(pSSM, &u16);
    2911             u16 &= PAGE_OFFSET_MASK & ~(  RT_BIT(4) | RT_BIT(5) | RT_BIT(6)
    2912                                         | RT_BIT(7) | RT_BIT(8) | RT_BIT(9) | RT_BIT(10) );
    2913                    // &= MM_RAM_FLAGS_DYNAMIC_ALLOC | MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2
    2914             pRam->aPages[iPage].HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) | (RTHCPHYS)u16; /** @todo PAGE FLAGS */
    2915         }
    2916 
    2917         /* any memory associated with the range. */
    2918         if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    2919         {
    2920             for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
    2921             {
    2922                 uint8_t fValidChunk;
    2923 
    2924                 rc = SSMR3GetU8(pSSM, &fValidChunk);
    2925                 if (RT_FAILURE(rc))
    2926                     return rc;
    2927                 if (fValidChunk > 1)
    2928                     return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    2929 
    2930                 if (fValidChunk)
    2931                 {
    2932                     if (!pRam->paChunkR3Ptrs[iChunk])
    2933                     {
    2934                         rc = pgmr3PhysGrowRange(pVM, pRam->GCPhys + iChunk * PGM_DYNAMIC_CHUNK_SIZE);
    2935                         if (RT_FAILURE(rc))
    2936                             return rc;
    2937                     }
    2938                     Assert(pRam->paChunkR3Ptrs[iChunk]);
    2939 
    2940                     SSMR3GetMem(pSSM, (void *)pRam->paChunkR3Ptrs[iChunk], PGM_DYNAMIC_CHUNK_SIZE);
    2941                 }
    2942                 /* else nothing to do */
    2943             }
    2944         }
    2945         else if (pRam->pvR3)
    2946         {
    2947             rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
    2948             if (RT_FAILURE(rc))
    2949             {
    2950                 Log(("pgmR3Save: SSMR3GetMem(, %p, %#x) -> %Rrc\n", pRam->pvR3, pRam->cb, rc));
    2951                 return rc;
    2952             }
    2953         }
    2954 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    29552810    }
    29562811
     
    29762831     */
    29772832    if (    u32Version != PGM_SAVED_STATE_VERSION
    2978 #ifdef VBOX_WITH_NEW_PHYS_CODE
    2979         &&  u32Version != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
    2980 #endif
    2981        )
     2833        &&  u32Version != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
    29822834    {
    29832835        AssertMsgFailed(("pgmR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, PGM_SAVED_STATE_VERSION));
  • TabularUnified trunk/src/VBox/VMM/PGMDbg.cpp

    r17370 r18665  
    5555VMMR3DECL(int) PGMR3DbgR3Ptr2GCPhys(PVM pVM, RTR3PTR R3Ptr, PRTGCPHYS pGCPhys)
    5656{
    57 #ifdef VBOX_WITH_NEW_PHYS_CODE
    5857    *pGCPhys = NIL_RTGCPHYS;
    5958    return VERR_NOT_IMPLEMENTED;
    60 
    61 #else
    62     for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    63          pRam;
    64          pRam = pRam->CTX_SUFF(pNext))
    65     {
    66         if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    67         {
    68             for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
    69             {
    70                 if (pRam->paChunkR3Ptrs[iChunk])
    71                 {
    72                     RTR3UINTPTR off = (RTR3UINTPTR)R3Ptr - pRam->paChunkR3Ptrs[iChunk];
    73                     if (off < PGM_DYNAMIC_CHUNK_SIZE)
    74                     {
    75                         *pGCPhys = pRam->GCPhys + iChunk*PGM_DYNAMIC_CHUNK_SIZE + off;
    76                         return VINF_SUCCESS;
    77                     }
    78                 }
    79             }
    80         }
    81         else if (pRam->pvR3)
    82         {
    83             RTR3UINTPTR off = (RTR3UINTPTR)R3Ptr - (RTR3UINTPTR)pRam->pvR3;
    84             if (off < pRam->cb)
    85             {
    86                 *pGCPhys = pRam->GCPhys + off;
    87                 return VINF_SUCCESS;
    88             }
    89         }
    90     }
    91     return VERR_INVALID_POINTER;
    92 #endif
    9359}
    9460
     
    11076VMMR3DECL(int) PGMR3DbgR3Ptr2HCPhys(PVM pVM, RTR3PTR R3Ptr, PRTHCPHYS pHCPhys)
    11177{
    112 #ifdef VBOX_WITH_NEW_PHYS_CODE
    11378    *pHCPhys = NIL_RTHCPHYS;
    11479    return VERR_NOT_IMPLEMENTED;
    115 
    116 #else
    117     for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    118          pRam;
    119          pRam = pRam->CTX_SUFF(pNext))
    120     {
    121         if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    122         {
    123             for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
    124             {
    125                 if (pRam->paChunkR3Ptrs[iChunk])
    126                 {
    127                     RTR3UINTPTR off = (RTR3UINTPTR)R3Ptr - pRam->paChunkR3Ptrs[iChunk];
    128                     if (off < PGM_DYNAMIC_CHUNK_SIZE)
    129                     {
    130                         PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
    131                         if (PGM_PAGE_IS_RESERVED(pPage))
    132                             return VERR_PGM_PHYS_PAGE_RESERVED;
    133                         *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage)
    134                                  | (off & PAGE_OFFSET_MASK);
    135                         return VINF_SUCCESS;
    136                     }
    137                 }
    138             }
    139         }
    140         else if (pRam->pvR3)
    141         {
    142             RTR3UINTPTR off = (RTR3UINTPTR)R3Ptr - (RTR3UINTPTR)pRam->pvR3;
    143             if (off < pRam->cb)
    144             {
    145                 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
    146                 if (PGM_PAGE_IS_RESERVED(pPage))
    147                     return VERR_PGM_PHYS_PAGE_RESERVED;
    148                 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage)
    149                          | (off & PAGE_OFFSET_MASK);
    150                 return VINF_SUCCESS;
    151             }
    152         }
    153     }
    154     return VERR_INVALID_POINTER;
    155 #endif
    15680}
    15781
     
    582506            {
    583507                PPGMPAGE pPage = &pRam->aPages[iPage];
    584 #ifdef VBOX_WITH_NEW_PHYS_CODE
    585508                if (    !PGM_PAGE_IS_ZERO(pPage)
    586509                    &&  !PGM_PAGE_IS_MMIO(pPage))
    587 #else
    588                 if (    /** @todo !PGM_PAGE_IS_ZERO(pPage)
    589                     &&*/  !PGM_PAGE_IS_MMIO(pPage))
    590 #endif
    591510                {
    592511                    void const *pvPage;
  • TabularUnified trunk/src/VBox/VMM/PGMHandler.cpp

    r17517 r18665  
    9999    if (!pszModR0)
    100100        pszModR0 = VMMR0_MAIN_MODULE_NAME;
    101 #ifdef VBOX_WITH_NEW_PHYS_CODE
    102101    AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER);
    103102    AssertPtrReturn(pszHandlerR0, VERR_INVALID_POINTER);
    104103    AssertPtrReturn(pszHandlerRC, VERR_INVALID_POINTER);
    105 #endif
    106104
    107105    /*
     
    110108    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0 = NIL_RTR0PTR;
    111109    int rc = VINF_SUCCESS;
    112 #ifndef VBOX_WITH_NEW_PHYS_CODE
    113   if (pszHandlerR0)
    114 #endif
    115110    rc = PDMR3LdrGetSymbolR0Lazy(pVM, pszModR0, pszHandlerR0, &pfnHandlerR0);
    116111    if (RT_SUCCESS(rc))
     
    120115         */
    121116        RTRCPTR pfnHandlerRC = NIL_RTRCPTR;
    122 #ifndef VBOX_WITH_NEW_PHYS_CODE
    123       if (pszHandlerRC)
    124 #endif
    125117        rc = PDMR3LdrGetSymbolRCLazy(pVM, pszModRC, pszHandlerRC, &pfnHandlerRC);
    126 
    127118        if (RT_SUCCESS(rc))
    128119            return PGMHandlerPhysicalRegisterEx(pVM, enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3,
  • TabularUnified trunk/src/VBox/VMM/PGMInternal.h

    r18617 r18665  
    143143#endif
    144144
    145 #ifdef VBOX_WITH_NEW_PHYS_CODE
    146145/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
    147146 * Enables the experimental lazy page allocation code. */
    148147/*# define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */
    149 #endif
     148
    150149/** @} */
    151150
     
    587586{
    588587    /** The physical address and a whole lot of other stuff. All bits are used! */
    589 #ifdef VBOX_WITH_NEW_PHYS_CODE
    590588    RTHCPHYS    HCPhysX;
    591 #else
    592     RTHCPHYS    HCPhys;
    593 #define HCPhysX HCPhys /**< Temporary while in the process of eliminating direct access to PGMPAGE::HCPhys. */
    594 #endif
    595589    /** The page state. */
    596590    uint32_t    u2StateX : 2;
     
    655649 * @param   pPage       Pointer to the physical guest page tracking structure.
    656650 */
    657 #ifdef VBOX_WITH_NEW_PHYS_CODE
    658 # define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType)  \
     651#define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType)  \
    659652    PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
    660 #else
    661 # define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType)  \
    662     PGM_PAGE_INIT(pPage, 0, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
    663 #endif
    664653/** Temporary hack. Replaced by PGM_PAGE_INIT_ZERO once the old code is kicked out. */
    665654# define PGM_PAGE_INIT_ZERO_REAL(pPage, pVM, _uType)  \
     
    788777 * @param   _enmType    The new page type (PGMPAGETYPE).
    789778 */
    790 #ifdef VBOX_WITH_NEW_PHYS_CODE
    791779#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
    792780    do { (pPage)->u3Type = (_enmType); } while (0)
    793 #else
    794 #define PGM_PAGE_SET_TYPE(pPage, _enmType) \
    795     do { \
    796         (pPage)->u3Type = (_enmType); \
    797         if ((_enmType) == PGMPAGETYPE_ROM) \
    798             (pPage)->HCPhysX |= MM_RAM_FLAGS_ROM; \
    799         else if ((_enmType) == PGMPAGETYPE_ROM_SHADOW) \
    800             (pPage)->HCPhysX |= MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2; \
    801         else if ((_enmType) == PGMPAGETYPE_MMIO2) \
    802             (pPage)->HCPhysX |= MM_RAM_FLAGS_MMIO2; \
    803     } while (0)
    804 #endif
    805 
    806 
    807 /**
    808  * Checks if the page is 'reserved'.
    809  * @returns true/false.
    810  * @param   pPage       Pointer to the physical guest page tracking structure.
    811  */
    812 #define PGM_PAGE_IS_RESERVED(pPage)     ( !!((pPage)->HCPhysX & MM_RAM_FLAGS_RESERVED) )
    813781
    814782/**
     
    817785 * @param   pPage       Pointer to the physical guest page tracking structure.
    818786 */
    819 #ifdef VBOX_WITH_NEW_PHYS_CODE
    820 # define PGM_PAGE_IS_MMIO(pPage)        ( (pPage)->u3Type == PGMPAGETYPE_MMIO )
    821 #else
    822 # define PGM_PAGE_IS_MMIO(pPage)        ( !!((pPage)->HCPhysX & MM_RAM_FLAGS_MMIO) )
    823 #endif
     787#define PGM_PAGE_IS_MMIO(pPage)         ( (pPage)->u3Type == PGMPAGETYPE_MMIO )
    824788
    825789/**
     
    835799 * @param   pPage       Pointer to the physical guest page tracking structure.
    836800 */
    837 #define PGM_PAGE_IS_SHARED(pPage)        ( (pPage)->u2StateX == PGM_PAGE_STATE_SHARED )
     801#define PGM_PAGE_IS_SHARED(pPage)       ( (pPage)->u2StateX == PGM_PAGE_STATE_SHARED )
    838802
    839803
     
    1032996    /** Start of the HC mapping of the range. This is only used for MMIO2. */
    1033997    R3PTRTYPE(void *)                   pvR3;
    1034 #ifndef VBOX_WITH_NEW_PHYS_CODE
    1035     /** R3 virtual lookup ranges for chunks.
    1036      * Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges.
    1037      * @remarks This is occationally accessed from ring-0!! (not darwin) */
    1038 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    1039     R3PTRTYPE(PRTR3UINTPTR)             paChunkR3Ptrs;
    1040 # else
    1041     R3R0PTRTYPE(PRTR3UINTPTR)           paChunkR3Ptrs;
    1042 # endif
    1043 #endif
    1044998    /** The range description. */
    1045999    R3PTRTYPE(const char *)             pszDesc;
     
    10491003    RCPTRTYPE(struct PGMRAMRANGE *)     pSelfRC;
    10501004    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
    1051 #ifdef VBOX_WITH_NEW_PHYS_CODE
    10521005    uint32_t                            au32Alignment2[HC_ARCH_BITS == 32 ? 2 : 1];
    1053 #else
    1054 # if HC_ARCH_BITS == 32
    1055     uint32_t                            u32Alignment2;
    1056 # endif
    1057 #endif
    10581006    /** Array of physical guest page tracking structures. */
    10591007    PGMPAGE                             aPages[1];
     
    10681016/** @} */
    10691017
    1070 #ifndef VBOX_WITH_NEW_PHYS_CODE
    1071 /** Return hc ptr corresponding to the ram range and physical offset */
    1072 #define PGMRAMRANGE_GETHCPTR(pRam, off) \
    1073     (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) ? (RTHCPTR)((pRam)->paChunkR3Ptrs[(off) >> PGM_DYNAMIC_CHUNK_SHIFT] + ((off) & PGM_DYNAMIC_CHUNK_OFFSET_MASK)) \
    1074                                                 : (RTHCPTR)((RTR3UINTPTR)(pRam)->pvR3 + (off));
    1075 #endif
    10761018
    10771019/**
     
    23882330    /** RC pointer corresponding to PGM::pRamRangesR3. */
    23892331    RCPTRTYPE(PPGMRAMRANGE)         pRamRangesRC;
    2390     /** The configured RAM size.
    2391      * @remarks Do NOT use this, it's too small to hold the whole stuff.
    2392      * @todo    Remove with VBOX_WITH_NEW_PHYS_CODE! */
    2393     RTUINT                          cbRamSize;
     2332    RTRCPTR                         alignment4; /**< structure alignment. */
    23942333
    23952334    /** Pointer to the list of ROM ranges - for R3.
     
    26322571    STAMCOUNTER StatR3GuestPDWrite;                 /**< R3: The total number of times pgmHCGuestPDWriteHandler() was called. */
    26332572    STAMCOUNTER StatR3GuestPDWriteConflict;         /**< R3: The number of times GuestPDWriteContlict() detected a conflict. */
    2634 #ifndef VBOX_WITH_NEW_PHYS_CODE
    2635     STAMCOUNTER StatR3DynRamTotal;                  /**< R3: Allocated MBs of guest ram */
    2636     STAMCOUNTER StatR3DynRamGrow;                   /**< R3: Nr of pgmr3PhysGrowRange calls. */
    2637 #endif
    26382573
    26392574    /* R0 only: */
     
    29092844int             pgmR3PhysRamReset(PVM pVM);
    29102845int             pgmR3PhysRomReset(PVM pVM);
    2911 # ifndef VBOX_WITH_NEW_PHYS_CODE
    2912 int             pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
    2913 # endif
    29142846
    29152847int             pgmR3PoolInit(PVM pVM);
     
    30602992    }
    30612993    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    3062 #ifndef VBOX_WITH_NEW_PHYS_CODE
    3063 
    3064     /*
    3065      * Make sure it's present.
    3066      */
    3067     if (RT_UNLIKELY(    !PGM_PAGE_GET_HCPHYS(*ppPage)
    3068                     &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
    3069     {
    3070 #ifdef IN_RING3
    3071         int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
    3072 #else
    3073         int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
    3074 #endif
    3075         if (RT_FAILURE(rc))
    3076         {
    3077             *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
    3078             return rc;
    3079         }
    3080         Assert(rc == VINF_SUCCESS);
    3081     }
    3082 #endif
    30832994    return VINF_SUCCESS;
    30842995}
     
    31273038    }
    31283039    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    3129 #ifndef VBOX_WITH_NEW_PHYS_CODE
    3130 
    3131     /*
    3132      * Make sure it's present.
    3133      */
    3134     if (RT_UNLIKELY(    !PGM_PAGE_GET_HCPHYS(*ppPage)
    3135                     &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
    3136     {
    3137 #ifdef IN_RING3
    3138         int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
    3139 #else
    3140         int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
    3141 #endif
    3142         if (RT_FAILURE(rc))
    3143         {
    3144             *ppPage = NULL; /* Shut up annoying smart ass. */
    3145             return rc;
    3146         }
    3147         Assert(rc == VINF_SUCCESS);
    3148     }
    3149 #endif
    31503040    return VINF_SUCCESS;
    31513041}
     
    32183108    *ppRam = pRam;
    32193109    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    3220 #ifndef VBOX_WITH_NEW_PHYS_CODE
    3221 
    3222     /*
    3223      * Make sure it's present.
    3224      */
    3225     if (RT_UNLIKELY(    !PGM_PAGE_GET_HCPHYS(*ppPage)
    3226                     &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
    3227     {
    3228 #ifdef IN_RING3
    3229         int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
    3230 #else
    3231         int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
    3232 #endif
    3233         if (RT_FAILURE(rc))
    3234         {
    3235             *ppPage = NULL; /* Shut up silly GCC warnings. */
    3236             *ppPage = NULL; /* ditto */
    3237             return rc;
    3238         }
    3239         Assert(rc == VINF_SUCCESS);
    3240 
    3241     }
    3242 #endif
    32433110    return VINF_SUCCESS;
    32443111}
     
    34083275
    34093276#endif /*  VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
    3410 
    34113277#ifndef IN_RC
     3278
    34123279/**
    34133280 * Queries the Physical TLB entry for a physical guest page,
     
    34663333    return rc;
    34673334}
     3335
    34683336#endif /* !IN_RC */
    3469 
    3470 
    3471 #ifndef VBOX_WITH_NEW_PHYS_CODE
    3472 /**
    3473  * Convert GC Phys to HC Virt and HC Phys.
    3474  *
    3475  * @returns VBox status.
    3476  * @param   pPGM        PGM handle.
    3477  * @param   GCPhys      The GC physical address.
    3478  * @param   pHCPtr      Where to store the corresponding HC virtual address.
    3479  * @param   pHCPhys     Where to store the HC Physical address and its flags.
    3480  *
    3481  * @deprecated  Will go away or be changed. Only user is MapCR3. MapCR3 will have to do ring-3
    3482  *              and ring-0 locking of the CR3 in a lazy fashion I'm fear... or perhaps not. we'll see.
    3483  *              Either way, we have to make sure the page is writable in MapCR3.
    3484  */
    3485 DECLINLINE(int) pgmRamGCPhys2HCPtrAndHCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr, PRTHCPHYS pHCPhys)
    3486 {
    3487     PPGMRAMRANGE pRam;
    3488     PPGMPAGE pPage;
    3489     int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
    3490     if (RT_FAILURE(rc))
    3491     {
    3492         *pHCPtr = 0;    /* Shut up crappy GCC warnings */
    3493         *pHCPhys = 0;   /* ditto */
    3494         return rc;
    3495     }
    3496     RTGCPHYS off = GCPhys - pRam->GCPhys;
    3497 
    3498     *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    3499     if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    3500     {
    3501         unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
    3502 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* ASSUMES only MapCR3 usage. */
    3503         PRTR3UINTPTR paChunkR3Ptrs = (PRTR3UINTPTR)MMHyperR3ToCC(PGM2VM(pPGM), pRam->paChunkR3Ptrs);
    3504         *pHCPtr = (RTHCPTR)(paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
    3505 #else
    3506         *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
    3507 #endif
    3508         return VINF_SUCCESS;
    3509     }
    3510     if (pRam->pvR3)
    3511     {
    3512         *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvR3 + off);
    3513         return VINF_SUCCESS;
    3514     }
    3515     *pHCPtr = 0;
    3516     return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    3517 }
    3518 #endif /* VBOX_WITH_NEW_PHYS_CODE */
    3519 
    35203337
    35213338/**
     
    35553372#else
    35563373    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    3557 # ifdef VBOX_WITH_NEW_PHYS_CODE
    35583374# ifdef IN_RING3
    35593375    if (!pGuestPD)
    35603376        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
    3561 # endif
    35623377# endif
    35633378#endif
     
    35813396#else
    35823397    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    3583 # ifdef VBOX_WITH_NEW_PHYS_CODE
    35843398# ifdef IN_RING3
    35853399    if (!pGuestPD)
    35863400        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
    3587 # endif
    35883401# endif
    35893402#endif
     
    36063419#else
    36073420    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    3608 # ifdef VBOX_WITH_NEW_PHYS_CODE
    36093421# ifdef IN_RING3
    36103422    if (!pGuestPD)
    36113423        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
    3612 # endif
    36133424# endif
    36143425#endif
     
    36323443#else
    36333444    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
    3634 # ifdef VBOX_WITH_NEW_PHYS_CODE
    36353445# ifdef IN_RING3
    36363446    if (!pGuestPDPT)
    36373447        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
    3638 # endif
    36393448# endif
    36403449#endif
     
    36613470#else
    36623471    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
    3663 # ifdef VBOX_WITH_NEW_PHYS_CODE
    36643472# ifdef IN_RING3
    36653473    if (!pGuestPDPT)
    36663474        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
    3667 # endif
    36683475# endif
    36693476#endif
     
    36893496    if (pGuestPDPT->a[iPdpt].n.u1Present)
    36903497    {
    3691 #ifdef VBOX_WITH_NEW_PHYS_CODE
    36923498#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    36933499        PX86PDPAE   pGuestPD = NULL;
     
    37013507#endif
    37023508        return pGuestPD;
    3703 #else  /* !VBOX_WITH_NEW_PHYS_CODE */
    3704 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3705         if ((pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdpt])
    3706             return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
    3707 #endif
    3708 
    3709         /* cache is out-of-sync. */
    3710         PX86PDPAE pPD;
    3711         int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    3712         if (RT_SUCCESS(rc))
    3713             return pPD;
    3714         AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdpt].u));
    3715 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    37163509        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
    37173510    }
     
    37383531    {
    37393532        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    3740 #ifdef VBOX_WITH_NEW_PHYS_CODE
    37413533#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    37423534        PX86PDPAE   pGuestPD = NULL;
     
    37503542#endif
    37513543        return &pGuestPD->a[iPD];
    3752 #else  /* !VBOX_WITH_NEW_PHYS_CODE */
    3753 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3754         if ((pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdpt])
    3755             return &pPGM->CTX_SUFF(apGstPaePDs)[iPdpt]->a[iPD];
    3756 #endif
    3757 
    3758         /* The cache is out-of-sync. */
    3759         PX86PDPAE pPD;
    3760         int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    3761         if (RT_SUCCESS(rc))
    3762             return &pPD->a[iPD];
    3763         AssertMsgFailed(("Impossible! rc=%Rrc PDPE=%RX64\n", rc, pGuestPDPT->a[iPdpt].u));
    3764 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    37653544        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
    37663545    }
     
    37883567        {
    37893568            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    3790 #ifdef VBOX_WITH_NEW_PHYS_CODE
    37913569#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    37923570            PX86PDPAE   pGuestPD = NULL;
     
    38003578#endif
    38013579            return pGuestPD->a[iPD];
    3802 #else  /* !VBOX_WITH_NEW_PHYS_CODE */
    3803 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3804             if ((pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdpt])
    3805                 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt]->a[iPD];
    3806 #endif
    3807 
    3808             /* cache is out-of-sync. */
    3809             PX86PDPAE pPD;
    3810             int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    3811             if (RT_SUCCESS(rc))
    3812                 return pPD->a[iPD];
    3813             AssertMsgFailed(("Impossible! rc=%d PDPE=%RX64\n", rc, pGuestPDPT->a[iPdpt].u));
    3814 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    38153580        }
    38163581    }
     
    38423607    {
    38433608        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    3844 #ifdef VBOX_WITH_NEW_PHYS_CODE
    38453609#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    38463610        PX86PDPAE   pGuestPD = NULL;
     
    38553619        *piPD = iPD;
    38563620        return pGuestPD;
    3857 #else  /* !VBOX_WITH_NEW_PHYS_CODE */
    3858 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3859         if ((pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdpt])
    3860         {
    3861             *piPD = iPD;
    3862             return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
    3863         }
    3864 #endif
    3865 
    3866         /* cache is out-of-sync. */
    3867         PX86PDPAE pPD;
    3868         int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    3869         if (RT_SUCCESS(rc))
    3870         {
    3871             *piPD = iPD;
    3872             return pPD;
    3873         }
    3874         AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdpt].u));
    3875 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    38763621        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
    38773622    }
     
    38953640#else
    38963641    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    3897 # ifdef VBOX_WITH_NEW_PHYS_CODE
    38983642# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    38993643    if (!pGuestPml4)
    39003644        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    3901 # endif
    39023645# endif
    39033646    Assert(pGuestPml4);
     
    39223665#else
    39233666    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    3924 # ifdef VBOX_WITH_NEW_PHYS_CODE
    39253667# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    39263668    if (!pGuestPml4)
    39273669        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    3928 # endif
    39293670# endif
    39303671    Assert(pGuestPml4);
     
    39533694#else
    39543695    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    3955 # ifdef VBOX_WITH_NEW_PHYS_CODE
    39563696# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    39573697    if (!pGuestPml4)
    39583698        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    3959 # endif
    39603699# endif
    39613700    Assert(pGuestPml4);
  • TabularUnified trunk/src/VBox/VMM/PGMPhys.cpp

    r18645 r18665  
    346346
    347347
    348 #ifdef VBOX_WITH_NEW_PHYS_CODE
    349348/**
    350349 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
     
    386385    return rc;
    387386}
    388 #endif /* VBOX_WITH_NEW_PHYS_CODE */
    389387
    390388
     
    421419    AssertPtr(pLock);
    422420
    423 #ifdef VBOX_WITH_NEW_PHYS_CODE
    424421    int rc = pgmLock(pVM);
    425422    AssertRCReturn(rc, rc);
     
    483480    pgmUnlock(pVM);
    484481    return rc;
    485 
    486 #else  /* !VBOX_WITH_NEW_PHYS_CODE */
    487     /*
    488      * Fallback code.
    489      */
    490     return PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1, (PRTR3PTR)ppv);
    491 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    492482}
    493483
     
    517507VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
    518508{
    519 #ifdef VBOX_WITH_NEW_PHYS_CODE
    520509    int rc = pgmLock(pVM);
    521510    AssertRCReturn(rc, rc);
     
    560549    pgmUnlock(pVM);
    561550    return rc;
    562 
    563 #else  /* !VBOX_WITH_NEW_PHYS_CODE */
    564     /*
    565      * Fallback code.
    566      */
    567     return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
    568 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    569551}
    570552
     
    716698
    717699
    718 #ifdef VBOX_WITH_NEW_PHYS_CODE
    719700/**
    720701 * Frees a range of pages, replacing them with ZERO pages of the specified type.
     
    757738    return rc;
    758739}
    759 #endif /* VBOX_WITH_NEW_PHYS_CODE */
    760740
    761741
     
    972952        return rc;
    973953
    974 #ifdef VBOX_WITH_NEW_PHYS_CODE
    975954    if (    GCPhys >= _4G
    976955        &&  cPages > 256)
     
    10251004    }
    10261005    else
    1027 #endif
    10281006    {
    10291007        /*
     
    10351013        AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
    10361014
    1037 #ifndef VBOX_WITH_NEW_PHYS_CODE
    1038         /* Allocate memory for chunk to HC ptr lookup array. */
    1039         pNew->paChunkR3Ptrs = NULL;
    1040         rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
    1041         AssertRCReturn(rc, rc);
    1042         pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
    1043 #endif
    1044 
    10451015        pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
    10461016    }
     
    10491019     * Notify REM.
    10501020     */
    1051 #ifdef VBOX_WITH_NEW_PHYS_CODE
    10521021    REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
    1053 #else
    1054     REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
    1055 #endif
    10561022
    10571023    return VINF_SUCCESS;
     
    10691035int pgmR3PhysRamReset(PVM pVM)
    10701036{
    1071 #ifdef VBOX_WITH_NEW_PHYS_CODE
    10721037    /*
    10731038     * We batch up pages before freeing them.
     
    10771042    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    10781043    AssertLogRelRCReturn(rc, rc);
    1079 #endif
    10801044
    10811045    /*
     
    10871051        AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
    10881052
    1089 #ifdef VBOX_WITH_NEW_PHYS_CODE
    10901053        if (!pVM->pgm.s.fRamPreAlloc)
    10911054        {
     
    11191082        }
    11201083        else
    1121 #endif
    11221084        {
    11231085            /* Zero the memory. */
     
    11271089                switch (PGM_PAGE_GET_TYPE(pPage))
    11281090                {
    1129 #ifndef VBOX_WITH_NEW_PHYS_CODE
    1130                     case PGMPAGETYPE_INVALID:
    1131                     case PGMPAGETYPE_RAM:
    1132                         if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
    1133                         {
    1134                             /* shadow ram is reloaded elsewhere. */
    1135                             Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
    1136                             continue;
    1137                         }
    1138                         if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    1139                         {
    1140                             unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
    1141                             if (pRam->paChunkR3Ptrs[iChunk])
    1142                                 ASMMemZero32((char *)pRam->paChunkR3Ptrs[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
    1143                         }
    1144                         else
    1145                             ASMMemZero32((char *)pRam->pvR3 + (iPage << PAGE_SHIFT), PAGE_SIZE);
    1146                         break;
    1147 #else /* VBOX_WITH_NEW_PHYS_CODE */
    11481091                    case PGMPAGETYPE_RAM:
    11491092                        switch (PGM_PAGE_GET_STATE(pPage))
     
    11661109                        }
    11671110                        break;
    1168 #endif /* VBOX_WITH_NEW_PHYS_CODE */
    11691111
    11701112                    case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
     
    11861128    }
    11871129
    1188 #ifdef VBOX_WITH_NEW_PHYS_CODE
    11891130    /*
    11901131     * Finish off any pages pending freeing.
     
    11961137    }
    11971138    GMMR3FreePagesCleanup(pReq);
    1198 #endif
    1199 
    12001139
    12011140    return VINF_SUCCESS;
     
    12851224    {
    12861225        pNew = NULL;
    1287 #ifdef VBOX_WITH_NEW_PHYS_CODE
     1226
    12881227        /*
    12891228         * Make all the pages in the range MMIO/ZERO pages, freeing any
     
    12981237        }
    12991238        AssertRCReturn(rc, rc);
    1300 #endif
    13011239    }
    13021240    else
     
    13251263
    13261264        pNew->pvR3          = NULL;
    1327 #ifndef VBOX_WITH_NEW_PHYS_CODE
    1328         pNew->paChunkR3Ptrs = NULL;
    1329 #endif
    13301265
    13311266        uint32_t iPage = cPages;
     
    14111346                    {
    14121347                        fAllMMIO = false;
    1413 #ifdef VBOX_WITH_NEW_PHYS_CODE
    14141348                        Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
    14151349                        AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
    1416 #endif
    14171350                        break;
    14181351                    }
     
    14381371            }
    14391372
    1440 #ifdef VBOX_WITH_NEW_PHYS_CODE
    14411373            /*
    14421374             * Range match? It will all be within one range (see PGMAllHandler.cpp).
     
    14631395                break;
    14641396            }
    1465 #endif
    14661397
    14671398            /* next */
     
    15841515
    15851516            pNew->RamRange.pvR3         = pvPages;
    1586 #ifndef VBOX_WITH_NEW_PHYS_CODE
    1587             pNew->RamRange.paChunkR3Ptrs = NULL;
    1588 #endif
    15891517
    15901518            uint32_t iPage = cPages;
     
    26712599}
    26722600
    2673 #ifndef VBOX_WITH_NEW_PHYS_CODE
    2674 
    2675 /**
    2676  * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
    2677  * registration APIs calls to inform PGM about memory registrations.
    2678  *
    2679  * It registers the physical memory range with PGM. MM is responsible
    2680  * for the toplevel things - allocation and locking - while PGM is taking
    2681  * care of all the details and implements the physical address space virtualization.
    2682  *
    2683  * @returns VBox status.
    2684  * @param   pVM             The VM handle.
    2685  * @param   pvRam           HC virtual address of the RAM range. (page aligned)
    2686  * @param   GCPhys          GC physical address of the RAM range. (page aligned)
    2687  * @param   cb              Size of the RAM range. (page aligned)
    2688  * @param   fFlags          Flags, MM_RAM_*.
    2689  * @param   paPages         Pointer an array of physical page descriptors.
    2690  * @param   pszDesc         Description string.
    2691  */
    2692 VMMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
    2693 {
    2694     /*
    2695      * Validate input.
    2696      * (Not so important because callers are only MMR3PhysRegister()
    2697      *  and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
    2698      */
    2699     Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
    2700 
    2701     Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
    2702     /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
    2703     Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
    2704     /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
    2705     Assert(!(fFlags & ~0xfff));
    2706     Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    2707     Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
    2708     Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
    2709     Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    2710     RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    2711     if (GCPhysLast < GCPhys)
    2712     {
    2713         AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
    2714         return VERR_INVALID_PARAMETER;
    2715     }
    2716 
    2717     /*
    2718      * Find range location and check for conflicts.
    2719      */
    2720     PPGMRAMRANGE    pPrev = NULL;
    2721     PPGMRAMRANGE    pCur = pVM->pgm.s.pRamRangesR3;
    2722     while (pCur)
    2723     {
    2724         if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
    2725         {
    2726             AssertMsgFailed(("Conflict! This cannot happen!\n"));
    2727             return VERR_PGM_RAM_CONFLICT;
    2728         }
    2729         if (GCPhysLast < pCur->GCPhys)
    2730             break;
    2731 
    2732         /* next */
    2733         pPrev = pCur;
    2734         pCur = pCur->pNextR3;
    2735     }
    2736 
    2737     /*
    2738      * Allocate RAM range.
    2739      * Small ranges are allocated from the heap, big ones have separate mappings.
    2740      */
    2741     size_t          cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
    2742     PPGMRAMRANGE    pNew;
    2743     int             rc = VERR_NO_MEMORY;
    2744     if (cbRam > PAGE_SIZE / 2)
    2745     {   /* large */
    2746         cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
    2747         rc = MMR3HyperAllocOnceNoRel(pVM, cbRam, PAGE_SIZE, MM_TAG_PGM_PHYS, (void **)&pNew);
    2748         AssertMsgRC(rc, ("MMR3HyperAllocOnceNoRel(,%#x,,) -> %Rrc\n", cbRam, rc));
    2749     }
    2750     else
    2751     {   /* small */
    2752         rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
    2753         AssertMsgRC(rc, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc));
    2754     }
    2755     if (RT_SUCCESS(rc))
    2756     {
    2757         /*
    2758          * Initialize the range.
    2759          */
    2760         pNew->pvR3          = pvRam;
    2761         pNew->GCPhys        = GCPhys;
    2762         pNew->GCPhysLast    = GCPhysLast;
    2763         pNew->cb            = cb;
    2764         pNew->fFlags        = fFlags;
    2765         pNew->paChunkR3Ptrs = NULL;
    2766 
    2767         unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
    2768         if (paPages)
    2769         {
    2770             while (iPage-- > 0)
    2771             {
    2772                 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
    2773                               fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
    2774                               PGM_PAGE_STATE_ALLOCATED);
    2775                 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
    2776             }
    2777         }
    2778         else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    2779         {
    2780             /* Allocate memory for chunk to HC ptr lookup array. */
    2781             rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
    2782             AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, cb), rc);
    2783 
    2784             /* Physical memory will be allocated on demand. */
    2785             while (iPage-- > 0)
    2786             {
    2787                 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
    2788                 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
    2789             }
    2790         }
    2791         else
    2792         {
    2793             Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
    2794             RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
    2795             while (iPage-- > 0)
    2796             {
    2797                 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
    2798                 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
    2799             }
    2800         }
    2801 
    2802         /*
    2803          * Insert the new RAM range.
    2804          */
    2805         pgmLock(pVM);
    2806         pNew->pNextR3 = pCur;
    2807         pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
    2808         pNew->pNextRC = pCur ? MMHyperCCToRC(pVM, pCur) : NIL_RTRCPTR;
    2809         if (pPrev)
    2810         {
    2811             pPrev->pNextR3 = pNew;
    2812             pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
    2813             pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
    2814         }
    2815         else
    2816         {
    2817             pVM->pgm.s.pRamRangesR3 = pNew;
    2818             pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
    2819             pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
    2820         }
    2821         pgmUnlock(pVM);
    2822     }
    2823     return rc;
    2824 }
    2825 
    2826 
    2827 /**
    2828  * Register a chunk of a the physical memory range with PGM. MM is responsible
    2829  * for the toplevel things - allocation and locking - while PGM is taking
    2830  * care of all the details and implements the physical address space virtualization.
    2831  *
    2832  *
    2833  * @returns VBox status.
    2834  * @param   pVM             The VM handle.
    2835  * @param   pvRam           HC virtual address of the RAM range. (page aligned)
    2836  * @param   GCPhys          GC physical address of the RAM range. (page aligned)
    2837  * @param   cb              Size of the RAM range. (page aligned)
    2838  * @param   fFlags          Flags, MM_RAM_*.
    2839  * @param   paPages         Pointer an array of physical page descriptors.
    2840  * @param   pszDesc         Description string.
    2841  */
    2842 VMMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
    2843 {
    2844     NOREF(pszDesc);
    2845 
    2846     /*
    2847      * Validate input.
    2848      * (Not so important because callers are only MMR3PhysRegister()
    2849      *  and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
    2850      */
    2851     Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
    2852 
    2853     Assert(paPages);
    2854     Assert(pvRam);
    2855     Assert(!(fFlags & ~0xfff));
    2856     Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    2857     Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
    2858     Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
    2859     Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    2860     Assert(VM_IS_EMT(pVM));
    2861     Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
    2862     Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
    2863 
    2864     RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    2865     if (GCPhysLast < GCPhys)
    2866     {
    2867         AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
    2868         return VERR_INVALID_PARAMETER;
    2869     }
    2870 
    2871     /*
    2872      * Find existing range location.
    2873      */
    2874     PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    2875     while (pRam)
    2876     {
    2877         RTGCPHYS off = GCPhys - pRam->GCPhys;
    2878         if (    off < pRam->cb
    2879             &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
    2880             break;
    2881 
    2882         pRam = pRam->CTX_SUFF(pNext);
    2883     }
    2884     AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
    2885 
    2886     unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
    2887     unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
    2888     if (paPages)
    2889     {
    2890         while (iPage-- > 0)
    2891             pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags;  /** @todo PAGE FLAGS */
    2892     }
    2893     off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
    2894     pRam->paChunkR3Ptrs[off] = (uintptr_t)pvRam;
    2895 
    2896     /* Notify the recompiler. */
    2897     REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
    2898 
    2899     return VINF_SUCCESS;
    2900 }
    2901 
    2902 
    2903 /**
    2904  * Allocate missing physical pages for an existing guest RAM range.
    2905  *
    2906  * @returns VBox status.
    2907  * @param   pVM             The VM handle.
    2908  * @param   GCPhys          GC physical address of the RAM range. (page aligned)
    2909  */
    2910 VMMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
    2911 {
    2912     RTGCPHYS GCPhys = *pGCPhys;
    2913 
    2914     /*
    2915      * Walk range list.
    2916      */
    2917     pgmLock(pVM);
    2918 
    2919     PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    2920     while (pRam)
    2921     {
    2922         RTGCPHYS off = GCPhys - pRam->GCPhys;
    2923         if (    off < pRam->cb
    2924             &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
    2925         {
    2926             bool     fRangeExists = false;
    2927             unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
    2928 
    2929             /* Note: A request made from another thread may end up in EMT after somebody else has already allocated the range. */
    2930             if (pRam->paChunkR3Ptrs[off])
    2931                 fRangeExists = true;
    2932 
    2933             pgmUnlock(pVM);
    2934             if (fRangeExists)
    2935                 return VINF_SUCCESS;
    2936             return pgmr3PhysGrowRange(pVM, GCPhys);
    2937         }
    2938 
    2939         pRam = pRam->CTX_SUFF(pNext);
    2940     }
    2941     pgmUnlock(pVM);
    2942     return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    2943 }
    2944 
    2945 
/**
 * Allocate missing physical pages for an existing guest RAM range.
 *
 * Rounds the address down to a dynamic chunk boundary and allocates the
 * whole chunk (PGM_DYNAMIC_CHUNK_SIZE) of host memory, registering it with
 * MM.  On host memory shortage while the VM is running, the VM is suspended
 * and a "HostMemoryLow" runtime error is raised; the allocation is retried
 * once the user resumes execution.
 *
 * @returns VBox status.
 * @param   pVM             The VM handle.
 * @param   GCPhys          GC physical address inside the RAM range to grow.
 *                          (page aligned)
 */
int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
{
    void *pvRam;
    int   rc;

    /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
    if (!VM_IS_EMT(pVM))
    {
        PVMREQ pReq;
        const RTGCPHYS GCPhysParam = GCPhys;

        AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));

        /* Reroute through the public wrapper on EMT.  GCPhysParam lives on this
           stack, but the call waits (RT_INDEFINITE_WAIT) so the address stays valid. */
        rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
        if (RT_SUCCESS(rc))
        {
            rc = pReq->iStatus;
            VMR3ReqFree(pReq);
        }
        return rc;
    }

    /* Round down to chunk boundary */
    GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;

    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DynRamGrow);
    STAM_COUNTER_ADD(&pVM->pgm.s.StatR3DynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));

    Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %RGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));

    unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;

    for (;;)
    {
        rc = SUPPageAlloc(cPages, &pvRam);
        if (RT_SUCCESS(rc))
        {
            rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
            if (RT_SUCCESS(rc))
                return rc;

            /* Registration failed - don't leak the freshly allocated host pages. */
            SUPPageFree(pvRam, cPages);
        }

        VMSTATE enmVMState = VMR3GetState(pVM);
        if (enmVMState != VMSTATE_RUNNING)
        {
            /* Not running (e.g. during construction/load): no user to pause for, give up. */
            AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %RGp!\n", GCPhys));
            LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %RGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
            return rc;
        }

        LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));

        /* Pause first, then inform Main. */
        rc = VMR3SuspendNoSave(pVM);
        AssertRC(rc);

        VMSetRuntimeError(pVM, 0/*fFlags*/, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM");

        /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
        rc = VMR3WaitForResume(pVM);

        /* Retry */
        LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
    }
}
    3021 
    3022 
    3023 /**
    3024  * Interface MMR3RomRegister() and MMR3PhysReserve calls to update the
    3025  * flags of existing RAM ranges.
    3026  *
    3027  * @returns VBox status.
    3028  * @param   pVM             The VM handle.
    3029  * @param   GCPhys          GC physical address of the RAM range. (page aligned)
    3030  * @param   cb              Size of the RAM range. (page aligned)
    3031  * @param   fFlags          The Or flags, MM_RAM_* \#defines.
    3032  * @param   fMask           The and mask for the flags.
    3033  */
    3034 VMMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
    3035 {
    3036     Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
    3037 
    3038     /*
    3039      * Validate input.
    3040      * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
    3041      */
    3042     Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
    3043     Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    3044     Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    3045     RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    3046     AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
    3047 
    3048     /*
    3049      * Lookup the range.
    3050      */
    3051     PPGMRAMRANGE    pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    3052     while (pRam && GCPhys > pRam->GCPhysLast)
    3053         pRam = pRam->CTX_SUFF(pNext);
    3054     if (    !pRam
    3055         ||  GCPhys > pRam->GCPhysLast
    3056         ||  GCPhysLast < pRam->GCPhys)
    3057     {
    3058         AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
    3059         return VERR_INVALID_PARAMETER;
    3060     }
    3061 
    3062     /*
    3063      * Update the requested flags.
    3064      */
    3065     RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
    3066                          | fMask;
    3067     unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
    3068     unsigned iPage    = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
    3069     for ( ; iPage < iPageEnd; iPage++)
    3070         pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
    3071 
    3072     return VINF_SUCCESS;
    3073 }
    3074 
    3075 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    30762601
    30772602/**
     
    36073132    if (RT_SUCCESS(rc))
    36083133    {
    3609 #ifdef VBOX_WITH_NEW_PHYS_CODE
    36103134        if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
    36113135            rc = VINF_SUCCESS;
     
    36643188
    36653189        /* else: handler catching all access, no pointer returned. */
    3666 
    3667 #else
    3668         if (0)
    3669             /* nothing */;
    3670         else if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
    3671         {
    3672             if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
    3673                 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
    3674             else if (fWritable && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
    3675                 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
    3676             else
    3677             {
    3678                 /* Temporarily disabled physical handler(s); since the recompiler
    3679                    doesn't get notified when it's reset, we'll have to pretend it's
    3680                    operating normally. */
    3681                 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
    3682                     rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
    3683                 else
    3684                     rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
    3685             }
    3686         }
    3687         else
    3688             rc = VINF_SUCCESS;
    3689         if (RT_SUCCESS(rc))
    3690         {
    3691             if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    3692             {
    3693                 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
    3694                 RTGCPHYS off = GCPhys - pRam->GCPhys;
    3695                 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
    3696                 *ppv = (void *)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
    3697             }
    3698             else if (RT_LIKELY(pRam->pvR3))
    3699             {
    3700                 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
    3701                 RTGCPHYS off = GCPhys - pRam->GCPhys;
    3702                 *ppv = (uint8_t *)pRam->pvR3 + off;
    3703             }
    3704             else
    3705                 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
    3706         }
    3707 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
    37083190    }
    37093191    else
  • TabularUnified trunk/src/VBox/VMM/VMM.cpp

    r18645 r18665  
    361361    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk,        STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk",      STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_MAP_CHUNK calls.");
    362362    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy,      STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy",    STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES calls.");
    363 #ifndef VBOX_WITH_NEW_PHYS_CODE
    364     STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMGrowRAM,         STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMGrowRAM",       STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_RAM_GROW_RANGE calls.");
    365 #endif
    366363    STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay,          STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay",        STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
    367364    STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush,           STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush",      STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VMM_LOGGER_FLUSH calls.");
     
    14081405            break;
    14091406        }
    1410 #ifndef VBOX_WITH_NEW_PHYS_CODE
    1411 
    1412         case VMMCALLHOST_PGM_RAM_GROW_RANGE:
    1413         {
    1414             const RTGCPHYS GCPhys = pVM->vmm.s.u64CallHostArg;
    1415             pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, &GCPhys);
    1416             break;
    1417         }
    1418 #endif
    14191407
    14201408        /*
  • TabularUnified trunk/src/VBox/VMM/VMMInternal.h

    r17546 r18665  
    333333    STAMCOUNTER                 StatRZRetEmulHlt;
    334334    STAMCOUNTER                 StatRZRetPendingRequest;
    335 #ifndef VBOX_WITH_NEW_PHYS_CODE
    336     STAMCOUNTER                 StatRZCallPGMGrowRAM;
    337 #endif
    338335    STAMCOUNTER                 StatRZCallPDMLock;
    339336    STAMCOUNTER                 StatRZCallLogFlush;
  • TabularUnified trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp

    r18617 r18665  
    467467    GEN_CHECK_OFF(PGM, pRomRangesR0);
    468468    GEN_CHECK_OFF(PGM, pRomRangesRC);
    469     GEN_CHECK_OFF(PGM, cbRamSize);
    470469    GEN_CHECK_OFF(PGM, pTreesR3);
    471470    GEN_CHECK_OFF(PGM, pTreesR0);
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette