VirtualBox

Changeset 37354 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
Jun 7, 2011 3:05:32 PM (14 years ago)
Author:
vboxsync
Message:

PGM: Fixed locking issues in PGMR3PhysMMIORegister and PGMR3PhysMMIODeregister. Also addressed a harmless one in PGMR3PhysRomRegister (only used at init time, so no races). Fortified the code with more lock assertions, replacing the incorrect PGMIsLocked() checks (we only care if the current thread is the lock owner). Cleaned up some ReturnStmt macros and added more of them.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r36891 r37354  
    457457    if (fLockTaken)
    458458    {
    459         Assert(PGMIsLockOwner(pVM));
     459        PGM_LOCK_ASSERT_OWNER(pVM);
    460460        pgmUnlock(pVM);
    461461    }
     
    933933    int            rc;
    934934
    935     Assert(PGMIsLockOwner(pVM));
     935    PGM_LOCK_ASSERT_OWNER(pVM);
    936936
    937937    /* Allocate page directory if not present. */
     
    10201020    PVM             pVM   = pVCpu->CTX_SUFF(pVM);
    10211021
    1022     Assert(PGMIsLockOwner(pVM));
     1022    PGM_LOCK_ASSERT_OWNER(pVM);
    10231023
    10241024    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT);    /* can't happen */
     
    10661066    int            rc;
    10671067
    1068     Assert(PGMIsLockOwner(pVM));
     1068    PGM_LOCK_ASSERT_OWNER(pVM);
    10691069
    10701070    /* Allocate page directory pointer table if not present. */
     
    11601160    PCX86PML4E      pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
    11611161
    1162     Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
     1162    PGM_LOCK_ASSERT_OWNER(PGMCPU2VM(pPGM));
    11631163
    11641164    AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
     
    12111211
    12121212    Assert(pVM->pgm.s.fNestedPaging);
    1213     Assert(PGMIsLockOwner(pVM));
     1213    PGM_LOCK_ASSERT_OWNER(pVM);
    12141214
    12151215    pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
     
    12891289int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
    12901290{
    1291     Assert(PGMIsLockOwner(pVCpu->CTX_SUFF(pVM)));
     1291    PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
    12921292
    12931293    int rc;
     
    13411341 *
    13421342 * @returns VBox status.
    1343  * @param   pVCpu       VMCPU handle.
     1343 * @param   pVCpu       The current CPU.
    13441344 * @param   GCPtr       Guest Context virtual address of the page.
    13451345 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
     
    13491349VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
    13501350{
     1351    VMCPU_ASSERT_EMT(pVCpu);
    13511352    return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
    13521353}
     
    13631364VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
    13641365{
     1366    VMCPU_ASSERT_EMT(pVCpu);
    13651367    int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
    13661368    return RT_SUCCESS(rc);
     
    13791381VMMDECL(int)  PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
    13801382{
     1383    VMCPU_ASSERT_EMT(pVCpu);
    13811384    return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
    13821385}
     
    13991402{
    14001403    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
     1404    VMCPU_ASSERT_EMT(pVCpu);
    14011405
    14021406    /*
     
    17741778VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
    17751779{
     1780    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
    17761781    PVM pVM = pVCpu->CTX_SUFF(pVM);
    17771782
    1778     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
     1783    VMCPU_ASSERT_EMT(pVCpu);
    17791784
    17801785    /*
     
    18831888    PVM pVM = pVCpu->CTX_SUFF(pVM);
    18841889
     1890    VMCPU_ASSERT_EMT(pVCpu);
    18851891    LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
    18861892
     
    18971903    switch (pVCpu->pgm.s.enmGuestMode)
    18981904    {
    1899     case PGMMODE_PAE:
    1900     case PGMMODE_PAE_NX:
    1901         GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    1902         break;
    1903     case PGMMODE_AMD64:
    1904     case PGMMODE_AMD64_NX:
    1905         GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
    1906         break;
    1907     default:
    1908         GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    1909         break;
    1910     }
    1911     if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
    1912     {
    1913         pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    1914         rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
    1915         AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
    1916     }
    1917     return rc;
    1918 }
    1919 
    1920 
    1921 /**
    1922  * Synchronize the paging structures.
    1923  *
    1924  * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
    1925  * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
    1926  * in several places, most importantly whenever the CR3 is loaded.
    1927  *
    1928  * @returns VBox status code.
    1929  * @param   pVCpu       VMCPU handle.
    1930  * @param   cr0         Guest context CR0 register
    1931  * @param   cr3         Guest context CR3 register
    1932  * @param   cr4         Guest context CR4 register
    1933  * @param   fGlobal     Including global page directories or not
    1934  */
    1935 VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
    1936 {
    1937     PVM pVM = pVCpu->CTX_SUFF(pVM);
    1938     int rc;
    1939 
    1940     /*
    1941      * The pool may have pending stuff and even require a return to ring-3 to
    1942      * clear the whole thing.
    1943      */
    1944     rc = pgmPoolSyncCR3(pVCpu);
    1945     if (rc != VINF_SUCCESS)
    1946         return rc;
    1947 
    1948     /*
    1949      * We might be called when we shouldn't.
    1950      *
    1951      * The mode switching will ensure that the PD is resynced
    1952      * after every mode switch. So, if we find ourselves here
    1953      * when in protected or real mode we can safely disable the
    1954      * FF and return immediately.
    1955      */
    1956     if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
    1957     {
    1958         Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
    1959         Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
    1960         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    1961         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    1962         return VINF_SUCCESS;
    1963     }
    1964 
    1965     /* If global pages are not supported, then all flushes are global. */
    1966     if (!(cr4 & X86_CR4_PGE))
    1967         fGlobal = true;
    1968     LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
    1969              VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
    1970 
    1971     /*
    1972      * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
    1973      * This should be done before SyncCR3.
    1974      */
    1975     if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
    1976     {
    1977         pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
    1978 
    1979         RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
    1980         RTGCPHYS GCPhysCR3;
    1981         switch (pVCpu->pgm.s.enmGuestMode)
    1982         {
    19831905        case PGMMODE_PAE:
    19841906        case PGMMODE_PAE_NX:
     
    19921914            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    19931915            break;
     1916    }
     1917    if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
     1918    {
     1919        pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
     1920        rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
     1921        AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
     1922    }
     1923    return rc;
     1924}
     1925
     1926
     1927/**
     1928 * Synchronize the paging structures.
     1929 *
     1930 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
     1931 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
     1932 * in several places, most importantly whenever the CR3 is loaded.
     1933 *
     1934 * @returns VBox status code.
     1935 * @param   pVCpu       VMCPU handle.
     1936 * @param   cr0         Guest context CR0 register
     1937 * @param   cr3         Guest context CR3 register
     1938 * @param   cr4         Guest context CR4 register
     1939 * @param   fGlobal     Including global page directories or not
     1940 */
     1941VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
     1942{
     1943    PVM pVM = pVCpu->CTX_SUFF(pVM);
     1944    int rc;
     1945
     1946    VMCPU_ASSERT_EMT(pVCpu);
     1947
     1948    /*
     1949     * The pool may have pending stuff and even require a return to ring-3 to
     1950     * clear the whole thing.
     1951     */
     1952    rc = pgmPoolSyncCR3(pVCpu);
     1953    if (rc != VINF_SUCCESS)
     1954        return rc;
     1955
     1956    /*
     1957     * We might be called when we shouldn't.
     1958     *
     1959     * The mode switching will ensure that the PD is resynced
     1960     * after every mode switch. So, if we find ourselves here
     1961     * when in protected or real mode we can safely disable the
     1962     * FF and return immediately.
     1963     */
     1964    if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
     1965    {
     1966        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
     1967        Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
     1968        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     1969        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
     1970        return VINF_SUCCESS;
     1971    }
     1972
     1973    /* If global pages are not supported, then all flushes are global. */
     1974    if (!(cr4 & X86_CR4_PGE))
     1975        fGlobal = true;
     1976    LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
     1977             VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
     1978
     1979    /*
     1980     * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
     1981     * This should be done before SyncCR3.
     1982     */
     1983    if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
     1984    {
     1985        pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
     1986
     1987        RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
     1988        RTGCPHYS GCPhysCR3;
     1989        switch (pVCpu->pgm.s.enmGuestMode)
     1990        {
     1991            case PGMMODE_PAE:
     1992            case PGMMODE_PAE_NX:
     1993                GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
     1994                break;
     1995            case PGMMODE_AMD64:
     1996            case PGMMODE_AMD64_NX:
     1997                GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
     1998                break;
     1999            default:
     2000                GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
     2001                break;
    19942002        }
    19952003
     
    19992007            rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
    20002008        }
     2009
    20012010        /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
    20022011        if (    rc == VINF_PGM_SYNC_CR3
     
    20772086    PGMMODE enmGuestMode;
    20782087
     2088    VMCPU_ASSERT_EMT(pVCpu);
     2089
    20792090    /*
    20802091     * Calc the new guest mode.
     
    21492160    return pVCpu->pgm.s.enmShadowMode;
    21502161}
     2162
    21512163
    21522164/**
     
    22202232VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
    22212233{
     2234    VMCPU_ASSERT_EMT(pVCpu);
    22222235    Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
     2236
    22232237    pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
    22242238    if (fNxe)
     
    22782292}
    22792293
    2280 /**
    2281  * Check if the PGM lock is currently taken.
    2282  *
    2283  * @returns bool locked/not locked
    2284  * @param   pVM         The VM to operate on.
    2285  */
    2286 VMMDECL(bool) PGMIsLocked(PVM pVM)
    2287 {
    2288     return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
    2289 }
    2290 
    22912294
    22922295/**
     
    23052308 * Enable or disable large page usage
    23062309 *
     2310 * @returns VBox status code.
    23072311 * @param   pVM             The VM to operate on.
    23082312 * @param   fUseLargePages  Use/not use large pages
    23092313 */
    2310 VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
    2311 {
     2314VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
     2315{
     2316    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
     2317
    23122318    pVM->fUseLargePages = fUseLargePages;
    2313 }
     2319    return VINF_SUCCESS;
     2320}
     2321
    23142322
    23152323/**
     
    24062414        /* The single char state stuff. */
    24072415        static const char s_achPageStates[4]    = { 'Z', 'A', 'W', 'S' };
    2408         szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
     2416        szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
    24092417
    24102418#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
     
    24212429            szTmp[cch++] = ':';
    24222430            static const char s_achPageTypes[8][4]  = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
    2423             szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
    2424             szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
    2425             szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
     2431            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
     2432            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
     2433            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
    24262434        }
    24272435
     
    24302438        {
    24312439            szTmp[cch++] = ':';
    2432             cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
     2440            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
    24332441        }
    24342442
     
    24432451            szTmp[cch++] = ':';
    24442452            static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
    2445             szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
    2446             cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
     2453            szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
     2454            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
    24472455        }
    24482456#undef IS_PART_INCLUDED
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r36891 r37354  
    11201120    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    11211121
    1122     Assert(PGMIsLockOwner(pVM));
     1122    PGM_LOCK_ASSERT_OWNER(pVM);
    11231123
    11241124    LogFlow(("InvalidatePage %RGv\n", GCPtrPage));
     
    14811481        u16 = PGMPOOL_TD_MAKE(1, pShwPage->idx);
    14821482        /* Save the page table index. */
    1483         PGM_PAGE_SET_PTE_INDEX(pPage, iPTDst);
     1483        PGM_PAGE_SET_PTE_INDEX(pVM, pPage, iPTDst);
    14841484    }
    14851485    else
     
    14881488    /* write back */
    14891489    Log2(("SyncPageWorkerTrackAddRef: u16=%#x->%#x  iPTDst=%#x\n", u16, PGM_PAGE_GET_TRACKING(pPage), iPTDst));
    1490     PGM_PAGE_SET_TRACKING(pPage, u16);
     1490    PGM_PAGE_SET_TRACKING(pVM, pPage, u16);
    14911491
    14921492    /* update statistics. */
     
    18061806    LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
    18071807
    1808     Assert(PGMIsLockOwner(pVM));
     1808    PGM_LOCK_ASSERT_OWNER(pVM);
    18091809
    18101810#if    (   PGM_GST_TYPE == PGM_TYPE_32BIT  \
     
    23542354    PPGMPOOL    pPool = pVM->pgm.s.CTX_SUFF(pPool);
    23552355
    2356     Assert(PGMIsLockOwner(pVM));
     2356    PGM_LOCK_ASSERT_OWNER(pVM);
    23572357
    23582358    /*
     
    25452545    LogFlow(("SyncPT: GCPtrPage=%RGv\n", GCPtrPage));
    25462546
    2547     Assert(PGMIsLocked(pVM));
     2547    PGM_LOCK_ASSERT_OWNER(pVM);
    25482548
    25492549#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
  • trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp

    r36891 r37354  
    435435     * Make it an MMIO/Zero page.
    436436     */
    437     PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
    438     PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO);
    439     PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
    440     PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
     437    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
     438    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
     439    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
     440    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    441441    PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
    442442
    443443    /* Flush its TLB entry. */
    444     PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
     444    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
    445445
    446446    /*
     
    10941094            LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
    10951095                     GCPhysPage, pPage, GCPhysPageRemap, pPageRemap ));
    1096             PGM_PAGE_SET_HCPHYS(pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
    1097             PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
    1098             PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
    1099             PGM_PAGE_SET_PAGEID(pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
     1096            PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
     1097            PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
     1098            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
     1099            PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
    11001100            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
    11011101            pCur->cAliasedPages++;
     
    11031103
    11041104            /* Flush its TLB entry. */
    1105             PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
     1105            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
    11061106
    11071107            LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
     
    11951195            LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RHp\n",
    11961196                     GCPhysPage, pPage, HCPhysPageRemap));
    1197             PGM_PAGE_SET_HCPHYS(pPage, HCPhysPageRemap);
    1198             PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
    1199             PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
     1197            PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
     1198            PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
     1199            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
    12001200            /** @todo hack alert
    12011201             *  This needs to be done properly. Currently we get away with it as the recompiler directly calls
    12021202             *  IOM read and write functions. Access through PGMPhysRead/Write will crash the process.
    12031203             */
    1204             PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
     1204            PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    12051205            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
    12061206            pCur->cAliasedPages++;
     
    12081208
    12091209            /* Flush its TLB entry. */
    1210             PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
     1210            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
    12111211
    12121212            LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
     
    14121412    PVM             pVM = (PVM)pvUser;
    14131413
    1414     Assert(PGMIsLockOwner(pVM));
     1414    PGM_LOCK_ASSERT_OWNER(pVM);
     1415
    14151416    /*
    14161417     * Iterate the pages and apply the new state.
     
    16781679    State.pVM = pVM;
    16791680
    1680     Assert(PGMIsLockOwner(pVM));
     1681    PGM_LOCK_ASSERT_OWNER(pVM);
    16811682
    16821683    /*
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r37137 r37354  
    397397 * @param   pVM     The VM handle.
    398398 */
    399 VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
     399void pgmPhysInvalidatePageMapTLB(PVM pVM)
    400400{
    401401    pgmLock(pVM);
    402402    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
     403
    403404    /* Clear the shared R0/R3 TLB completely. */
    404405    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
     
    409410        pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
    410411    }
     412
    411413    /** @todo clear the RC TLB whenever we add it. */
     414
    412415    pgmUnlock(pVM);
    413416}
     417
    414418
    415419/**
     
    419423 * @param   GCPhys  GCPhys entry to flush
    420424 */
    421 VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
    422 {
    423     Assert(PGMIsLocked(pVM));
     425void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
     426{
     427    PGM_LOCK_ASSERT_OWNER(pVM);
    424428
    425429    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
    426     /* Clear the shared R0/R3 TLB entry. */
     430
    427431#ifdef IN_RC
    428432    unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
     
    432436    pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
    433437#else
     438    /* Clear the shared R0/R3 TLB entry. */
    434439    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    435440    pTlbe->GCPhys = NIL_RTGCPHYS;
     
    438443    pTlbe->pv     = 0;
    439444#endif
    440     /* @todo clear the RC TLB whenever we add it. */
     445
     446    /** @todo clear the RC TLB whenever we add it. */
    441447}
    442448
     
    557563     * Prereqs.
    558564     */
    559     Assert(PGMIsLocked(pVM));
     565    PGM_LOCK_ASSERT_OWNER(pVM);
    560566    AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
    561567    Assert(!PGM_PAGE_IS_MMIO(pPage));
     
    580586        }
    581587        /* Mark the base as type page table, so we don't check over and over again. */
    582         PGM_PAGE_SET_PDE_TYPE(pBasePage, PGM_PAGE_PDE_TYPE_PT);
     588        PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
    583589
    584590        /* fall back to 4KB pages. */
     
    606612    }
    607613    /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
    608     Assert(PGMIsLocked(pVM));
     614    PGM_LOCK_ASSERT_OWNER(pVM);
    609615    AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
    610616    Assert(!PGM_PAGE_IS_MMIO(pPage));
     
    654660     */
    655661    pVM->pgm.s.cPrivatePages++;
    656     PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
    657     PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
    658     PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
    659     PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
    660     PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
     662    PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
     663    PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
     664    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
     665    PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
     666    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
    661667
    662668    /* Copy the shared page contents to the replacement page. */
     
    706712     * Prereqs.
    707713     */
    708     Assert(PGMIsLocked(pVM));
     714    PGM_LOCK_ASSERT_OWNER(pVM);
    709715    Assert(PGMIsUsingLargePages(pVM));
    710716
     
    744750                /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
    745751                STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
    746                 PGM_PAGE_SET_PDE_TYPE(pFirstPage, PGM_PAGE_PDE_TYPE_PT);
     752                PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
    747753                return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
    748754            }
     
    826832    if (i == _2M/PAGE_SIZE)
    827833    {
    828         PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE);
     834        PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
    829835        pVM->pgm.s.cLargePagesDisabled--;
    830836        Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
     
    850856{
    851857    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
    852     PGM_PAGE_SET_WRITTEN_TO(pPage);
    853     PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
     858    PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
     859    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
    854860    Assert(pVM->pgm.s.cMonitoredPages > 0);
    855861    pVM->pgm.s.cMonitoredPages--;
     
    874880int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
    875881{
    876     Assert(PGMIsLockOwner(pVM));
     882    PGM_LOCK_ASSERT_OWNER(pVM);
    877883    switch (PGM_PAGE_GET_STATE(pPage))
    878884    {
     
    923929     * Validation.
    924930     */
    925     Assert(PGMIsLocked(pVM));
     931    PGM_LOCK_ASSERT_OWNER(pVM);
    926932    AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    927933    const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
     
    9991005static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
    10001006{
    1001     Assert(PGMIsLocked(pVM));
     1007    PGM_LOCK_ASSERT_OWNER(pVM);
    10021008
    10031009#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     
    11991205int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
    12001206{
    1201     Assert(PGMIsLocked(pVM));
     1207    PGM_LOCK_ASSERT_OWNER(pVM);
    12021208
    12031209    /*
     
    12301236int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
    12311237{
    1232     Assert(PGMIsLocked(pVM));
     1238    PGM_LOCK_ASSERT_OWNER(pVM);
    12331239    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
    12341240
     
    12521258    else
    12531259    {
    1254         Assert(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg);
     1260        AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
    12551261        pTlbe->pMap = NULL;
    12561262        pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
     
    12911297    int rc;
    12921298    AssertReturn(pPage, VERR_INTERNAL_ERROR);
    1293     Assert(PGMIsLocked(pVM));
     1299    PGM_LOCK_ASSERT_OWNER(pVM);
    12941300
    12951301    /*
     
    13471353{
    13481354    AssertReturn(pPage, VERR_INTERNAL_ERROR);
    1349     Assert(PGMIsLocked(pVM));
     1355    PGM_LOCK_ASSERT_OWNER(pVM);
    13501356    Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
    13511357
     
    17281734        if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
    17291735        {
    1730             PGM_PAGE_SET_WRITTEN_TO(pPage);
    1731             PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
     1736            PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
     1737            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
    17321738            Assert(pVM->pgm.s.cMonitoredPages > 0);
    17331739            pVM->pgm.s.cMonitoredPages--;
     
    19551961        Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
    19561962        STAM_PROFILE_START(&pPhys->Stat, h);
    1957         Assert(PGMIsLockOwner(pVM));
     1963        PGM_LOCK_ASSERT_OWNER(pVM);
    19581964        /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
    19591965        pgmUnlock(pVM);
     
    21992205
    22002206                STAM_PROFILE_START(&pCur->Stat, h);
    2201                 Assert(PGMIsLockOwner(pVM));
     2207                PGM_LOCK_ASSERT_OWNER(pVM);
    22022208                /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
    22032209                pgmUnlock(pVM);
     
    24112417            Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
    24122418            STAM_PROFILE_START(&pPhys->Stat, h);
    2413             Assert(PGMIsLockOwner(pVM));
     2419            PGM_LOCK_ASSERT_OWNER(pVM);
    24142420            /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
    24152421            pgmUnlock(pVM);
     
    24802486
    24812487            STAM_PROFILE_START(&pPhys->Stat, h);
    2482             Assert(PGMIsLockOwner(pVM));
     2488            PGM_LOCK_ASSERT_OWNER(pVM);
    24832489            /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
    24842490            pgmUnlock(pVM);
     
    28062812 *
    28072813 * @returns VBox status.
    2808  * @param   pVCpu       The VMCPU handle.
     2814 * @param   pVCpu       Handle to the current virtual CPU.
    28092815 * @param   pvDst       The destination address.
    28102816 * @param   GCPtrSrc    The source address (GC pointer).
     
    28142820{
    28152821    PVM pVM = pVCpu->CTX_SUFF(pVM);
     2822    VMCPU_ASSERT_EMT(pVCpu);
    28162823
    28172824    /*
     
    28962903 *
    28972904 * @returns VBox status.
    2898  * @param   pVCpu       The VMCPU handle.
     2905 * @param   pVCpu       Handle to the current virtual CPU.
    28992906 * @param   GCPtrDst    The destination address (GC pointer).
    29002907 * @param   pvSrc       The source address.
     
    29042911{
    29052912    PVM pVM = pVCpu->CTX_SUFF(pVM);
     2913    VMCPU_ASSERT_EMT(pVCpu);
    29062914
    29072915    /*
     
    29752983 *
    29762984 * @returns VBox status.
    2977  * @param   pVCpu       The VMCPU handle.
     2985 * @param   pVCpu       Handle to the current virtual CPU.
    29782986 * @param   GCPtrDst    The destination address (GC pointer).
    29792987 * @param   pvSrc       The source address.
     
    29832991{
    29842992    PVM pVM = pVCpu->CTX_SUFF(pVM);
     2993    VMCPU_ASSERT_EMT(pVCpu);
    29852994
    29862995    /*
     
    30543063 *
    30553064 * @returns VBox status.
    3056  * @param   pVCpu       The VMCPU handle.
     3065 * @param   pVCpu       Handle to the current virtual CPU.
    30573066 * @param   pvDst       The destination address.
    30583067 * @param   GCPtrSrc    The source address (GC pointer).
     
    30663075    int         rc;
    30673076    PVM         pVM = pVCpu->CTX_SUFF(pVM);
     3077    VMCPU_ASSERT_EMT(pVCpu);
    30683078
    30693079    /*
     
    31363146 * @retval  VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
    31373147 *
    3138  * @param   pVCpu       The VMCPU handle.
     3148 * @param   pVCpu       Handle to the current virtual CPU.
    31393149 * @param   GCPtrDst    The destination address (GC pointer).
    31403150 * @param   pvSrc       The source address.
     
    31473157    int         rc;
    31483158    PVM         pVM = pVCpu->CTX_SUFF(pVM);
     3159    VMCPU_ASSERT_EMT(pVCpu);
    31493160
    31503161    /*
     
    32293240 * @retval  VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
    32303241 *
    3231  * @param   pVCpu       The VMCPU handle.
     3242 * @param   pVCpu       Handle to the current virtual CPU.
    32323243 * @param   pCtxCore    The context core.
    32333244 * @param   pvDst       Where to put the bytes we've read.
     
    32423253    PVM pVM = pVCpu->CTX_SUFF(pVM);
    32433254    Assert(cb <= PAGE_SIZE);
     3255    VMCPU_ASSERT_EMT(pVCpu);
    32443256
    32453257/** @todo r=bird: This isn't perfect!
     
    33923404 * @retval  VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
    33933405 *
    3394  * @param   pVCpu       The VMCPU handle.
     3406 * @param   pVCpu       Handle to the current virtual CPU.
    33953407 * @param   pCtxCore    The context core.
    33963408 * @param   pvDst       Where to put the bytes we've read.
     
    34093421 *          unmap mappings done by the caller. Be careful!
    34103422 */
    3411 VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
     3423VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
     3424                                              bool fRaiseTrap)
    34123425{
    34133426    PVM pVM = pVCpu->CTX_SUFF(pVM);
    34143427    Assert(cb <= PAGE_SIZE);
     3428    VMCPU_ASSERT_EMT(pVCpu);
    34153429
    34163430    /*
     
    35843598 * @retval  VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
    35853599 *
    3586  * @param   pVCpu       The VMCPU handle.
     3600 * @param   pVCpu       Handle to the current virtual CPU.
    35873601 * @param   pCtxCore    The context core.
    35883602 * @param   GCPtrDst    The destination address.
     
    36003614 *          unmap mappings done by the caller. Be careful!
    36013615 */
    3602 VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
     3616VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
     3617                                               size_t cb, bool fRaiseTrap)
    36033618{
    36043619    Assert(cb <= PAGE_SIZE);
    36053620    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3621    VMCPU_ASSERT_EMT(pVCpu);
    36063622
    36073623    /*
     
    37783794}
    37793795
    3780 /**
    3781  * Return the page type of the specified physical address
    3782  *
     3796
     3797/**
     3798 * Return the page type of the specified physical address.
     3799 *
     3800 * @returns The page type.
    37833801 * @param   pVM             VM Handle.
    37843802 * @param   GCPhys          Guest physical address
     
    37863804VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
    37873805{
    3788     PPGMPAGE pPage;
    3789 
    3790     pPage = pgmPhysGetPage(pVM, GCPhys);
    3791     if (pPage)
    3792         return (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
    3793 
    3794     return PGMPAGETYPE_INVALID;
    3795 }
    3796 
     3806    pgmLock(pVM);
     3807    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
     3808    PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
     3809    pgmUnlock(pVM);
     3810
     3811    return enmPgType;
     3812}
     3813
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r36891 r37354  
    17171717    unsigned idxFree;
    17181718
    1719     Assert(PGMIsLocked(pVM));
     1719    PGM_LOCK_ASSERT_OWNER(pVM);
    17201720    AssertCompile(RT_ELEMENTS(pPool->aDirtyPages) == 8 || RT_ELEMENTS(pPool->aDirtyPages) == 16);
    17211721    Assert(!pPage->fDirty);
     
    17911791{
    17921792    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    1793     Assert(PGMIsLocked(pVM));
     1793    PGM_LOCK_ASSERT_OWNER(pVM);
    17941794    if (!pPool->cDirtyPages)
    17951795        return false;
     
    18201820{
    18211821    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    1822     Assert(PGMIsLocked(pVM));
     1822    PGM_LOCK_ASSERT_OWNER(pVM);
    18231823    Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));
    18241824
     
    18591859{
    18601860    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    1861     Assert(PGMIsLocked(pVM));
     1861    PGM_LOCK_ASSERT_OWNER(pVM);
    18621862    Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));
    18631863
     
    18801880{
    18811881    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    1882     Assert(PGMIsLocked(pVM));
     1882    PGM_LOCK_ASSERT_OWNER(pVM);
    18831883    Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));
    18841884    unsigned idxDirtyPage = RT_ELEMENTS(pPool->aDirtyPages);
     
    33553355    bool fKeptPTEs = pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, fFlushPTEs, iShw, PGM_PAGE_GET_PTE_INDEX(pPhysPage));
    33563356    if (!fKeptPTEs)
    3357         PGM_PAGE_SET_TRACKING(pPhysPage, 0);
     3357        PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
    33583358    STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPT, f);
    33593359}
     
    33703370static void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iPhysExt)
    33713371{
    3372     Assert(PGMIsLockOwner(pVM));
     3372    PGM_LOCK_ASSERT_OWNER(pVM);
    33733373    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    33743374    bool     fKeepList = false;
     
    34073407        pPool->iPhysExtFreeHead = iPhysExtStart;
    34083408        /* Invalidate the tracking data. */
    3409         PGM_PAGE_SET_TRACKING(pPhysPage, 0);
     3409        PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
    34103410    }
    34113411
     
    34623462        {
    34633463            /* Mark the large page as disabled as we need to break it up to change a single page in the 2 MB range. */
    3464             PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
     3464            PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
    34653465            pVM->pgm.s.cLargePagesDisabled++;
    34663466
     
    36663666    }
    36673667
    3668     PGM_PAGE_SET_TRACKING(pPhysPage, 0);
     3668    PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
    36693669    STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
    36703670
     
    38523852PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt)
    38533853{
    3854     Assert(PGMIsLockOwner(pVM));
     3854    PGM_LOCK_ASSERT_OWNER(pVM);
    38553855    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    38563856    uint16_t iPhysExt = pPool->iPhysExtFreeHead;
     
    38763876void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt)
    38773877{
    3878     Assert(PGMIsLockOwner(pVM));
     3878    PGM_LOCK_ASSERT_OWNER(pVM);
    38793879    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    38803880    Assert(iPhysExt < pPool->cMaxPhysExts);
     
    38983898void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt)
    38993899{
    3900     Assert(PGMIsLockOwner(pVM));
     3900    PGM_LOCK_ASSERT_OWNER(pVM);
    39013901    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    39023902
     
    39353935static uint16_t pgmPoolTrackPhysExtInsert(PVM pVM, uint16_t iPhysExt, uint16_t iShwPT, uint16_t iPte)
    39363936{
    3937     Assert(PGMIsLockOwner(pVM));
     3937    PGM_LOCK_ASSERT_OWNER(pVM);
    39383938    PPGMPOOL        pPool = pVM->pgm.s.CTX_SUFF(pPool);
    39393939    PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
     
    40694069void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMPAGE pPhysPage, uint16_t iPte)
    40704070{
     4071    PVM            pVM = pPool->CTX_SUFF(pVM);
    40714072    const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    40724073    AssertFatalMsg(cRefs == PGMPOOL_TD_CREFS_PHYSEXT, ("cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx));
     
    40754076    if (iPhysExt != PGMPOOL_TD_IDX_OVERFLOWED)
    40764077    {
    4077         PVM pVM = pPool->CTX_SUFF(pVM);
    40784078        pgmLock(pVM);
    40794079
     
    41114111                        pgmPoolTrackPhysExtFree(pVM, iPhysExt);
    41124112                        Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d lonely\n", pPhysPage, pPage->idx));
    4113                         PGM_PAGE_SET_TRACKING(pPhysPage, 0);
     4113                        PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
    41144114                    }
    41154115                    else if (iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX)
     
    41174117                        /* head */
    41184118                        Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d head\n", pPhysPage, pPage->idx));
    4119                         PGM_PAGE_SET_TRACKING(pPhysPage, PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtNext));
     4119                        PGM_PAGE_SET_TRACKING(pVM, pPhysPage, PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtNext));
    41204120                        pgmPoolTrackPhysExtFree(pVM, iPhysExt);
    41214121                    }
     
    41634163     * Lookup the page and check if it checks out before derefing it.
    41644164     */
    4165     PPGMPAGE pPhysPage = pgmPhysGetPage(pPool->CTX_SUFF(pVM), GCPhys);
     4165    PVM      pVM       = pPool->CTX_SUFF(pVM);
     4166    PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhys);
    41664167    if (pPhysPage)
    41674168    {
     
    42054206     */
    42064207    RTHCPHYS HCPhysHinted;
    4207     PPGMPAGE pPhysPage = pgmPhysGetPage(pPool->CTX_SUFF(pVM), GCPhysHint);
     4208    PVM      pVM       = pPool->CTX_SUFF(pVM);
     4209    PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhysHint);
    42084210    if (pPhysPage)
    42094211    {
     
    50945096{
    50955097    PVM pVM = pPool->CTX_SUFF(pVM);
    5096 
    5097     Assert(PGMIsLockOwner(pVM));
     5098    PGM_LOCK_ASSERT_OWNER(pVM);
    50985099
    50995100    /*
     
    51175118{
    51185119    PVM pVM = pPool->CTX_SUFF(pVM);
    5119     Assert(PGMIsLockOwner(pVM));
     5120    PGM_LOCK_ASSERT_OWNER(pVM);
    51205121    return (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
    51215122}
     
    52375238    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    52385239
    5239     Assert(PGMIsLockOwner(pVM));
     5240    PGM_LOCK_ASSERT_OWNER(pVM);
    52405241    STAM_PROFILE_START(&pPool->StatR3Reset, a);
    52415242    LogFlow(("pgmR3PoolReset:\n"));
     
    53215322        unsigned iPage = pRam->cb >> PAGE_SHIFT;
    53225323        while (iPage-- > 0)
    5323             PGM_PAGE_SET_TRACKING(&pRam->aPages[iPage], 0);
     5324            PGM_PAGE_SET_TRACKING(pVM, &pRam->aPages[iPage], 0);
    53245325    }
    53255326
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r36891 r37354  
    202202    PVM pVM = pVCpu->CTX_SUFF(pVM);
    203203
    204     Assert(PGMIsLockOwner(pVM));
     204    PGM_LOCK_ASSERT_OWNER(pVM);
    205205
    206206    /*
     
    365365    int rc;
    366366
    367     Assert(PGMIsLockOwner(pVM));
     367    PGM_LOCK_ASSERT_OWNER(pVM);
     368
    368369    /*
    369370     * Walk page tables and pages till we're done.
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette