Changeset 37354 in vbox for trunk/src/VBox/VMM/VMMAll

Timestamp:
    Jun 7, 2011, 3:05:32 PM (14 years ago)
Location:
    trunk/src/VBox/VMM/VMMAll
Files:
    6 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r36891 → r37354)

@@ 457 @@
     if (fLockTaken)
     {
-        Assert(PGMIsLockOwner(pVM));
+        PGM_LOCK_ASSERT_OWNER(pVM);
         pgmUnlock(pVM);
     }

@@ 933 @@
     int rc;

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     /* Allocate page directory if not present. */

@@ 1020 @@
     PVM pVM = pVCpu->CTX_SUFF(pVM);

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */

@@ 1066 @@
     int rc;

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     /* Allocate page directory pointer table if not present. */

@@ 1160 @@
     PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);

-    Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
+    PGM_LOCK_ASSERT_OWNER(PGMCPU2VM(pPGM));

     AssertReturn(pPml4e, VERR_INTERNAL_ERROR);

@@ 1211 @@
     Assert(pVM->pgm.s.fNestedPaging);
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

@@ 1289 @@
 int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
 {
-    Assert(PGMIsLockOwner(pVCpu->CTX_SUFF(pVM)));
+    PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));

     int rc;

@@ 1341 @@
  *
  * @returns VBox status.
- * @param   pVCpu       VMCPU handle.
+ * @param   pVCpu       The current CPU.
  * @param   GCPtr       Guest Context virtual address of the page.
  * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.

@@ 1349 @@
 VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
 {
+    VMCPU_ASSERT_EMT(pVCpu);
     return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
 }

@@ 1363 @@
 VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
 {
+    VMCPU_ASSERT_EMT(pVCpu);
     int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
     return RT_SUCCESS(rc);

@@ 1379 @@
 VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
 {
+    VMCPU_ASSERT_EMT(pVCpu);
     return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
 }

@@ 1399 @@
 {
     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 1774 @@
 VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
 {
+    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
     PVM pVM = pVCpu->CTX_SUFF(pVM);

-    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 1883 @@
     PVM pVM = pVCpu->CTX_SUFF(pVM);

+    VMCPU_ASSERT_EMT(pVCpu);
     LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));

@@ 1897 @@ (the viewer shows old lines 1899–1993 deleted and re-added as new lines 1905–2001 because the diff re-anchored on the repeated CR3-mask switch; the net change is the assertion added in PGMSyncCR3)
 VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     int rc;

+    VMCPU_ASSERT_EMT(pVCpu);
+
     /*
      * The pool may have pending stuff and even require a return to ring-3 to
      * clear the whole thing.
      */

@@ 1999 @@
         rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
     }
+
     /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
     if (    rc == VINF_PGM_SYNC_CR3

@@ 2077 @@
     PGMMODE enmGuestMode;

+    VMCPU_ASSERT_EMT(pVCpu);
+
     /*
      * Calc the new guest mode.

@@ 2149 @@
     return pVCpu->pgm.s.enmShadowMode;
 }
+

 /**

@@ 2220 @@
 VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
 {
+    VMCPU_ASSERT_EMT(pVCpu);
     Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
+
     pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
     if (fNxe)

@@ 2278 @@
 }

-/**
- * Check if the PGM lock is currently taken.
- *
- * @returns bool locked/not locked
- * @param   pVM     The VM to operate on.
- */
-VMMDECL(bool) PGMIsLocked(PVM pVM)
-{
-    return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
-}
-

 /**

@@ 2305 @@
  * Enable or disable large page usage
  *
+ * @returns VBox status code.
  * @param   pVM             The VM to operate on.
  * @param   fUseLargePages  Use/not use large pages
  */
-VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
-{
+VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
+{
+    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
     pVM->fUseLargePages = fUseLargePages;
-}
+    return VINF_SUCCESS;
+}
+

@@ 2406 @@
     /* The single char state stuff. */
     static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
-    szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
+    szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];

 #define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )

@@ 2421 @@
         szTmp[cch++] = ':';
         static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
-        szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
-        szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
-        szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
+        szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
+        szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
+        szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
     }

@@ 2430 @@
     {
         szTmp[cch++] = ':';
-        cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
+        cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
     }

@@ 2443 @@
         szTmp[cch++] = ':';
         static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
-        szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
-        cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
+        szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
+        cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
     }
 #undef IS_PART_INCLUDED
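Across all six files the ad-hoc Assert(PGMIsLockOwner(pVM)) and Assert(PGMIsLocked(pVM)) checks are replaced by a single PGM_LOCK_ASSERT_OWNER(pVM) macro, and the public PGMIsLocked() API is dropped. The changeset does not show the macro's definition (it would live in PGMInternal.h, which is not part of this diff); a minimal sketch of what such a macro could look like, assuming it compiles away outside strict builds:

/* Hypothetical sketch only -- the real definition is in PGMInternal.h and
 * may differ.  The idea: one central place to assert PGM lock ownership,
 * cheap to disable in non-strict builds. */
#ifdef VBOX_STRICT
# define PGM_LOCK_ASSERT_OWNER(a_pVM) \
    AssertMsg(PGMIsLockOwner(a_pVM), ("PGM lock not owned by caller\n"))
#else
# define PGM_LOCK_ASSERT_OWNER(a_pVM) do { } while (0)
#endif

Centralizing the check also explains why the PGMIsLocked() wrapper around PDMCritSectIsOwned() could be removed: callers no longer query the lock state directly.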
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r36891 → r37354)

@@ 1120 @@
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     LogFlow(("InvalidatePage %RGv\n", GCPtrPage));

@@ 1481 @@
         u16 = PGMPOOL_TD_MAKE(1, pShwPage->idx);
         /* Save the page table index. */
-        PGM_PAGE_SET_PTE_INDEX(pPage, iPTDst);
+        PGM_PAGE_SET_PTE_INDEX(pVM, pPage, iPTDst);
     }
     else

@@ 1488 @@
     /* write back */
     Log2(("SyncPageWorkerTrackAddRef: u16=%#x->%#x  iPTDst=%#x\n", u16, PGM_PAGE_GET_TRACKING(pPage), iPTDst));
-    PGM_PAGE_SET_TRACKING(pPage, u16);
+    PGM_PAGE_SET_TRACKING(pVM, pPage, u16);

     /* update statistics. */

@@ 1806 @@
     LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

 #if   (   PGM_GST_TYPE == PGM_TYPE_32BIT \

@@ 2354 @@
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     /*

@@ 2545 @@
     LogFlow(("SyncPT: GCPtrPage=%RGv\n", GCPtrPage));

-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

 #if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
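The other recurring pattern, here and in the files below, is that the PGM_PAGE_SET_* mutators (PGM_PAGE_SET_TRACKING, PGM_PAGE_SET_PTE_INDEX, PGM_PAGE_SET_PDE_TYPE, and friends) gain the VM handle as a new first parameter. PGMInternal.h is not part of this diff, so the motivation is not visible here; a plausible sketch, assuming the handle is wanted so the mutator itself can assert lock ownership (the field name u16TrackingY below is an assumption):

/* Hypothetical sketch -- the real macro lives in PGMInternal.h and may do
 * more.  Passing pVM lets the mutator verify the caller holds the PGM lock
 * before it touches the shared PGMPAGE structure. */
#define PGM_PAGE_SET_TRACKING(a_pVM, a_pPage, a_u16TrackingData) \
    do { \
        PGM_LOCK_ASSERT_OWNER(a_pVM); \
        (a_pPage)->u16TrackingY = (a_u16TrackingData); \
    } while (0)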
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r36891 → r37354)

@@ 435 @@
      * Make it an MMIO/Zero page.
      */
-    PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
-    PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO);
-    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
-    PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
+    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
+    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
+    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
+    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
     PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);

     /* Flush its TLB entry. */
-    PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
+    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);

     /*

@@ 1094 @@
     LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
              GCPhysPage, pPage, GCPhysPageRemap, pPageRemap ));
-    PGM_PAGE_SET_HCPHYS(pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
-    PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
-    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
-    PGM_PAGE_SET_PAGEID(pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
+    PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
+    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
+    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
+    PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
     PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
     pCur->cAliasedPages++;

@@ 1103 @@
     /* Flush its TLB entry. */
-    PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
+    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);

     LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));

@@ 1195 @@
     LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RHp\n",
              GCPhysPage, pPage, HCPhysPageRemap));
-    PGM_PAGE_SET_HCPHYS(pPage, HCPhysPageRemap);
-    PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
-    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
+    PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
+    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
+    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
     /** @todo hack alert
      * This needs to be done properly. Currently we get away with it as the recompiler directly calls
      * IOM read and write functions. Access through PGMPhysRead/Write will crash the process.
      */
-    PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
+    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
     PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
     pCur->cAliasedPages++;

@@ 1208 @@
     /* Flush its TLB entry. */
-    PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
+    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);

     LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));

@@ 1412 @@
     PVM pVM = (PVM)pvUser;

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
     /*
      * Iterate the pages and apply the new state.

@@ 1678 @@
     State.pVM = pVM;

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     /*
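Note that PGMPhysInvalidatePageMapTLBEntry also loses its VMMDECL export here and becomes the internal pgmPhysInvalidatePageMapTLBEntry (lower-case prefix); as the PGMAllPhys.cpp hunks below show, it now asserts lock ownership instead of being safely callable from anywhere. A sketch of the resulting caller-side contract, where examplePageChanged is a made-up illustration, not a function in this changeset:

/* Illustrative only: the helper must now be called with the PGM lock held,
 * exactly as the handler code above does. */
static void examplePageChanged(PVM pVM, RTGCPHYS GCPhysPage)
{
    pgmLock(pVM);                                      /* enter the PGM critsect */
    /* ... update the PGMPAGE for GCPhysPage ... */
    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage); /* drop its mapping-TLB entry */
    pgmUnlock(pVM);
}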
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r37137 → r37354)

@@ 397 @@
  * @param   pVM     The VM handle.
  */
-VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
+void pgmPhysInvalidatePageMapTLB(PVM pVM)
 {
     pgmLock(pVM);
     STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);

+    /* Clear the shared R0/R3 TLB completely. */
     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)

@@ 409 @@
         pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
     }
+
     /** @todo clear the RC TLB whenever we add it. */
+
     pgmUnlock(pVM);
 }
+

@@ 419 @@
  * @param   GCPhys      GCPhys entry to flush
  */
-VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
-{
-    Assert(PGMIsLocked(pVM));
+void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
+{
+    PGM_LOCK_ASSERT_OWNER(pVM);

     STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
-    /* Clear the shared R0/R3 TLB entry. */
+
 #ifdef IN_RC
     unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);

@@ 432 @@
     pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
 #else
+    /* Clear the shared R0/R3 TLB entry. */
     PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
     pTlbe->GCPhys = NIL_RTGCPHYS;

@@ 438 @@
     pTlbe->pv = 0;
 #endif
-    /* @todo clear the RC TLB whenever we add it. */
+
+    /** @todo clear the RC TLB whenever we add it. */
 }

@@ 557 @@
      * Prereqs.
      */
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
     Assert(!PGM_PAGE_IS_MMIO(pPage));

@@ 580 @@
     }
     /* Mark the base as type page table, so we don't check over and over again. */
-    PGM_PAGE_SET_PDE_TYPE(pBasePage, PGM_PAGE_PDE_TYPE_PT);
+    PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);

     /* fall back to 4KB pages. */

@@ 606 @@
     }
     /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
     Assert(!PGM_PAGE_IS_MMIO(pPage));

@@ 654 @@
      */
     pVM->pgm.s.cPrivatePages++;
-    PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
-    PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
-    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
-    PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
-    PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
+    PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
+    PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
+    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
+    PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
+    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);

     /* Copy the shared page contents to the replacement page. */

@@ 706 @@
      * Prereqs.
      */
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     Assert(PGMIsUsingLargePages(pVM));

@@ 744 @@
         /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
         STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
-        PGM_PAGE_SET_PDE_TYPE(pFirstPage, PGM_PAGE_PDE_TYPE_PT);
+        PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
         return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
     }

@@ 826 @@
         if (i == _2M/PAGE_SIZE)
         {
-            PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE);
+            PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
             pVM->pgm.s.cLargePagesDisabled--;
             Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));

@@ 850 @@
     {
         Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
-        PGM_PAGE_SET_WRITTEN_TO(pPage);
-        PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
+        PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
+        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
         Assert(pVM->pgm.s.cMonitoredPages > 0);
         pVM->pgm.s.cMonitoredPages--;

@@ 874 @@
 int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
 {
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     switch (PGM_PAGE_GET_STATE(pPage))
     {

@@ 923 @@
      * Validation.
      */
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
     const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;

@@ 999 @@
 static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
 {
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

@@ 1199 @@
 int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
 {
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     /*

@@ 1230 @@
 int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
 {
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));

@@ 1252 @@
     else
     {
-        Assert(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg);
+        AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
         pTlbe->pMap = NULL;
         pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);

@@ 1291 @@
     int rc;
     AssertReturn(pPage, VERR_INTERNAL_ERROR);
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     /*

@@ 1347 @@
 {
     AssertReturn(pPage, VERR_INTERNAL_ERROR);
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);

@@ 1728 @@
     if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
     {
-        PGM_PAGE_SET_WRITTEN_TO(pPage);
-        PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
+        PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
+        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
         Assert(pVM->pgm.s.cMonitoredPages > 0);
         pVM->pgm.s.cMonitoredPages--;

@@ 1955 @@
                 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
                 STAM_PROFILE_START(&pPhys->Stat, h);
-                Assert(PGMIsLockOwner(pVM));
+                PGM_LOCK_ASSERT_OWNER(pVM);
                 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
                 pgmUnlock(pVM);

@@ 2199 @@
             STAM_PROFILE_START(&pCur->Stat, h);
-            Assert(PGMIsLockOwner(pVM));
+            PGM_LOCK_ASSERT_OWNER(pVM);
             /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
             pgmUnlock(pVM);

@@ 2411 @@
         Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
         STAM_PROFILE_START(&pPhys->Stat, h);
-        Assert(PGMIsLockOwner(pVM));
+        PGM_LOCK_ASSERT_OWNER(pVM);
         /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
         pgmUnlock(pVM);

@@ 2480 @@
                 STAM_PROFILE_START(&pPhys->Stat, h);
-                Assert(PGMIsLockOwner(pVM));
+                PGM_LOCK_ASSERT_OWNER(pVM);
                 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
                 pgmUnlock(pVM);

@@ 2806 @@
  * @returns VBox status.
- * @param   pVCpu       The VMCPU handle.
+ * @param   pVCpu       Handle to the current virtual CPU.
  * @param   pvDst       The destination address.
  * @param   GCPtrSrc    The source address (GC pointer).

@@ 2814 @@
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 2896 @@
  * @returns VBox status.
- * @param   pVCpu       The VMCPU handle.
+ * @param   pVCpu       Handle to the current virtual CPU.
  * @param   GCPtrDst    The destination address (GC pointer).
  * @param   pvSrc       The source address.

@@ 2904 @@
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 2975 @@
  * @returns VBox status.
- * @param   pVCpu       The VMCPU handle.
+ * @param   pVCpu       Handle to the current virtual CPU.
  * @param   GCPtrDst    The destination address (GC pointer).
  * @param   pvSrc       The source address.

@@ 2983 @@
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 3054 @@
  * @returns VBox status.
- * @param   pVCpu       The VMCPU handle.
+ * @param   pVCpu       Handle to the current virtual CPU.
  * @param   pvDst       The destination address.
  * @param   GCPtrSrc    The source address (GC pointer).

@@ 3066 @@
     int rc;
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 3136 @@
  * @retval  VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
  *
- * @param   pVCpu       The VMCPU handle.
+ * @param   pVCpu       Handle to the current virtual CPU.
  * @param   GCPtrDst    The destination address (GC pointer).
  * @param   pvSrc       The source address.

@@ 3147 @@
     int rc;
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 3229 @@
  * @retval  VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
  *
- * @param   pVCpu       The VMCPU handle.
+ * @param   pVCpu       Handle to the current virtual CPU.
  * @param   pCtxCore    The context core.
  * @param   pvDst       Where to put the bytes we've read.

@@ 3242 @@
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(cb <= PAGE_SIZE);
+    VMCPU_ASSERT_EMT(pVCpu);

     /** @todo r=bird: This isn't perfect!

@@ 3392 @@
  * @retval  VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
  *
- * @param   pVCpu       The VMCPU handle.
+ * @param   pVCpu       Handle to the current virtual CPU.
  * @param   pCtxCore    The context core.
  * @param   pvDst       Where to put the bytes we've read.

@@ 3409 @@
  *          unmap mappings done by the caller. Be careful!
  */
-VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
+VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
+                                              bool fRaiseTrap)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(cb <= PAGE_SIZE);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 3584 @@
  * @retval  VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
  *
- * @param   pVCpu       The VMCPU handle.
+ * @param   pVCpu       Handle to the current virtual CPU.
  * @param   pCtxCore    The context core.
  * @param   GCPtrDst    The destination address.

@@ 3600 @@
  *          unmap mappings done by the caller. Be careful!
  */
-VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
+VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
+                                               size_t cb, bool fRaiseTrap)
 {
     Assert(cb <= PAGE_SIZE);
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    VMCPU_ASSERT_EMT(pVCpu);

     /*

@@ 3778 @@
 }

-/**
- * Return the page type of the specified physical address
- *
+
+/**
+ * Return the page type of the specified physical address.
+ *
+ * @returns The page type.
  * @param   pVM             VM Handle.
  * @param   GCPhys          Guest physical address
  */
 VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
 {
-    PPGMPAGE pPage;
-
-    pPage = pgmPhysGetPage(pVM, GCPhys);
-    if (pPage)
-        return (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
-
-    return PGMPAGETYPE_INVALID;
-}
-
+    pgmLock(pVM);
+    PPGMPAGE    pPage     = pgmPhysGetPage(pVM, GCPhys);
+    PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
+    pgmUnlock(pVM);
+
+    return enmPgType;
+}
+
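The other theme in PGMAllPhys.cpp is VMCPU_ASSERT_EMT(pVCpu) at the top of every guest-context read/write entry point, documenting that these may only run on the virtual CPU's own emulation thread (EMT). The macro itself is not part of this diff; a sketch of what such an assertion amounts to, assuming the standard VBox VMCPU_IS_EMT check (the real macro in VBox/vmm/vm.h may log more detail):

/* Hypothetical sketch -- the real VMCPU_ASSERT_EMT is defined in
 * VBox/vmm/vm.h.  It should reduce to: "the calling native thread is the
 * EMT bound to this VMCPU". */
#define VMCPU_ASSERT_EMT(a_pVCpu) \
    AssertMsg(VMCPU_IS_EMT(a_pVCpu), \
              ("Not the emulation thread of VCpu #%u!\n", (a_pVCpu)->idCpu))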
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r36891 → r37354)

@@ 1717 @@
     unsigned idxFree;

-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     AssertCompile(RT_ELEMENTS(pPool->aDirtyPages) == 8 || RT_ELEMENTS(pPool->aDirtyPages) == 16);
     Assert(!pPage->fDirty);

@@ 1791 @@
 {
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     if (!pPool->cDirtyPages)
         return false;

@@ 1820 @@
 {
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));

@@ 1859 @@
 {
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));

@@ 1880 @@
 {
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-    Assert(PGMIsLocked(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));
     unsigned idxDirtyPage = RT_ELEMENTS(pPool->aDirtyPages);

@@ 3355 @@
         bool fKeptPTEs = pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, fFlushPTEs, iShw, PGM_PAGE_GET_PTE_INDEX(pPhysPage));
         if (!fKeptPTEs)
-            PGM_PAGE_SET_TRACKING(pPhysPage, 0);
+            PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
         STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPT, f);
     }

@@ 3370 @@
 static void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iPhysExt)
 {
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     bool fKeepList = false;

@@ 3407 @@
     pPool->iPhysExtFreeHead = iPhysExtStart;
     /* Invalidate the tracking data. */
-    PGM_PAGE_SET_TRACKING(pPhysPage, 0);
+    PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
 }

@@ 3462 @@
     {
         /* Mark the large page as disabled as we need to break it up to change a single page in the 2 MB range. */
-        PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+        PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
         pVM->pgm.s.cLargePagesDisabled++;

@@ 3666 @@
     }

-    PGM_PAGE_SET_TRACKING(pPhysPage, 0);
+    PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
     STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);

@@ 3852 @@
 PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt)
 {
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     uint16_t iPhysExt = pPool->iPhysExtFreeHead;

@@ 3876 @@
 void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt)
 {
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     Assert(iPhysExt < pPool->cMaxPhysExts);

@@ 3898 @@
 void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt)
 {
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

@@ 3935 @@
 static uint16_t pgmPoolTrackPhysExtInsert(PVM pVM, uint16_t iPhysExt, uint16_t iShwPT, uint16_t iPte)
 {
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);

@@ 4069 @@
 void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMPAGE pPhysPage, uint16_t iPte)
 {
+    PVM pVM = pPool->CTX_SUFF(pVM);
     const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
     AssertFatalMsg(cRefs == PGMPOOL_TD_CREFS_PHYSEXT, ("cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx));

@@ 4075 @@
     if (iPhysExt != PGMPOOL_TD_IDX_OVERFLOWED)
     {
-        PVM pVM = pPool->CTX_SUFF(pVM);
         pgmLock(pVM);

@@ 4111 @@
                     pgmPoolTrackPhysExtFree(pVM, iPhysExt);
                     Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d lonely\n", pPhysPage, pPage->idx));
-                    PGM_PAGE_SET_TRACKING(pPhysPage, 0);
+                    PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
                 }
                 else if (iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX)
                 {
                     /* head */
                     Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d head\n", pPhysPage, pPage->idx));
-                    PGM_PAGE_SET_TRACKING(pPhysPage, PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtNext));
+                    PGM_PAGE_SET_TRACKING(pVM, pPhysPage, PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtNext));
                     pgmPoolTrackPhysExtFree(pVM, iPhysExt);
                 }

@@ 4163 @@
      * Lookup the page and check if it checks out before derefing it.
      */
-    PPGMPAGE pPhysPage = pgmPhysGetPage(pPool->CTX_SUFF(pVM), GCPhys);
+    PVM      pVM       = pPool->CTX_SUFF(pVM);
+    PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhys);
     if (pPhysPage)
     {

@@ 4205 @@
      */
     RTHCPHYS HCPhysHinted;
-    PPGMPAGE pPhysPage = pgmPhysGetPage(pPool->CTX_SUFF(pVM), GCPhysHint);
+    PVM      pVM       = pPool->CTX_SUFF(pVM);
+    PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhysHint);
     if (pPhysPage)
     {

@@ 5094 @@
 {
     PVM pVM = pPool->CTX_SUFF(pVM);
-
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     /*

@@ 5117 @@
 {
     PVM pVM = pPool->CTX_SUFF(pVM);
-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     return (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
 }

@@ 5237 @@
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
     STAM_PROFILE_START(&pPool->StatR3Reset, a);
     LogFlow(("pgmR3PoolReset:\n"));

@@ 5321 @@
         unsigned iPage = pRam->cb >> PAGE_SHIFT;
         while (iPage-- > 0)
-            PGM_PAGE_SET_TRACKING(&pRam->aPages[iPage], 0);
+            PGM_PAGE_SET_TRACKING(pVM, &pRam->aPages[iPage], 0);
     }
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h (r36891 → r37354)

@@ 202 @@
     PVM pVM = pVCpu->CTX_SUFF(pVM);

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);

     /*

@@ 365 @@
     int rc;

-    Assert(PGMIsLockOwner(pVM));
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
     /*
      * Walk page tables and pages till we're done.