Changeset 14748 in vbox for trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp
Timestamp: Nov 28, 2008 12:34:24 AM
File: 1 edited
trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp
--- PGMR0DynMap.cpp (r14674)
+++ PGMR0DynMap.cpp (r14748)
 /** The number of pages we reserve per CPU. */
 #define PGMR0DYNMAP_PAGES_PER_CPU   64
+/** The number of guard pages. */
+#if defined(VBOX_STRICT)
+# define PGMR0DYNMAP_GUARD_PAGES    7
+#else
+# define PGMR0DYNMAP_GUARD_PAGES    0
+#endif
+/** The dummy physical address of guard pages. */
+#define PGMR0DYNMAP_GUARD_PAGE_HCPHYS       UINT32_C(0x7777feed)
+/** The dummy reference count of guard pages. (Must be non-zero.) */
+#define PGMR0DYNMAP_GUARD_PAGE_REF_COUNT    INT32_C(0x7777feed)
+#if 0
+/** Define this to just clear the present bit on guard pages.
+ * The alternative is to replace the entire PTE with an bad not-present
+ * PTE. Either way, XNU will screw us. :-/ */
+#define PGMR0DYNMAP_GUARD_NP
+#endif
+/** The dummy PTE value for a page. */
+#define PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE   X86_PTE_PG_MASK
+/** The dummy PTE value for a page. */
+#define PGMR0DYNMAP_GUARD_PAGE_PAE_PTE      UINT64_MAX /*X86_PTE_PAE_PG_MASK*/
 /** Calcs the overload threshold. Current set at 50%. */
 #define PGMR0DYNMAP_CALC_OVERLOAD(cPages)   ((cPages) / 2)

+#if 0
 /* Assertions causes panics if preemption is disabled, this can be used to work aroudn that. */
-/*#define RTSpinlockAcquire(a,b) do {} while (0)
-#define RTSpinlockRelease(a,b) do {} while (0) */
+#define RTSpinlockAcquire(a,b) do {} while (0)
+#define RTSpinlockRelease(a,b) do {} while (0)
+#endif
…
     /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */
     bool                        fLegacyMode;
-    /** The current load. */
+    /** The current load.
+     * This does not include guard pages. */
     uint32_t                    cLoad;
     /** The max load ever.
…
     /** Initialization / termination lock. */
     RTSEMFASTMUTEX              hInitLock;
+    /** The number of guard pages. */
+    uint32_t                    cGuardPages;
     /** The number of users (protected by hInitLock). */
     uint32_t                    cUsers;
…
 #endif
     }
-    else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages))
+    else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages - pThis->cGuardPages))
         rc = pgmR0DynMapExpand(pThis);
     if (RT_SUCCESS(rc))
…
 {
     pVM->pgm.s.pvR0DynMapUsed = NULL;
+
+#ifdef VBOX_STRICT
+    PGMR0DynMapAssertIntegrity();
+#endif

     /*
…

 /**
+ * Shoots down the TLBs for all the cache pages, pgmR0DynMapTearDown helper.
+ *
+ * @param   idCpu       The current CPU.
+ * @param   pvUser1     The dynamic mapping cache instance.
+ * @param   pvUser2     Unused, NULL.
+ */
+static DECLCALLBACK(void) pgmR0DynMapShootDownTlbs(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    Assert(!pvUser2);
+    PPGMR0DYNMAP        pThis   = (PPGMR0DYNMAP)pvUser1;
+    Assert(pThis == g_pPGMR0DynMap);
+    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
+    uint32_t            iPage   = pThis->cPages;
+    while (iPage-- > 0)
+        ASMInvalidatePage(paPages[iPage].pvPage);
+}
+
+
+/**
+ * Shoot down the TLBs for every single cache entry on all CPUs.
+ *
+ * @returns IPRT status code (RTMpOnAll).
+ * @param   pThis       The dynamic mapping cache instance.
+ */
+static int pgmR0DynMapTlbShootDown(PPGMR0DYNMAP pThis)
+{
+    int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
+    AssertRC(rc);
+    if (RT_FAILURE(rc))
+    {
+        uint32_t iPage = pThis->cPages;
+        while (iPage-- > 0)
+            ASMInvalidatePage(pThis->paPages[iPage].pvPage);
+    }
+    return rc;
+}
+
+
+/**
  * Calculate the new cache size based on cMaxLoad statistics.
…
     RTCPUID     cCpus     = RTMpGetCount();
     AssertReturn(cCpus > 0 && cCpus <= RTCPUSET_MAX_CPUS, 0);
-    uint32_t    cPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
+    uint32_t    cPages    = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
     uint32_t    cMinPages = cCpus * (PGMR0DYNMAP_PAGES_PER_CPU / 2);
…
     if (cPages < pThis->cPages)
         cPages = pThis->cPages;
+    cPages *= PGMR0DYNMAP_GUARD_PAGES + 1;
     if (cPages > PGMR0DYNMAP_MAX_PAGES)
         cPages = PGMR0DYNMAP_MAX_PAGES;
…
     if (cMinPages < pThis->cPages)
         cMinPages = pThis->cPages;
+    cMinPages *= PGMR0DYNMAP_GUARD_PAGES + 1;
     if (cMinPages > PGMR0DYNMAP_MAX_PAGES)
         cMinPages = PGMR0DYNMAP_MAX_PAGES;
…
             return VERR_INTERNAL_ERROR;
         }
-        Log(("#%d: iEntry=%4d uEntry=%#llx pvEntry=%p HCPhys=%RHp\n", i, iEntry, uEntry, pvEntry, pPgLvl->a[i].HCPhys));
+        /*Log(("#%d: iEntry=%4d uEntry=%#llx pvEntry=%p HCPhys=%RHp\n", i, iEntry, uEntry, pvEntry, pPgLvl->a[i].HCPhys));*/
     }
…

 /**
+ * Sets up a guard page.
+ *
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   pPage       The page.
+ */
+DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMR0DYNMAP pThis, PPGMR0DYNMAPENTRY pPage)
+{
+    memset(pPage->pvPage, 0xfd, PAGE_SIZE);
+    pPage->cRefs  = PGMR0DYNMAP_GUARD_PAGE_REF_COUNT;
+    pPage->HCPhys = PGMR0DYNMAP_GUARD_PAGE_HCPHYS;
+#ifdef PGMR0DYNMAP_GUARD_NP
+    ASMAtomicBitClear(pPage->uPte.pv, X86_PTE_BIT_P);
+#else
+    if (pThis->fLegacyMode)
+        ASMAtomicWriteU32(&pPage->uPte.pLegacy->u, PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE);
+    else
+        ASMAtomicWriteU64(&pPage->uPte.pPae->u, PGMR0DYNMAP_GUARD_PAGE_PAE_PTE);
+#endif
+    pThis->cGuardPages++;
+}
+
+
+/**
  * Adds a new segment of the specified size.
…

     /*
-     * Do the array rellocation first.
+     * Do the array reallocations first.
      * (The pages array has to be replaced behind the spinlock of course.)
      */
…
     PGMR0DYNMAPPGLVL    PgLvl;
     pgmR0DynMapPagingArrayInit(pThis, &PgLvl);
-    uint32_t            iEndPage = pThis->cPages + cPages;
+    uint32_t const      iEndPage = pThis->cPages + cPages;
     for (uint32_t iPage = pThis->cPages;
          iPage < iEndPage;
…
     if (RT_SUCCESS(rc))
     {
-        /** @todo setup guard pages here later (strict builds should leave every
-         *        second page and the start/end pages not present). */
+#if PGMR0DYNMAP_GUARD_PAGES > 0
+        /*
+         * Setup guard pages.
+         * (Note: TLBs will be shot down later on.)
+         */
+        uint32_t iPage = pThis->cPages;
+        while (iPage < iEndPage)
+        {
+            for (uint32_t iGPg = 0; iGPg < PGMR0DYNMAP_GUARD_PAGES && iPage < iEndPage; iGPg++, iPage++)
+                pgmR0DynMapSetupGuardPage(pThis, &pThis->paPages[iPage]);
+            iPage++; /* the guarded page */
+        }
+
+        /* Make sure the very last page is a guard page too. */
+        iPage = iEndPage - 1;
+        if (pThis->paPages[iPage].cRefs != PGMR0DYNMAP_GUARD_PAGE_REF_COUNT)
+            pgmR0DynMapSetupGuardPage(pThis, &pThis->paPages[iPage]);
+#endif /* PGMR0DYNMAP_GUARD_PAGES > 0 */

         /*
…
     }
     Assert(ASMGetFlags() & X86_EFL_IF);
+
+#if PGMR0DYNMAP_GUARD_PAGES > 0
+    /* paranoia */
+    if (RT_SUCCESS(rc))
+        pgmR0DynMapTlbShootDown(pThis);
+#endif
     return rc;
 }
…
     }
     Assert(ASMGetFlags() & X86_EFL_IF);
+
+#if PGMR0DYNMAP_GUARD_PAGES > 0
+    /* paranoia */
+    if (RT_SUCCESS(rc))
+        pgmR0DynMapTlbShootDown(pThis);
+#endif
     return rc;
 }
-
-
-/**
- * Shoots down the TLBs for all the cache pages, pgmR0DynMapTearDown helper.
- *
- * @param   idCpu       The current CPU.
- * @param   pvUser1     The dynamic mapping cache instance.
- * @param   pvUser2     Unused, NULL.
- */
-static DECLCALLBACK(void) pgmR0DynMapShootDownTlbs(RTCPUID idCpu, void *pvUser1, void *pvUser2)
-{
-    Assert(!pvUser2);
-    PPGMR0DYNMAP        pThis   = (PPGMR0DYNMAP)pvUser1;
-    Assert(pThis == g_pPGMR0DynMap);
-    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
-    uint32_t            iPage   = pThis->cPages;
-    while (iPage-- > 0)
-        ASMInvalidatePage(paPages[iPage].pvPage);
-}
…
     /*
      * Shoot down the TLBs on all CPUs before freeing them.
-     * If RTMpOnAll fails, make sure the TLBs are invalidated on the current CPU at least.
      */
-    int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
-    AssertRC(rc);
-    if (RT_FAILURE(rc))
-    {
-        iPage = pThis->cPages;
-        while (iPage-- > 0)
-            ASMInvalidatePage(paPages[iPage].pvPage);
-    }
+    pgmR0DynMapTlbShootDown(pThis);

     /*
…
     while (pThis->pSegHead)
     {
+        int rc;
         PPGMR0DYNMAPSEG pSeg = pThis->pSegHead;
         pThis->pSegHead = pSeg->pNext;
…
         iFreePage = iPage;
     else if (!paPages[(iPage + 1) % cPages].cRefs)
-        iFreePage = iPage;
+        iFreePage = iPage + 1;
     else if (!paPages[(iPage + 2) % cPages].cRefs)
-        iFreePage = iPage;
+        iFreePage = iPage + 2;
     else if (!paPages[(iPage + 3) % cPages].cRefs)
-        iFreePage = iPage;
+        iFreePage = iPage + 3;
     else if (!paPages[(iPage + 4) % cPages].cRefs)
-        iFreePage = iPage;
+        iFreePage = iPage + 4;
     else
     {
…
          * Search for an unused or matching entry.
          */
-        iFreePage = (iPage + 5) % pThis->cPages;
+        iFreePage = (iPage + 5) % cPages;
         for (;;)
         {
…
             /* advance */
             iFreePage = (iFreePage + 1) % cPages;
-            if (RT_UNLIKELY(iFreePage != iPage))
+            if (RT_UNLIKELY(iFreePage == iPage))
                 return UINT32_MAX;
         }
…
      * Setup the new entry.
      */
+    /*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/
     paPages[iFreePage].HCPhys = HCPhys;
     RTCpuSetFill(&paPages[iFreePage].PendingSet);
…
  * Maps a page into the pool.
  *
- * @returns Pointer to the mapping.
+ * @returns Page index on success, UINT32_MAX on failure.
  * @param   pThis       The dynamic mapping cache instance.
  * @param   HCPhys      The address of the page to be mapped.
- * @param   piPage      Where to store the page index.
- */
-DECLINLINE(void *) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t *piPage)
+ * @param   ppvPage     Where to the page address.
+ */
+DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, void **ppvPage)
 {
     RTSPINLOCKTMP   Tmp = RTSPINLOCKTMP_INITIALIZER;
…
         {
             RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-            return NULL;
+            return iPage;
         }
     }
…
         if (pThis->cLoad > pThis->cMaxLoad)
             pThis->cMaxLoad = pThis->cLoad;
-        AssertMsg(pThis->cLoad <= pThis->cPages, ("%d/%d\n", pThis->cLoad, pThis->cPages));
+        AssertMsg(pThis->cLoad <= pThis->cPages - pThis->cGuardPages, ("%d/%d\n", pThis->cLoad, pThis->cPages - pThis->cGuardPages));
     }
…
         ASMAtomicDecS32(&paPages[iPage].cRefs);
         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-        AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%p HCPhys=%RHp\n", cRefs, iPage, HCPhys), NULL);
+        AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%p HCPhys=%RHp\n", cRefs, iPage, HCPhys), UINT32_MAX);
     }
     void *pvPage = paPages[iPage].pvPage;
…
         ASMInvalidatePage(pvPage);

-    *piPage = iPage;
-    return pvPage;
+    *ppvPage = pvPage;
+    return iPage;
+}
+
+
+/**
+ * Assert the the integrity of the pool.
+ *
+ * @returns VBox status code.
+ */
+VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
+{
+    /*
+     * Basic pool stuff that doesn't require any lock, just assumes we're a user.
+     */
+    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
+    if (!pThis)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_POINTER);
+    AssertReturn(pThis->u32Magic == PGMR0DYNMAP_MAGIC, VERR_INVALID_MAGIC);
+    if (!pThis->cUsers)
+        return VERR_INVALID_PARAMETER;
+
+
+    int             rc = VINF_SUCCESS;
+    RTSPINLOCKTMP   Tmp = RTSPINLOCKTMP_INITIALIZER;
+    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
+#define CHECK_RET(expr, a) \
+    do { \
+        if (!(expr)) \
+        { \
+            RTSpinlockRelease(pThis->hSpinlock, &Tmp); \
+            AssertMsg1(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \
+            AssertMsg2 a; \
+            return VERR_INTERNAL_ERROR; \
+        } \
+    } while (0)
+
+    /*
+     * Check that the PTEs are correct.
+     */
+    uint32_t            cGuard  = 0;
+    uint32_t            cLoad   = 0;
+    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
+    uint32_t            iPage   = pThis->cPages;
+    if (pThis->fLegacyMode)
+    {
+        PCX86PGUINT paSavedPTEs = (PCX86PGUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
+        while (iPage-- > 0)
+        {
+            CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
+            if (    paPages[iPage].cRefs  == PGMR0DYNMAP_GUARD_PAGE_REF_COUNT
+                &&  paPages[iPage].HCPhys == PGMR0DYNMAP_GUARD_PAGE_HCPHYS)
+            {
+#ifdef PGMR0DYNMAP_GUARD_NP
+                CHECK_RET(paPages[iPage].uPte.pLegacy->u == (paSavedPTEs[iPage] & ~(X86PGUINT)X86_PTE_P),
+                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
+#else
+                CHECK_RET(paPages[iPage].uPte.pLegacy->u == PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE,
+                          ("#%u: %#x", iPage, paPages[iPage].uPte.pLegacy->u));
+#endif
+                cGuard++;
+            }
+            else if (paPages[iPage].HCPhys != NIL_RTHCPHYS)
+            {
+                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
+                X86PGUINT uPte = (paSavedPTEs[iPage] & X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)
+                               | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+                               | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
+                CHECK_RET(paPages[iPage].uPte.pLegacy->u == uPte,
+                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, uPte));
+                if (paPages[iPage].cRefs)
+                    cLoad++;
+            }
+            else
+                CHECK_RET(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage],
+                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
+        }
+    }
+    else
+    {
+        PCX86PGPAEUINT paSavedPTEs = (PCX86PGPAEUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
+        while (iPage-- > 0)
+        {
+            CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
+            if (    paPages[iPage].cRefs  == PGMR0DYNMAP_GUARD_PAGE_REF_COUNT
+                &&  paPages[iPage].HCPhys == PGMR0DYNMAP_GUARD_PAGE_HCPHYS)
+            {
+#ifdef PGMR0DYNMAP_GUARD_NP
+                CHECK_RET(paPages[iPage].uPte.pPae->u == (paSavedPTEs[iPage] & ~(X86PGPAEUINT)X86_PTE_P),
+                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
+#else
+                CHECK_RET(paPages[iPage].uPte.pPae->u == PGMR0DYNMAP_GUARD_PAGE_PAE_PTE,
+                          ("#%u: %#llx", iPage, paPages[iPage].uPte.pPae->u));
+#endif
+                cGuard++;
+            }
+            else if (paPages[iPage].HCPhys != NIL_RTHCPHYS)
+            {
+                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
+                X86PGPAEUINT uPte = (paSavedPTEs[iPage] & X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)
+                                  | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+                                  | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
+                CHECK_RET(paPages[iPage].uPte.pPae->u == uPte,
+                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pLegacy->u, uPte));
+                if (paPages[iPage].cRefs)
+                    cLoad++;
+            }
+            else
+                CHECK_RET(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage],
+                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
+        }
+    }
+
+    CHECK_RET(cLoad == pThis->cLoad, ("%u %u\n", cLoad, pThis->cLoad));
+    CHECK_RET(cGuard == pThis->cGuardPages, ("%u %u\n", cGuard, pThis->cGuardPages));
+
+#undef CHECK_RET
+    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
+    return VINF_SUCCESS;
 }
…
     }

-    Assert(pThis->cLoad <= pThis->cPages);
+    Assert(pThis->cLoad <= pThis->cPages - pThis->cGuardPages);
     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
 }
…
      * Map it.
      */
-    uint32_t        iPage;
-    void           *pvPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, &iPage);
-    if (RT_UNLIKELY(!pvPage))
+    uint32_t const  iPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, ppv);
+    if (RT_UNLIKELY(iPage == UINT32_MAX))
     {
         static uint32_t s_cBitched = 0;
         if (++s_cBitched < 10)
-            LogRel(("PGMDynMapHCPage: cLoad=%u/%u cPages=%u\n",
-                    g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages));
+            LogRel(("PGMDynMapHCPage: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
+                    g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages, g_pPGMR0DynMap->cGuardPages));
         return VERR_PGM_DYNMAP_FAILED;
     }
-    *ppv = pvPage;

     /*
…
     PPGMMAPSET  pSet = &pVM->aCpus[0].pgm.s.AutoSet;
     uint32_t    i;
+
+    /*
+     * Assert internal integrity first.
+     */
+    LogRel(("Test #0\n"));
+    int rc = PGMR0DynMapAssertIntegrity();
+    if (RT_FAILURE(rc))
+        return rc;
+
     void *pvR0DynMapUsedSaved = pVM->pgm.s.pvR0DynMapUsed;
     pVM->pgm.s.pvR0DynMapUsed = pThis;
…
     void *pv = (void *)(intptr_t)-1;
     void *pv2 = (void *)(intptr_t)-2;
-    int rc  = PGMDynMapHCPage(pVM, cr3, &pv);
+    rc      = PGMDynMapHCPage(pVM, cr3, &pv);
     int rc2 = PGMDynMapHCPage(pVM, cr3, &pv2);
     ASMIntEnable();
…
         && pv == pv2)
     {
-        LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+        LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+        rc = PGMR0DynMapAssertIntegrity();

         /*
…
         }
         if (RT_SUCCESS(rc))
+            rc = PGMR0DynMapAssertIntegrity();
+        if (RT_SUCCESS(rc))
         {
             /*
…
                 rc = VERR_INTERNAL_ERROR;
         }
-        LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+        LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+        if (RT_SUCCESS(rc))
+            rc = PGMR0DynMapAssertIntegrity();
         if (RT_SUCCESS(rc))
         {
…
             LogRel(("Test #4\n"));
             ASMIntDisable();
-            for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) / 2 - 3 + 1 && RT_SUCCESS(rc) && pv2 != pv; i++)
-                rc = PGMDynMapHCPage(pVM, cr3 + PAGE_SIZE * -(i + 5), &pv2);
+            for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) / 2 - 3 + 1 && pv2 != pv; i++)
+            {
+                rc = PGMDynMapHCPage(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2);
+                if (RT_SUCCESS(rc))
+                    rc = PGMR0DynMapAssertIntegrity();
+                if (RT_FAILURE(rc))
+                    break;
+            }
             ASMIntEnable();
             if (rc == VERR_PGM_DYNMAP_FULL_SET)
             {
-                rc = VINF_SUCCESS;
-
                 /* flush the set. */
+                LogRel(("Test #5\n"));
                 ASMIntDisable();
                 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
…
                 PGMDynMapStartAutoSet(&pVM->aCpus[0]);
                 ASMIntEnable();
+
+                rc = PGMR0DynMapAssertIntegrity();
             }
             else
             {
-                LogRel(("failed(%d): rc=%Rrc, wanted %d ; pv2=%p Set=%u/%u\n", __LINE__,
-                        rc, VERR_PGM_DYNMAP_FULL_SET, pv2, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+                LogRel(("failed(%d): rc=%Rrc, wanted %d ; pv2=%p Set=%u/%u; i=%d\n", __LINE__,
+                        rc, VERR_PGM_DYNMAP_FULL_SET, pv2, pSet->cEntries, RT_ELEMENTS(pSet->aEntries), i));
                 if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
             }
…
     ASMIntEnable();

+    if (RT_SUCCESS(rc))
+        rc = PGMR0DynMapAssertIntegrity();
+    else
+        PGMR0DynMapAssertIntegrity();
+
     LogRel(("Result: rc=%Rrc Load=%u/%u/%u Set=%#x/%u\n", rc,
-            pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+            pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
     pVM->pgm.s.pvR0DynMapUsed = pvR0DynMapUsedSaved;
     LogRel(("pgmR0DynMapTest: ****** END ******\n"));
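The guard-page layout introduced by this changeset is easier to see in isolation. The standalone sketch below is not VirtualBox code: the entry struct, constants, and function names are simplified stand-ins for PGMR0DYNMAPENTRY and the PGMR0DYNMAP_GUARD_PAGE_* values. It only mirrors the interleaving performed by the new setup loop (PGMR0DYNMAP_GUARD_PAGES guard entries, then one usable entry, repeated, with the very last entry forced to be a guard) and the bookkeeping that excludes guard pages from the usable capacity.

#include <stdint.h>
#include <stdio.h>

#define GUARD_PAGES      7            /* stand-in for PGMR0DYNMAP_GUARD_PAGES (strict builds) */
#define GUARD_REF_COUNT  0x7777feed   /* stand-in for PGMR0DYNMAP_GUARD_PAGE_REF_COUNT */

typedef struct { int32_t cRefs; } ENTRY;   /* simplified stand-in for PGMR0DYNMAPENTRY */

/* Mirrors the new setup loop: mark GUARD_PAGES entries as guards, skip one usable
   ("guarded") entry, repeat, and force the very last entry to be a guard as well. */
static uint32_t setupGuards(ENTRY *paPages, uint32_t cPages)
{
    uint32_t cGuardPages = 0;
    uint32_t iPage = 0;
    while (iPage < cPages)
    {
        for (uint32_t iGPg = 0; iGPg < GUARD_PAGES && iPage < cPages; iGPg++, iPage++)
        {
            paPages[iPage].cRefs = GUARD_REF_COUNT;
            cGuardPages++;
        }
        iPage++; /* the guarded (usable) page */
    }
    if (cPages && paPages[cPages - 1].cRefs != GUARD_REF_COUNT)
    {
        paPages[cPages - 1].cRefs = GUARD_REF_COUNT;
        cGuardPages++;
    }
    return cGuardPages;
}

int main(void)
{
    enum { CPAGES = 64 * (GUARD_PAGES + 1) };   /* cPages *= PGMR0DYNMAP_GUARD_PAGES + 1 */
    static ENTRY aPages[CPAGES];
    uint32_t cGuardPages = setupGuards(aPages, CPAGES);
    /* The load and overload checks in the changeset use cPages - cGuardPages. */
    printf("cPages=%u cGuardPages=%u usable=%u\n",
           (unsigned)CPAGES, (unsigned)cGuardPages, (unsigned)(CPAGES - cGuardPages));
    return 0;
}

With PGMR0DYNMAP_GUARD_PAGES set to 7 in strict builds, only every eighth entry remains usable, which is why pgmR0DynMapCalcNewSize now multiplies both cPages and cMinPages by PGMR0DYNMAP_GUARD_PAGES + 1 and why the load assertions compare against cPages - cGuardPages.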