Changeset 14536 in vbox
- Timestamp:
- Nov 24, 2008 5:39:36 PM (16 years ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp
r14528 r14536 333 333 * Do we need the cache? Skip the last bit if we don't. 334 334 */ 335 #if 1 335 336 if (!HWACCMIsEnabled(pVM)) 336 337 return VINF_SUCCESS; 338 #endif 337 339 338 340 /* … … 346 348 pThis->cUsers++; 347 349 if (pThis->cUsers == 1) 350 { 348 351 rc = pgmR0DynMapSetup(pThis); 352 #ifdef DEBUG 353 if (RT_SUCCESS(rc)) 354 { 355 rc = pgmR0DynMapTest(pVM); 356 if (RT_FAILURE(rc)) 357 pgmR0DynMapTearDown(pThis); 358 } 359 #endif 360 } 349 361 else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages)) 350 362 rc = pgmR0DynMapExpand(pThis); … … 355 367 356 368 RTSemFastMutexRelease(pThis->hInitLock); 357 358 #ifdef DEBUG359 /*360 * Run some tests.361 */362 if (RT_SUCCESS(rc))363 pgmR0DynMapTest(pVM);364 #endif365 369 return rc; 366 370 } … … 528 532 529 533 pPgLvl->a[1].fPhysMask = X86_PDPE_PG_MASK; 530 pPgLvl->a[1].fPtrMask = X86_PD_ MASK;531 pPgLvl->a[1].fPtrShift = X86_PD_ SHIFT;534 pPgLvl->a[1].fPtrMask = X86_PD_PAE_MASK; 535 pPgLvl->a[1].fPtrShift = X86_PD_PAE_SHIFT; 532 536 pPgLvl->a[1].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? 
X86_PDE_PS : 0); 533 537 pPgLvl->a[1].fResMask = X86_PDE_P | X86_PDE_RW; 534 538 535 539 pPgLvl->a[2].fPhysMask = X86_PDE_PAE_PG_MASK; 536 pPgLvl->a[2].fPtrMask = X86_PT_ MASK;537 pPgLvl->a[2].fPtrShift = X86_PT_ SHIFT;540 pPgLvl->a[2].fPtrMask = X86_PT_PAE_MASK; 541 pPgLvl->a[2].fPtrShift = X86_PT_PAE_SHIFT; 538 542 pPgLvl->a[2].fAndMask = X86_PTE_P | X86_PTE_RW; 539 543 pPgLvl->a[2].fResMask = X86_PTE_P | X86_PTE_RW; … … 544 548 case SUPPAGINGMODE_AMD64_NX: 545 549 case SUPPAGINGMODE_AMD64_GLOBAL_NX: 546 pPgLvl->cLevels = 3;550 pPgLvl->cLevels = 4; 547 551 pPgLvl->a[0].fPhysMask = X86_CR3_AMD64_PAGE_MASK; 552 pPgLvl->a[0].fPtrShift = X86_PML4_SHIFT; 548 553 pPgLvl->a[0].fPtrMask = X86_PML4_MASK; 549 pPgLvl->a[0].fPtrShift = X86_PML4_SHIFT;550 554 pPgLvl->a[0].fAndMask = X86_PML4E_P | X86_PML4E_RW; 551 555 pPgLvl->a[0].fResMask = X86_PML4E_P | X86_PML4E_RW; 552 556 553 557 pPgLvl->a[1].fPhysMask = X86_PML4E_PG_MASK; 558 pPgLvl->a[1].fPtrShift = X86_PDPT_SHIFT; 554 559 pPgLvl->a[1].fPtrMask = X86_PDPT_MASK_AMD64; 555 pPgLvl->a[1].fPtrShift = X86_PDPT_SHIFT;556 560 pPgLvl->a[1].fAndMask = X86_PDPE_P | X86_PDPE_RW /** @todo check for X86_PDPT_PS support. */; 557 561 pPgLvl->a[1].fResMask = X86_PDPE_P | X86_PDPE_RW; 558 562 559 563 pPgLvl->a[2].fPhysMask = X86_PDPE_PG_MASK; 560 pPgLvl->a[2].fPtr Mask = X86_PD_MASK;561 pPgLvl->a[2].fPtr Shift = X86_PD_SHIFT;564 pPgLvl->a[2].fPtrShift = X86_PD_PAE_SHIFT; 565 pPgLvl->a[2].fPtrMask = X86_PD_PAE_MASK; 562 566 pPgLvl->a[2].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? 
X86_PDE_PS : 0); 563 567 pPgLvl->a[2].fResMask = X86_PDE_P | X86_PDE_RW; 564 568 565 569 pPgLvl->a[3].fPhysMask = X86_PDE_PAE_PG_MASK; 566 pPgLvl->a[3].fPtr Mask = X86_PT_MASK;567 pPgLvl->a[3].fPtr Shift = X86_PT_SHIFT;570 pPgLvl->a[3].fPtrShift = X86_PT_PAE_SHIFT; 571 pPgLvl->a[3].fPtrMask = X86_PT_PAE_MASK; 568 572 pPgLvl->a[3].fAndMask = X86_PTE_P | X86_PTE_RW; 569 573 pPgLvl->a[3].fResMask = X86_PTE_P | X86_PTE_RW; … … 609 613 { 610 614 Assert(!(ASMGetFlags() & X86_EFL_IF)); 611 612 615 void *pvEntry = NULL; 613 616 X86PGPAEUINT uEntry = ASMGetCR3(); … … 678 681 return VERR_INTERNAL_ERROR; 679 682 } 683 Log(("#%d: iEntry=%d uEntry=%#llx pvEntry=%p HCPhys=%RHp \n", i, iEntry, uEntry, pvEntry, pPgLvl->a[i].HCPhys)); 680 684 } 681 685 … … 775 779 else 776 780 ((PX86PGPAEUINT)pThis->pvSavedPTEs)[iPage] = pThis->paPages[iPage].uPte.pPae->u; 781 782 #ifdef VBOX_STRICT 783 /* Check that we've got the right entry. */ 784 RTHCPHYS HCPhysPage = RTR0MemObjGetPagePhysAddr(pSeg->hMemObj, iPage - pSeg->iPage); 785 RTHCPHYS HCPhysPte = pThis->fLegacyMode 786 ? pThis->paPages[iPage].uPte.pLegacy->u & X86_PTE_PG_MASK 787 : pThis->paPages[iPage].uPte.pPae->u & X86_PTE_PAE_PG_MASK; 788 if (HCPhysPage != HCPhysPte) 789 { 790 LogRel(("pgmR0DynMapAddSeg: internal error - page #%u HCPhysPage=%RHp HCPhysPte=%RHp pbPage=%p pvPte=%p\n", 791 iPage - pSeg->iPage, HCPhysPage, HCPhysPte, pbPage, pThis->paPages[iPage].uPte.pv)); 792 rc = VERR_INTERNAL_ERROR; 793 break; 794 } 795 #endif 777 796 } /* for each page */ 778 797 ASMIntEnable(); … … 1431 1450 1432 1451 #ifdef DEBUG 1452 /** For pgmR0DynMapTest3PerCpu. */ 1453 typedef struct PGMR0DYNMAPTEST 1454 { 1455 uint32_t u32Expect; 1456 uint32_t *pu32; 1457 uint32_t volatile cFailures; 1458 } PGMR0DYNMAPTEST; 1459 typedef PGMR0DYNMAPTEST *PPGMR0DYNMAPTEST; 1460 1461 /** 1462 * Checks that the content of the page is the same on all CPUs, i.e. that there 1463 * are no CPU specfic PTs or similar nasty stuff involved. 
1464 * 1465 * @param idCpu The current CPU. 1466 * @param pvUser1 Pointer a PGMR0DYNMAPTEST structure. 1467 * @param pvUser2 Unused, ignored. 1468 */ 1469 static DECLCALLBACK(void) pgmR0DynMapTest3PerCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2) 1470 { 1471 PPGMR0DYNMAPTEST pTest = (PPGMR0DYNMAPTEST)pvUser1; 1472 ASMInvalidatePage(pTest->pu32); 1473 if (*pTest->pu32 != pTest->u32Expect) 1474 ASMAtomicIncU32(&pTest->cFailures); 1475 NOREF(pvUser2); NOREF(idCpu); 1476 } 1477 1478 1433 1479 /** 1434 1480 * Performs some basic tests in debug builds. … … 1436 1482 static int pgmR0DynMapTest(PVM pVM) 1437 1483 { 1484 LogRel(("pgmR0DynMapTest: ****** START ******\n")); 1438 1485 PPGMR0DYNMAP pThis = g_pPGMR0DynMap; 1439 1486 PPGMMAPSET pSet = &pVM->aCpus[0].pgm.s.AutoSet; 1487 uint32_t i; 1488 void *pvR0DynMapUsedSaved = pVM->pgm.s.pvR0DynMapUsed; 1489 pVM->pgm.s.pvR0DynMapUsed = pThis; 1440 1490 1441 1491 /* … … 1443 1493 * same mapping address back. 1444 1494 */ 1445 LogRel((" pgmR0DynMapTest:1\n"));1495 LogRel(("Test #1\n")); 1446 1496 ASMIntDisable(); 1447 1497 PGMDynMapStartAutoSet(&pVM->aCpus[0]); … … 1463 1513 * with more CR3 mappings. 
1464 1514 */ 1465 LogRel((" pgmR0DynMapTest:2\n"));1515 LogRel(("Test #2\n")); 1466 1516 ASMIntDisable(); 1467 for ( uint32_ti = 0 ; i < UINT16_MAX*2 + RT_ELEMENTS(pSet->aEntries) / 2 && RT_SUCCESS(rc) && pv2 == pv; i++)1517 for (i = 0 ; i < UINT16_MAX*2 + RT_ELEMENTS(pSet->aEntries) / 2 && RT_SUCCESS(rc) && pv2 == pv; i++) 1468 1518 { 1469 1519 pv2 = (void *)(intptr_t)-4; … … 1473 1523 if (RT_FAILURE(rc) || pv != pv2) 1474 1524 { 1475 LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p \n", __LINE__, rc, pv, pv2));1476 if (RT_SUCCESS(rc 2)) rc2= VERR_INTERNAL_ERROR;1525 LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p i=%p\n", __LINE__, rc, pv, pv2, i)); 1526 if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR; 1477 1527 } 1478 1528 else if (pSet->cEntries != RT_ELEMENTS(pSet->aEntries) / 2) … … 1487 1537 { 1488 1538 LogRel(("failed(%d): bad set dist: ", __LINE__)); 1489 for ( uint32_ti = 0; i < pSet->cEntries; i++)1539 for (i = 0; i < pSet->cEntries; i++) 1490 1540 LogRel(("[%d]=%d, ", i, pSet->aEntries[i].cRefs)); 1491 1541 LogRel(("\n")); … … 1497 1547 * Trigger an set optimization run (exactly). 
1498 1548 */ 1499 LogRel((" pgmR0DynMapTest: 2\n"));1549 LogRel(("Test #3\n")); 1500 1550 ASMIntDisable(); 1501 for (uint32_t i = 0 ; i < RT_ELEMENTS(pSet->aEntries) / 2 && RT_SUCCESS(rc) && pv2 != pv; i++) 1551 pv2 = NULL; 1552 for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) / 2 && RT_SUCCESS(rc) && pv2 != pv; i++) 1502 1553 { 1503 1554 pv2 = (void *)(intptr_t)(-5 - i); 1504 rc = PGMDynMapHCPage(pVM, cr3 + (PAGE_SIZE * i), &pv2);1555 rc = PGMDynMapHCPage(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2); 1505 1556 } 1506 1557 ASMIntEnable(); 1507 1558 if (RT_FAILURE(rc) || pv == pv2) 1508 1559 { 1509 LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p \n", __LINE__, rc, pv, pv2));1510 if (RT_SUCCESS(rc 2)) rc2= VERR_INTERNAL_ERROR;1560 LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p i=%d\n", __LINE__, rc, pv, pv2, i)); 1561 if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR; 1511 1562 } 1512 else if (pSet->cEntries != RT_ELEMENTS(pSet->aEntries) / 2 + 3)1563 else if (pSet->cEntries != RT_ELEMENTS(pSet->aEntries)) 1513 1564 { 1514 LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, RT_ELEMENTS(pSet->aEntries) / 2 + 3));1565 LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, RT_ELEMENTS(pSet->aEntries))); 1515 1566 rc = VERR_INTERNAL_ERROR; 1516 1567 } … … 1525 1576 } 1526 1577 1527 /* clean up */ 1528 LogRel(("pgmR0DynMapTest: cleanup\n")); 1578 /* 1579 * Check that everyone sees the same stuff. 
1580 */ 1581 if (RT_SUCCESS(rc)) 1582 { 1583 LogRel(("Test #4\n")); 1584 ASMIntDisable(); 1585 RTHCPHYS HCPhysPT = RTR0MemObjGetPagePhysAddr(pThis->pSegHead->ahMemObjPTs[0], 0); 1586 rc = PGMDynMapHCPage(pVM, HCPhysPT, &pv); 1587 if (RT_SUCCESS(rc)) 1588 { 1589 PGMR0DYNMAPTEST Test; 1590 uint32_t *pu32Real = &pThis->paPages[pThis->pSegHead->iPage].uPte.pLegacy->u; 1591 Test.pu32 = (uint32_t *)((uintptr_t)pv | ((uintptr_t)pu32Real & PAGE_OFFSET_MASK)); 1592 Test.u32Expect = *pu32Real; 1593 ASMAtomicWriteU32(&Test.cFailures, 0); 1594 ASMIntEnable(); 1595 1596 rc = RTMpOnAll(pgmR0DynMapTest3PerCpu, &Test, NULL); 1597 if (RT_FAILURE(rc)) 1598 LogRel(("failed(%d): RTMpOnAll rc=%Rrc\n", __LINE__, rc)); 1599 else if (Test.cFailures) 1600 { 1601 LogRel(("failed(%d): cFailures=%d pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n", __LINE__, 1602 Test.cFailures, pu32Real, Test.pu32, Test.u32Expect, *Test.pu32)); 1603 rc = VERR_INTERNAL_ERROR; 1604 } 1605 else 1606 LogRel(("pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n", 1607 pu32Real, Test.pu32, Test.u32Expect, *Test.pu32)); 1608 } 1609 else 1610 { 1611 ASMIntEnable(); 1612 LogRel(("failed(%d): rc=%Rrc\n", rc)); 1613 } 1614 } 1615 1616 /* 1617 * Clean up. 1618 */ 1619 LogRel(("Cleanup.\n")); 1529 1620 ASMIntDisable(); 1530 1621 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]); … … 1532 1623 ASMIntEnable(); 1533 1624 1534 LogRel(("Load=%u/%u/%u Set=%#x/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries))); 1625 LogRel(("Result: rc=%Rrc Load=%u/%u/%u Set=%#x/%u\n", rc, 1626 pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries))); 1627 pVM->pgm.s.pvR0DynMapUsed = pvR0DynMapUsedSaved; 1628 LogRel(("pgmR0DynMapTest: ****** END ******\n")); 1535 1629 return rc; 1536 1630 } 1537 1631 #endif /* DEBUG */ 1632
Note: See TracChangeset for help on using the changeset viewer.