Timestamp:                   Oct 7, 2020 5:22:00 PM
svn:sync-xref-src-repo-rev:  140781
Location:                    trunk/src/VBox/VMM
Files:                       2 edited
Legend:
  ' '  Unmodified
  '+'  Added
  '-'  Removed
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r86470 → r86472)

@@ -21 +21 @@
 *********************************************************************************************************************************/
 #define LOG_GROUP LOG_GROUP_PGM_POOL
+#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
 #include <VBox/vmm/pgm.h>
 #include <VBox/vmm/mm.h>
@@ -444 +445 @@
                 if (iShw < X86_PG_PAE_PDPE_ENTRIES)    /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
                 {
-                    if (uShw.pPDPT->a[iShw].n.u1Present)
+                    X86PGPAEUINT const uPdpe = uShw.pPDPT->a[iShw].u;
+                    if (uPdpe & X86_PDPE_P)
                     {
                         LogFlow(("pgmPoolMonitorChainChanging: pae pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
-                        pgmPoolFree(pVM,
-                                    uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK,
-                                    pPage->idx,
-                                    iShw);
+                        pgmPoolFree(pVM, uPdpe & X86_PDPE_PG_MASK, pPage->idx, iShw);
                         ASMAtomicWriteU64(&uShw.pPDPT->a[iShw].u, 0);
                     }
@@ -462 +461 @@
                     && iShw2 < X86_PG_PAE_PDPE_ENTRIES)
                 {
-                    if (uShw.pPDPT->a[iShw2].n.u1Present)
+                    X86PGPAEUINT const uPdpe2 = uShw.pPDPT->a[iShw2].u;
+                    if (uPdpe2 & X86_PDPE_P)
                     {
                         LogFlow(("pgmPoolMonitorChainChanging: pae pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
-                        pgmPoolFree(pVM,
-                                    uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK,
-                                    pPage->idx,
-                                    iShw2);
+                        pgmPoolFree(pVM, uPdpe2 & X86_PDPE_PG_MASK, pPage->idx, iShw2);
                         ASMAtomicWriteU64(&uShw.pPDPT->a[iShw2].u, 0);
                     }
@@ -526 +523 @@
                 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PDPE);
-                if (uShw.pPDPT->a[iShw].n.u1Present)
+                X86PGPAEUINT const uPdpe = uShw.pPDPT->a[iShw].u;
+                if (uPdpe & X86_PDPE_P)
                 {
-                    LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
-                    pgmPoolFree(pVM, uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK, pPage->idx, iShw);
+                    LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uPdpe));
+                    pgmPoolFree(pVM, uPdpe & X86_PDPE_PG_MASK, pPage->idx, iShw);
                     ASMAtomicWriteU64(&uShw.pPDPT->a[iShw].u, 0);
                 }
@@ -537 +535 @@
                 {
                     const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDPE);
-                    if (uShw.pPDPT->a[iShw2].n.u1Present)
+                    X86PGPAEUINT const uPdpe2 = uShw.pPDPT->a[iShw2].u;
+                    if (uPdpe2 & X86_PDPE_P)
                     {
-                        LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
-                        pgmPoolFree(pVM, uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK, pPage->idx, iShw2);
+                        LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uPdpe2));
+                        pgmPoolFree(pVM, uPdpe2 & X86_PDPE_PG_MASK, pPage->idx, iShw2);
                         ASMAtomicWriteU64(&uShw.pPDPT->a[iShw2].u, 0);
                     }
@@ -556 +555 @@
                 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PDPE);
-                if (uShw.pPML4->a[iShw].n.u1Present)
+                X86PGPAEUINT const uPml4e = uShw.pPML4->a[iShw].u;
+                if (uPml4e & X86_PML4E_P)
                 {
-                    LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPML4->a[iShw].u));
-                    pgmPoolFree(pVM, uShw.pPML4->a[iShw].u & X86_PML4E_PG_MASK, pPage->idx, iShw);
+                    LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw=%#x: %RX64 -> freeing it!\n", iShw, uPml4e));
+                    pgmPoolFree(pVM, uPml4e & X86_PML4E_PG_MASK, pPage->idx, iShw);
                     ASMAtomicWriteU64(&uShw.pPML4->a[iShw].u, 0);
                 }
@@ -567 +567 @@
                 {
                     const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PML4E);
-                    if (uShw.pPML4->a[iShw2].n.u1Present)
+                    X86PGPAEUINT const uPml4e2 = uShw.pPML4->a[iShw2].u;
+                    if (uPml4e2 & X86_PML4E_P)
                     {
-                        LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPML4->a[iShw2].u));
-                        pgmPoolFree(pVM, uShw.pPML4->a[iShw2].u & X86_PML4E_PG_MASK, pPage->idx, iShw2);
+                        LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uPml4e2));
+                        pgmPoolFree(pVM, uPml4e2 & X86_PML4E_PG_MASK, pPage->idx, iShw2);
                         ASMAtomicWriteU64(&uShw.pPML4->a[iShw2].u, 0);
                     }
@@ -3631 +3632 @@
      * This is simple but not quite optimal solution.
      */
-    const uint64_t  u64   = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P; /** @todo drop X86_PTE_P here as we always test if present separately, anyway. */
-    const uint32_t  u32   = u64;                                        /** @todo move into the 32BIT_PT_xx case */
+    const uint64_t  u64   = PGM_PAGE_GET_HCPHYS(pPhysPage);
     unsigned        cLeft = pPool->cUsedPages;
     unsigned        iPage = pPool->cCurPages;
@@ -3650 +3650 @@
             case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
             {
-                unsigned        cPresent = pPage->cPresent;
-                PX86PT          pPT      = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
+                const uint32_t  u32      = (uint32_t)u64;
+                unsigned        cPresent = pPage->cPresent;
+                PX86PT          pPT      = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
                     if (pPT->a[i].n.u1Present)
                     {
-                        if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
+                        if ((pPT->a[i].u & X86_PTE_PG_MASK) == u32)
                         {
                             //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX32\n", iPage, i, pPT->a[i]));
@@ -3684 +3685 @@
                     if (PGMSHWPTEPAE_IS_P(pPT->a[i]))
                     {
-                        if ((PGMSHWPTEPAE_GET_U(pPT->a[i]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
+                        if ((PGMSHWPTEPAE_GET_U(pPT->a[i]) & X86_PTE_PAE_PG_MASK) == u64)
                         {
                             //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
@@ -3707 +3708 @@
                 PEPTPT pPT = (PEPTPT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
-                    if (pPT->a[i].n.u1Present)
+                {
+                    X86PGPAEUINT const uPte = pPT->a[i].u;
+                    if (uPte & EPT_E_READ)
                     {
-                        if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
+                        if ((uPte & EPT_PTE_PG_MASK) == u64)
                         {
                             //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
@@ -3723 +3726 @@
                             break;
                     }
+                }
                 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
                 break;
@@ -4466 +4470 @@
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
     {
-        Assert((pShwPT->a[i].u & UINT64_C(0xfff0000000000f80)) == 0);
-        if (pShwPT->a[i].n.u1Present)
+        X86PGPAEUINT const uPte = pShwPT->a[i].u;
+        Assert((uPte & UINT64_C(0xfff0000000000f80)) == 0);
+        if (uPte & EPT_E_READ)
         {
             Log4(("pgmPoolTrackDerefPTEPT: i=%d pte=%RX64 GCPhys=%RX64\n",
-                  i, pShwPT->a[i].u & EPT_PTE_PG_MASK, pPage->GCPhys));
-            pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & EPT_PTE_PG_MASK, GCPhys & GCPhysA20Mask, i);
+                  i, uPte & EPT_PTE_PG_MASK, pPage->GCPhys));
+            pgmPoolTracDerefGCPhys(pPool, pPage, uPte & EPT_PTE_PG_MASK, GCPhys & GCPhysA20Mask, i);
             if (!pPage->cPresent)
                 break;
@@ -4559 +4564 @@
     for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
     {
-        Assert((pShwPDPT->a[i].u & (X86_PDPE_PAE_MBZ_MASK | UINT64_C(0x7ff0000000000200))) == 0);
-        if (    pShwPDPT->a[i].n.u1Present
+        X86PGPAEUINT const uPdpe = pShwPDPT->a[i].u;
+        Assert((uPdpe & (X86_PDPE_PAE_MBZ_MASK | UINT64_C(0x7ff0000000000200))) == 0);
+        if (   uPdpe & X86_PDPE_P
 #ifndef PGM_WITHOUT_MAPPINGS
-            && !(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING)
+            && !(uPdpe & PGM_PLXFLAGS_MAPPING)
 #endif
            )
         {
-            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
+            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, uPdpe & X86_PDPE_PG_MASK);
             if (pSubPage)
                 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
             else
-                AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
+                AssertFatalMsgFailed(("%RX64\n", uPdpe & X86_PDPE_PG_MASK));
         }
     }
@@ -4587 +4593 @@
     for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
     {
-        Assert((pShwPDPT->a[i].u & (X86_PDPE_LM_MBZ_MASK_NX | UINT64_C(0x7ff0000000000200))) == 0);
-        if (pShwPDPT->a[i].n.u1Present)
-        {
-            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
+        X86PGPAEUINT const uPdpe = pShwPDPT->a[i].u;
+        Assert((uPdpe & (X86_PDPE_LM_MBZ_MASK_NX | UINT64_C(0x7ff0000000000200))) == 0);
+        if (uPdpe & X86_PDPE_P)
+        {
+            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, uPdpe & X86_PDPE_PG_MASK);
             if (pSubPage)
                 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
             else
-                AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
+                AssertFatalMsgFailed(("%RX64\n", uPdpe & X86_PDPE_PG_MASK));
             /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
         }
@@ -4612 +4619 @@
     for (unsigned i = 0; i < RT_ELEMENTS(pShwPML4->a); i++)
     {
-        Assert((pShwPML4->a[i].u & (X86_PML4E_MBZ_MASK_NX | UINT64_C(0x7ff0000000000200))) == 0);
-        if (pShwPML4->a[i].n.u1Present)
-        {
-            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPML4->a[i].u & X86_PDPE_PG_MASK);
+        X86PGPAEUINT const uPml4e = pShwPML4->a[i].u;
+        Assert((uPml4e & (X86_PML4E_MBZ_MASK_NX | UINT64_C(0x7ff0000000000200))) == 0);
+        if (uPml4e & X86_PML4E_P)
+        {
+            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, uPml4e & X86_PDPE_PG_MASK);
             if (pSubPage)
                 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
             else
-                AssertFatalMsgFailed(("%RX64\n", pShwPML4->a[i].u & X86_PML4E_PG_MASK));
+                AssertFatalMsgFailed(("%RX64\n", uPml4e & X86_PML4E_PG_MASK));
             /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
         }
@@ -4637 +4645 @@
     for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
     {
-        Assert((pShwPD->a[i].u & UINT64_C(0xfff0000000000f80)) == 0);
-        if (pShwPD->a[i].n.u1Present)
+        X86PGPAEUINT const uPde = pShwPD->a[i].u;
+        Assert((uPde & UINT64_C(0xfff0000000000f80)) == 0);
+        if (uPde & EPT_E_READ)
         {
 #ifdef PGM_WITH_LARGE_PAGES
-            if (pShwPD->a[i].b.u1Size)
+            if (uPde & EPT_E_LEAF)
             {
                 Log4(("pgmPoolTrackDerefPDEPT: i=%d pde=%RX64 GCPhys=%RX64\n",
-                      i, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK, pPage->GCPhys));
-                pgmPoolTracDerefGCPhys(pPool, pPage, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK,
+                      i, uPde & EPT_PDE2M_PG_MASK, pPage->GCPhys));
+                pgmPoolTracDerefGCPhys(pPool, pPage, uPde & EPT_PDE2M_PG_MASK,
                                        pPage->GCPhys + i * 2 * _1M /* pPage->GCPhys = base address of the memory described by the PD */,
                                        i);
             }
             else
 #endif
             {
-                PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & EPT_PDE_PG_MASK);
+                PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, uPde & EPT_PDE_PG_MASK);
                 if (pSubPage)
                     pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
@@ -4675 +4684 @@
     for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
     {
-        Assert((pShwPDPT->a[i].u & UINT64_C(0xfff0000000000f80)) == 0);
-        if (pShwPDPT->a[i].n.u1Present)
-        {
-            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK);
+        X86PGPAEUINT const uPdpe = pShwPDPT->a[i].u;
+        Assert((uPdpe & UINT64_C(0xfff0000000000f80)) == 0);
+        if (uPdpe & EPT_E_READ)
+        {
+            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, uPdpe & EPT_PDPTE_PG_MASK);
             if (pSubPage)
                 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
             else
-                AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK));
+                AssertFatalMsgFailed(("%RX64\n", uPdpe & EPT_PDPTE_PG_MASK));
             /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
         }
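The whole changeset applies one mechanical pattern: instead of reading paging-structure entries through 64-bit bitfield members (.n.u1Present and friends), each entry is snapshotted once into a plain X86PGPAEUINT local and then tested with bit masks (X86_PDPE_P, X86_PML4E_P, EPT_E_READ, ...). Below is a minimal standalone C sketch of the pattern, not VirtualBox code: the PDPE type, field widths, and mask values are simplified stand-ins for the real definitions.

/* Sketch only -- simplified stand-ins, not the real VirtualBox types/masks. */
#include <stdint.h>

/* A 64-bit bitfield view of a PDPT entry (the style being removed). */
typedef struct
{
    uint64_t u1Present  : 1;
    uint64_t u11Ignored : 11;
    uint64_t u40PageNo  : 40;
    uint64_t u12Rsvd    : 12;
} PDPEBITS;

typedef union
{
    PDPEBITS n;     /* bitfield view (old style)    */
    uint64_t u;     /* whole-entry view (new style) */
} PDPE;

#define PDPE_P       UINT64_C(0x0000000000000001)
#define PDPE_PG_MASK UINT64_C(0x000ffffffffff000)

/* Old style: the compiler may implement the bitfield test however it likes,
 * and each mention of the entry is a separate load of a shared, concurrently
 * updated table word. */
static uint64_t pdpePageAddrOld(PDPE volatile *pPdpe)
{
    if (pPdpe->n.u1Present)               /* load #1 */
        return pPdpe->u & PDPE_PG_MASK;   /* load #2: may observe a different value */
    return 0;
}

/* New style: snapshot the entry once, then test and mask the local copy, so
 * the presence test and the address extraction agree by construction. */
static uint64_t pdpePageAddrNew(PDPE volatile *pPdpe)
{
    uint64_t const uPdpe = pPdpe->u;      /* single 64-bit load */
    if (uPdpe & PDPE_P)
        return uPdpe & PDPE_PG_MASK;
    return 0;
}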
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp (r86470 → r86472)

@@ -97 +97 @@
 *********************************************************************************************************************************/
 #define LOG_GROUP LOG_GROUP_PGM_POOL
+#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
 #include <VBox/vmm/pgm.h>
 #include <VBox/vmm/mm.h>
@@ -563 +564 @@
             {
                 Assert((pShwPD->a[i].u & UINT64_C(0xfff0000000000f80)) == 0);
-                if (    pShwPD->a[i].n.u1Present
-                    &&  pShwPD->a[i].b.u1Size)
+                if ((pShwPD->a[i].u & (EPT_E_READ | EPT_E_LEAF)) == (EPT_E_READ | EPT_E_LEAF))
                 {
 # ifndef PGM_WITHOUT_MAPPINGS
@@ -815 +815 @@
                 for (unsigned iShw = 0; iShw < RT_ELEMENTS(uShw.pPTEpt->a); iShw++)
                 {
-                    if (uShw.pPTEpt->a[iShw].n.u1Present)
-                        uShw.pPTEpt->a[iShw].n.u1Write = 0;
+                    if (uShw.pPTEpt->a[iShw].u & EPT_E_READ)
+                        uShw.pPTEpt->a[iShw].u &= ~(X86PGPAEUINT)EPT_E_WRITE;
                 }
                 break;
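The last hunk shows the same idea applied to a read-modify-write: the write permission is cleared on the full 64-bit entry rather than through a bitfield member. A standalone C sketch under the same caveats (stand-in names; bit positions follow the Intel SDM EPT layout, read = bit 0, write = bit 1):

/* Sketch only -- stand-in names, not VirtualBox code. */
#include <stdint.h>

#define EPT_READ   UINT64_C(0x1)   /* bit 0: read access  */
#define EPT_WRITE  UINT64_C(0x2)   /* bit 1: write access */

/* Write-protect every in-use entry of an EPT page table by clearing the
 * write bit on the whole 64-bit word, mirroring the hunk above. */
static void writeProtectEptPT(uint64_t *paPte, unsigned cEntries)
{
    for (unsigned i = 0; i < cEntries; i++)
        if (paPte[i] & EPT_READ)       /* entry in use?             */
            paPte[i] &= ~EPT_WRITE;    /* drop the write permission */
}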