Changeset 86466 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Oct 7, 2020 12:50:21 PM (4 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
Legend:
- Unmodified (lines prefixed with a space)
- Added (lines prefixed with '+')
- Removed (lines prefixed with '-')
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r86464 → r86466

@@ -1451 +1451 @@
 int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
 {
-    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
-    PX86PDPT       pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
-    PX86PDPE       pPdpe = &pPdpt->a[iPdPt];
     PVMCC          pVM   = pVCpu->CTX_SUFF(pVM);
     PPGMPOOL       pPool = pVM->pgm.s.CTX_SUFF(pPool);
     PPGMPOOLPAGE   pShwPage;
     int            rc;
-
     PGM_LOCK_ASSERT_OWNER(pVM);
 
+
     /* Allocate page directory if not present. */
-    if (    !pPdpe->n.u1Present
-        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
+    const unsigned     iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
+    PX86PDPT           pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
+    PX86PDPE           pPdpe = &pPdpt->a[iPdPt];
+    X86PGPAEUINT const uPdpe = pPdpe->u;
+    if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
+    {
+        pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
+        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
+        Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
+
+        pgmPoolCacheUsed(pPool, pShwPage);
+
+        /* Update the entry if necessary. */
+        X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
+        if (uPdpeNew == uPdpe)
+        { /* likely */ }
+        else
+            ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
+    }
+    else
     {
         RTGCPTR64   GCPdPt;
         PGMPOOLKIND enmKind;
-
         if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
         {

@@ -1481 +1495 @@
         {
             /* PD not present; guest must reload CR3 to change it.
-             * No need to monitor anything in this case.
-             */
+             * No need to monitor anything in this case. */
             Assert(VM_IS_RAW_MODE_ENABLED(pVM));
-
             GCPdPt  = uGstPdpe & X86_PDPE_PG_MASK;
             enmKind = PGMPOOLKIND_PAE_PD_PHYS;
-            uGstPdpe |= X86_PDPE_P;
+            Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
         }
         else

@@ -1508 +1520 @@
         AssertRCReturn(rc, rc);
 
-        /* The PD was cached or created; hook it up now. */
-        pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
-        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
-    }
-    else
-    {
-        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
-        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
-        Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
-
-        pgmPoolCacheUsed(pPool, pShwPage);
-    }
+        /* Hook it up. */
+        ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
+    }
+    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
+
     *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
     return VINF_SUCCESS;

@@ -1535 +1540 @@
 DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
 {
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
+    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
+    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
     const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
-    PX86PDPT       pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
-    PVM            pVM   = pVCpu->CTX_SUFF(pVM);
-
-    PGM_LOCK_ASSERT_OWNER(pVM);
-
-    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
-    if (!pPdpt->a[iPdPt].n.u1Present)
-    {
-        LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
+    X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
+    if (!(uPdpe & X86_PDPE_P))
+    {
+        LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
         return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
     }
-    AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
+    AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
 
     /* Fetch the pgm pool shadow descriptor. */
-    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
+    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
     AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);

@@ -1577 +1582 @@
     PVMCC    pVM   = pVCpu->CTX_SUFF(pVM);
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-    const unsigned iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PX86PML4E      pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
-    bool           fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
-    PPGMPOOLPAGE   pShwPage;
+    bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
     int rc;
 
     PGM_LOCK_ASSERT_OWNER(pVM);
 
-    /* Allocate page directory pointer table if not present. */
-    if (    !pPml4e->n.u1Present
-        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
-    {
-        RTGCPTR64   GCPml4;
-        PGMPOOLKIND enmKind;
-
-        Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
-
-        if (fNestedPagingOrNoGstPaging)
-        {
-            /* AMD-V nested paging or real/protected mode without paging */
-            GCPml4  = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
-            enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
+    /*
+     * PML4.
+     */
+    PPGMPOOLPAGE pShwPage;
+    {
+        const unsigned     iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+        PX86PML4E          pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
+        X86PGPAEUINT const uPml4e = pPml4e->u;
+
+        /* Allocate page directory pointer table if not present. */
+        if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
+        {
+            pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
+            AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
+
+            pgmPoolCacheUsed(pPool, pShwPage);
+
+            /* Update the entry if needed. */
+            X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
+                                         | (uPml4e & PGM_PML4_FLAGS);
+            if (uPml4e == uPml4eNew)
+            { /* likely */ }
+            else
+                ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
         }
         else
         {
-            GCPml4  = uGstPml4e & X86_PML4E_PG_MASK;
-            enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
-        }
-
-        /* Create a reference back to the PDPT by using the index in its shadow page. */
-        rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
-                          pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
-                          &pShwPage);
-        AssertRCReturn(rc, rc);
+            Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+
+            RTGCPTR64   GCPml4;
+            PGMPOOLKIND enmKind;
+            if (fNestedPagingOrNoGstPaging)
+            {
+                /* AMD-V nested paging or real/protected mode without paging */
+                GCPml4  = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
+                enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
+            }
+            else
+            {
+                GCPml4  = uGstPml4e & X86_PML4E_PG_MASK;
+                enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
+            }
+
+            /* Create a reference back to the PDPT by using the index in its shadow page. */
+            rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
+                              pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
+                              &pShwPage);
+            AssertRCReturn(rc, rc);
+
+            /* Hook it up. */
+            ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
+                                        | (uPml4e & PGM_PML4_FLAGS));
+        }
+    }
+
+    /*
+     * PDPT.
+     */
+    const unsigned     iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
+    PX86PDPT           pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
+    PX86PDPE           pPdpe = &pPdpt->a[iPdPt];
+    X86PGPAEUINT const uPdpe = pPdpe->u;
+
+    /* Allocate page directory if not present. */
+    if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
+    {
+        pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
+        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
+
+        pgmPoolCacheUsed(pPool, pShwPage);
+
+        /* Update the entry if needed. */
+        X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
+                                    | (uPdpe & PGM_PDPT_FLAGS);
+        if (uPdpe == uPdpeNew)
+        { /* likely */ }
+        else
+            ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
     }
     else
-    {
-        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
-        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
-
-        pgmPoolCacheUsed(pPool, pShwPage);
-    }
-    /* The PDPT was cached or created; hook it up now. */
-    pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
-
-    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-    PX86PDPT  pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
-    PX86PDPE  pPdpe = &pPdpt->a[iPdPt];
-
-    /* Allocate page directory if not present. */
-    if (    !pPdpe->n.u1Present
-        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
     {
         RTGCPTR64   GCPdPt;
         PGMPOOLKIND enmKind;
-
         if (fNestedPagingOrNoGstPaging)
         {

@@ -1650 +1687 @@
                           &pShwPage);
         AssertRCReturn(rc, rc);
-    }
-    else
-    {
-        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
-        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
-
-        pgmPoolCacheUsed(pPool, pShwPage);
-    }
-    /* The PD was cached or created; hook it up now. */
-    pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
+
+        /* Hook it up. */
+        ASMAtomicWriteU64(&pPdpe->u,
+                          pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
+    }
 
     *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);

@@ -1678 +1710 @@
 DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
 {
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
+    /*
+     * PML4
+     */
     const unsigned iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
     PCX86PML4E     pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
-
-    PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
-
     AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
     if (ppPml4e)
         *ppPml4e = (PX86PML4E)pPml4e;
-
-    Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
-
-    if (!pPml4e->n.u1Present)
+    X86PGPAEUINT const uPml4e = pPml4e->u;
+    Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
+    if (!(uPml4e & X86_PML4E_P)) /** @todo other code is check for NULL page frame number! */
         return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
 
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-    PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
+    PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
     AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
 
-    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-    PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
-    if (!pPdpt->a[iPdPt].n.u1Present)
+    /*
+     * PDPT
+     */
+    const unsigned     iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
+    PCX86PDPT          pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
+    X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
+    if (!(uPdpe & X86_PDPE_P)) /** @todo other code is check for NULL page frame number! */
         return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
 
-    pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
+    pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
     AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);

@@ -2283 +2320 @@
     PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
     Assert(pGuestPDPT);
-    Assert(pGuestPDPT->a[iPdpt].n.u1Present);
+    Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
     RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
     bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
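The rewritten sync functions above all follow the same update discipline: read the shadow entry once into a local (uPdpe/uPml4e), compute the new value from the pool page key, the guest bits and the preserved software flags, and only issue an atomic 64-bit write when the value actually changes. Below is a minimal standalone sketch of that pattern; it uses C11 atomics in place of IPRT's ASMAtomicWriteU64 and invented flag constants, so it illustrates the idea rather than the actual VirtualBox code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the real X86_PDPE_* and PGM_PDPT_FLAGS masks (iprt/x86.h). */
#define ENTRY_P        UINT64_C(0x0000000000000001)   /* present bit                */
#define ENTRY_A        UINT64_C(0x0000000000000020)   /* accessed bit               */
#define ENTRY_SW_FLAGS UINT64_C(0x0000000000000e00)   /* software-defined flag bits */
#define ENTRY_PG_MASK  UINT64_C(0x000ffffffffff000)   /* page frame address         */

/*
 * Update one 64-bit paging-structure entry: keep the shadow page address,
 * take P and A from the guest entry, preserve the software flags, and only
 * write (atomically) when the value actually changed.
 */
static void UpdateShadowEntry(_Atomic uint64_t *pEntry, uint64_t uShwPageKey, uint64_t uGstEntry)
{
    uint64_t const uOld = atomic_load_explicit(pEntry, memory_order_relaxed);  /* read once */
    uint64_t const uNew = (uShwPageKey & ENTRY_PG_MASK)
                        | (uGstEntry   & (ENTRY_P | ENTRY_A))
                        | (uOld        & ENTRY_SW_FLAGS);
    if (uNew != uOld)                               /* likely unchanged; skip the write */
        atomic_store_explicit(pEntry, uNew, memory_order_relaxed);
}

int main(void)
{
    _Atomic uint64_t Entry = ENTRY_P | UINT64_C(0x1000) | ENTRY_SW_FLAGS;
    UpdateShadowEntry(&Entry, UINT64_C(0x2000), ENTRY_P | ENTRY_A);
    printf("entry=%#llx\n", (unsigned long long)atomic_load(&Entry));
    return 0;
}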
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r86464 → r86466

@@ -1069 +1069 @@
     /* If the shadow PDPE isn't present, then skip the invalidate. */
 # ifdef IN_RING3 /* Possible we didn't resync yet when called from REM. */
-    if (!pPdptDst || !pPdptDst->a[iPdpt].n.u1Present)
+    if (!pPdptDst || !(pPdptDst->a[iPdpt].u & X86_PDPE_P))
 # else
-    if (!pPdptDst->a[iPdpt].n.u1Present)
+    if (!(pPdptDst->a[iPdpt].u & X86_PDPE_P))
 # endif
     {
 # ifndef PGM_WITHOUT_MAPPINGS
         Assert(!pPdptDst || !(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING));
 # endif
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped));
         PGM_INVL_PG(pVCpu, GCPtrPage);

@@ -1082 +1082 @@
     }
 
+    /* Fetch the pgm pool shadow descriptor. */
+    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
+    AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
+
+    PX86PDPAE pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
     const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
-    PPGMPOOLPAGE pShwPde = NULL;
-    PX86PDPAE pPDDst;
-
-    /* Fetch the pgm pool shadow descriptor. */
-    rc = pgmShwGetPaePoolPagePD(pVCpu, GCPtrPage, &pShwPde);
-    AssertRCSuccessReturn(rc, rc);
-    Assert(pShwPde);
-
-    pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
-    PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
+    PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
 
 # else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */

@@ -1110 +1106 @@
         return VINF_SUCCESS;
     }
-    Assert(pPDDst);
-
     PX86PDEPAE pPdeDst  = &pPDDst->a[iPDDst];
     PX86PDPE   pPdpeDst = &pPdptDst->a[iPdpt];
-
-    if (!pPdpeDst->n.u1Present)
-    {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped));
-        PGM_INVL_PG(pVCpu, GCPtrPage);
-        return VINF_SUCCESS;
-    }
+    Assert(pPDDst);
+    Assert(!(pPdpeDst->u & X86_PDPE_P));
 
     /* Fetch the pgm pool shadow descriptor. */

@@ -2101 +2090 @@
     Assert(pPDDst && pPdptDst);
     PdeDst = pPDDst->a[iPDDst];
+
 # elif PGM_SHW_TYPE == PGM_TYPE_EPT
     const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);

@@ -2519 +2509 @@
     Assert(pPDDst);
     PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
+
 # endif
     SHWPDE PdeDst = *pPdeDst;

@@ -3640 +3631 @@
 
         /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
-        if (!pPml4eDst->n.u1Present)
+        if (!(pPml4eDst->u & X86_PML4E_P))
         {
             GCPtr += _2M * UINT64_C(512) * UINT64_C(512);

@@ -3649 +3640 @@
         GCPhysPdptSrc = PGM_A20_APPLY(pVCpu, pPml4eSrc->u & X86_PML4E_PG_MASK);
 
-        if (pPml4eSrc->n.u1Present != pPml4eDst->n.u1Present)
+        if ((pPml4eSrc->u & X86_PML4E_P) != (pPml4eDst->u & X86_PML4E_P))
         {
             AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));

@@ -3665 +3656 @@
         }
 
-        if (   pPml4eDst->n.u1User      != pPml4eSrc->n.u1User
-            || pPml4eDst->n.u1Write     != pPml4eSrc->n.u1Write
-            || pPml4eDst->n.u1NoExecute != pPml4eSrc->n.u1NoExecute)
+        if (   (pPml4eDst->u & (X86_PML4E_US | X86_PML4E_RW | X86_PML4E_NX))
+            != (pPml4eSrc->u & (X86_PML4E_US | X86_PML4E_RW | X86_PML4E_NX)))
         {
             AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));

@@ -3714 +3704 @@
             pPdpeDst = &pPdptDst->a[iPdpt];
 
-            if (!pPdpeDst->n.u1Present)
+            if (!(pPdpeDst->u & X86_PDPE_P))
             {
                 GCPtr += 512 * _2M;

@@ -3723 +3713 @@
             GCPhysPdeSrc = PGM_A20_APPLY(pVCpu, PdpeSrc.u & X86_PDPE_PG_MASK);
 
-            if (pPdpeDst->n.u1Present != PdpeSrc.n.u1Present)
+            if ((pPdpeDst->u & X86_PDPE_P) != (PdpeSrc.u & X86_PDPE_P))
             {
                 AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));

@@ -3744 +3734 @@
 
 #  if PGM_GST_TYPE == PGM_TYPE_AMD64
-            if (   pPdpeDst->lm.u1User      != PdpeSrc.lm.u1User
-                || pPdpeDst->lm.u1Write     != PdpeSrc.lm.u1Write
-                || pPdpeDst->lm.u1NoExecute != PdpeSrc.lm.u1NoExecute)
+            if (   (pPdpeDst->u & (X86_PDPE_US | X86_PDPE_RW | X86_PDPE_LM_NX))
+                != (PdpeSrc.u & (X86_PDPE_US | X86_PDPE_RW | X86_PDPE_LM_NX)))
             {
                 AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));

@@ -4381 +4370 @@
         {
             pVCpu->pgm.s.aGstPaePdpeRegs[i].u = pGuestPDPT->a[i].u;
-            if (pGuestPDPT->a[i].n.u1Present)
+            if (pGuestPDPT->a[i].u & X86_PDPE_P)
            {
                RTHCPTR HCPtr;
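Several of the PGMAllBth.h hunks collapse chains of bitfield comparisons (u1User/u1Write/u1NoExecute) into a single masked compare of the raw 64-bit entry values. A small self-contained illustration of that transformation, using invented flag values rather than the real X86_PML4E_* constants:

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions; the real X86_PML4E_US/RW/NX values live in iprt/x86.h. */
#define FLAG_RW UINT64_C(0x0000000000000002)
#define FLAG_US UINT64_C(0x0000000000000004)
#define FLAG_A  UINT64_C(0x0000000000000020)
#define FLAG_NX (UINT64_C(1) << 63)

/* One masked compare replaces three separate bitfield comparisons. */
static int PermissionBitsDiffer(uint64_t uDst, uint64_t uSrc)
{
    uint64_t const fMask = FLAG_US | FLAG_RW | FLAG_NX;
    return (uDst & fMask) != (uSrc & fMask);
}

int main(void)
{
    printf("%d\n", PermissionBitsDiffer(FLAG_RW | FLAG_US, FLAG_RW)); /* 1: user bit differs           */
    printf("%d\n", PermissionBitsDiffer(FLAG_RW | FLAG_A,  FLAG_RW)); /* 0: accessed bit is not tested */
    return 0;
}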
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r85185 → r86466

@@ -147 +147 @@
         pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
 
-        if (Pml4e.n.u1Present) { /* probable */ }
+        if (Pml4e.u & X86_PML4E_P) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);
 

@@ -176 +176 @@
         pWalk->Pdpe.u = Pdpe.u = pPdpe->u;
 
-        if (Pdpe.n.u1Present) { /* probable */ }
+        if (Pdpe.u & X86_PDPE_P) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);
 
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r86464 → r86466

@@ -321 +321 @@
     /* PML4 */
     X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
-    if (!Pml4e.n.u1Present)
+    if (!(Pml4e.u & X86_PML4E_P))
         return VERR_PAGE_TABLE_NOT_PRESENT;
 

@@ -331 +331 @@
     const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
     X86PDPE Pdpe = pPDPT->a[iPDPT];
-    if (!Pdpe.n.u1Present)
+    if (!(Pdpe.u & X86_PDPE_P))
         return VERR_PAGE_TABLE_NOT_PRESENT;
 

@@ -343 +343 @@
 
     /* Merge accessed, write, user and no-execute bits into the PDE. */
-    Pde.n.u1Accessed  &= Pml4e.n.u1Accessed  & Pdpe.lm.u1Accessed;
-    Pde.n.u1Write     &= Pml4e.n.u1Write     & Pdpe.lm.u1Write;
-    Pde.n.u1User      &= Pml4e.n.u1User      & Pdpe.lm.u1User;
-    Pde.n.u1NoExecute |= Pml4e.n.u1NoExecute | Pdpe.lm.u1NoExecute;
+    AssertCompile(X86_PML4E_A  == X86_PDPE_A     && X86_PML4E_A  == X86_PDE_A);
+    AssertCompile(X86_PML4E_RW == X86_PDPE_RW    && X86_PML4E_RW == X86_PDE_RW);
+    AssertCompile(X86_PML4E_US == X86_PDPE_US    && X86_PML4E_US == X86_PDE_US);
+    AssertCompile(X86_PML4E_NX == X86_PDPE_LM_NX && X86_PML4E_NX == X86_PDE_PAE_NX);
+    Pde.u &= (Pml4e.u & Pdpe.u) | ~(X86PGPAEUINT)(X86_PML4E_A | X86_PML4E_RW | X86_PML4E_US);
+    Pde.u |= (Pml4e.u | Pdpe.u) & X86_PML4E_NX;
 
 # elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE

@@ -508 +510 @@
     /* PML4 */
     X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
-    if (!Pml4e.n.u1Present)
+    if (!(Pml4e.u & X86_PML4E_P))
         return VERR_PAGE_TABLE_NOT_PRESENT;
 

@@ -518 +520 @@
     const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
     X86PDPE Pdpe = pPDPT->a[iPDPT];
-    if (!Pdpe.n.u1Present)
+    if (!(Pdpe.u & X86_PDPE_P))
         return VERR_PAGE_TABLE_NOT_PRESENT;
 
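The PGMAllShw.h change merges the accessed, write, user and no-execute bits into the PDE with plain mask arithmetic instead of per-bitfield assignments; that is only valid because those bits sit at the same positions in the PML4E, PDPE and PDE, which is exactly what the new AssertCompile lines pin down. The following standalone check (with invented bit positions, not the real iprt/x86.h constants) brute-forces all combinations to show the two identities used, AND for A/RW/US and OR for NX, behave as expected:

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions standing in for X86_PML4E_A/RW/US/NX and friends. */
#define BIT_A  UINT64_C(0x020)          /* accessed:   effective value is the AND of all levels */
#define BIT_RW UINT64_C(0x002)          /* writable:   AND of all levels                        */
#define BIT_US UINT64_C(0x004)          /* user:       AND of all levels                        */
#define BIT_NX (UINT64_C(1) << 63)      /* no-execute: OR of all levels                         */
#define AND_MASK (BIT_A | BIT_RW | BIT_US)

int main(void)
{
    static const uint64_t s_aBits[4] = { BIT_A, BIT_RW, BIT_US, BIT_NX };
    for (unsigned c = 0; c < (1u << 12); c++)
    {
        /* Spread the 12 combination bits over the three paging levels. */
        uint64_t Pml4e = 0, Pdpe = 0, Pde = 0;
        for (unsigned i = 0; i < 4; i++)
        {
            if (c & (1u << (i + 0))) Pml4e |= s_aBits[i];
            if (c & (1u << (i + 4))) Pdpe  |= s_aBits[i];
            if (c & (1u << (i + 8))) Pde   |= s_aBits[i];
        }

        /* Mask arithmetic in the style of the changeset. */
        uint64_t Merged = Pde;
        Merged &= (Pml4e & Pdpe) | ~AND_MASK;   /* A, RW, US: cleared if any level clears them */
        Merged |= (Pml4e | Pdpe) & BIT_NX;      /* NX: set if any level sets it                */

        /* Bit-by-bit expectation. */
        uint64_t const Expected = (Pde & Pml4e & Pdpe & AND_MASK) | ((Pde | Pml4e | Pdpe) & BIT_NX);
        if (Merged != Expected)
        {
            printf("mismatch for combination %#x\n", c);
            return 1;
        }
    }
    printf("all 4096 combinations match\n");
    return 0;
}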