Changeset 31775 in vbox

Timestamp: Aug 19, 2010 9:48:24 AM
svn:sync-xref-src-repo-rev: 64961
Location: trunk/src/VBox/VMM
Files: 12 edited
trunk/src/VBox/VMM/PGM.cpp (r31684 → r31775)

  * @param   pHlp        Pointer to the output functions.
  */
-static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
+static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PCPGMSHWPTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
 {
     for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
-    {
-        X86PTEPAE Pte = pPT->a[i];
-        if (Pte.n.u1Present)
+        if (PGMSHWPTEPAE_IS_P(pPT->a[i]))
         {
+            X86PTEPAE Pte;
+            Pte.u = PGMSHWPTEPAE_GET_U(pPT->a[i]);
             pHlp->pfnPrintf(pHlp,
                             fLongMode       /*P R  S  A  D  G  WT CD AT NX 4M a p ?  */
…
                             Pte.u & X86_PTE_PAE_PG_MASK);
         }
-    }
     return VINF_SUCCESS;
 }
…
         {
             /** @todo what about using the page pool for mapping PTs? */
-            uint64_t    u64AddressPT = u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT);
-            RTHCPHYS    HCPhysPT     = Pde.u & X86_PDE_PAE_PG_MASK;
-            PX86PTPAE   pPT          = NULL;
+            uint64_t     u64AddressPT = u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT);
+            RTHCPHYS     HCPhysPT     = Pde.u & X86_PDE_PAE_PG_MASK;
+            PPGMSHWPTPAE pPT          = NULL;
             if (!(Pde.u & PGM_PDFLAGS_MAPPING))
-                pPT = (PX86PTPAE)MMPagePhys2Page(pVM, HCPhysPT);
+                pPT = (PPGMSHWPTPAE)MMPagePhys2Page(pVM, HCPhysPT);
             else
             {
trunk/src/VBox/VMM/PGMGstDefs.h (r31080 → r31775)

 #  define BTH_IS_NP_ACTIVE(pVM)         (true)
 # else
-#  define GSTPT                         SHWPT
-#  define PGSTPT                        PSHWPT
-#  define GSTPTE                        SHWPTE
-#  define PGSTPTE                       PSHWPTE
-#  define GSTPD                         SHWPD
-#  define PGSTPD                        PSHWPD
-#  define GSTPDE                        SHWPDE
-#  define PGSTPDE                       PSHWPDE
-#  define GST_PTE_PG_MASK               SHW_PTE_PG_MASK
+#  if PGM_SHW_TYPE == PGM_TYPE_32BIT /* Same as shadow paging, but no PGMSHWPTEPAE. */
+#   define GSTPT                        X86PT
+#   define PGSTPT                       PX86PT
+#   define GSTPTE                       X86PTE
+#   define PGSTPTE                      PX86PTE
+#   define GSTPD                        X86PD
+#   define PGSTPD                       PX86PD
+#   define GSTPDE                       X86PDE
+#   define PGSTPDE                      PX86PDE
+#   define GST_PTE_PG_MASK              X86_PTE_PG_MASK
+#  else
+#   define GSTPT                        X86PTPAE
+#   define PGSTPT                       PX86PTPAE
+#   define GSTPTE                       X86PTEPAE
+#   define PGSTPTE                      PX86PTEPAE
+#   define GSTPD                        X86PDPAE
+#   define PGSTPD                       PX86PDPAE
+#   define GSTPDE                       X86PDEPAE
+#   define PGSTPDE                      PX86PDEPAE
+#   define GST_PTE_PG_MASK              X86_PTE_PAE_PG_MASK
+#  endif
 #  define GST_IS_NX_ACTIVE(pVCpu)       (pgmGstIsNoExecuteActive(pVCpu))
 #  if PGM_GST_TYPE == PGM_TYPE_PROT /* (comment at top of PGMAllBth.h) */
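The practical effect of the split above: the guest-side GSTPT/GSTPTE types now always alias the plain x86 structures, while the shadow-side SHWPT/SHWPTE (see PGMShw.h below) become the opaque PGMSHWPTPAE/PGMSHWPTEPAE. A minimal illustrative sketch of what mixed guest/shadow code looks like after this change (not part of the changeset; pGstPT, pShwPT and i stand in for whatever the caller has at hand):

    GSTPTE GstPte = pGstPT->a[i];            /* plain X86PTE/X86PTEPAE: raw bit fields OK */
    if (   GstPte.n.u1Present                /* guest side: direct field access          */
        && PGMSHWPTEPAE_IS_P(pShwPT->a[i]))  /* shadow side: accessor macros only        */
    {
        /* ... */
    }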
trunk/src/VBox/VMM/PGMInternal.h (r31657 → r31775)

 # define PGM_INVL_ALL_VCPU_TLBS(pVM)    HWACCMFlushTLBOnAllVCpus(pVM)
 #endif
+
+
+/** @name Safer Shadow PAE PT/PTE
+ * For helping avoid misinterpreting invalid PAE/AMD64 page table entries as
+ * present.
+ *
+ * @{
+ */
+#if 1
+/**
+ * For making sure that u1Present and X86_PTE_P checks doesn't mistake
+ * invalid entries for present.
+ * @sa X86PTEPAE.
+ */
+typedef union PGMSHWPTEPAE
+{
+    /** Unsigned integer view */
+    X86PGPAEUINT        uCareful;
+# if 0
+    /* Not bit field view. */
+    /** 32-bit view. */
+    uint32_t            au32[2];
+    /** 16-bit view. */
+    uint16_t            au16[4];
+    /** 8-bit view. */
+    uint8_t             au8[8];
+# endif
+} PGMSHWPTEPAE;
+
+# define PGMSHWPTEPAE_IS_P(Pte)                 ( ((Pte).uCareful & (X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == X86_PTE_P )
+# define PGMSHWPTEPAE_IS_RW(Pte)                ( !!((Pte).uCareful & X86_PTE_RW))
+# define PGMSHWPTEPAE_IS_US(Pte)                ( !!((Pte).uCareful & X86_PTE_US))
+# define PGMSHWPTEPAE_IS_A(Pte)                 ( !!((Pte).uCareful & X86_PTE_A))
+# define PGMSHWPTEPAE_IS_D(Pte)                 ( !!((Pte).uCareful & X86_PTE_D))
+# define PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)       ( !!((Pte).uCareful & PGM_PTFLAGS_TRACK_DIRTY) )
+# define PGMSHWPTEPAE_IS_P_RW(Pte)              ( ((Pte).uCareful & (X86_PTE_P | X86_PTE_RW | X86_PTE_PAE_MBZ_MASK_NX)) == (X86_PTE_P | X86_PTE_RW) )
+# define PGMSHWPTEPAE_GET_LOG(Pte)              ( (Pte).uCareful )
+# define PGMSHWPTEPAE_GET_HCPHYS(Pte)           ( (Pte).uCareful & X86_PTE_PAE_PG_MASK )
+# define PGMSHWPTEPAE_GET_U(Pte)                ( (Pte).uCareful ) /**< Use with care. */
+# define PGMSHWPTEPAE_SET(Pte, uVal)            do { (Pte).uCareful = (uVal); } while (0)
+# define PGMSHWPTEPAE_SET2(Pte, Pte2)           do { (Pte).uCareful = (Pte2).uCareful; } while (0)
+# define PGMSHWPTEPAE_ATOMIC_SET(Pte, uVal)     do { ASMAtomicWriteU64(&(Pte).uCareful, (uVal)); } while (0)
+# define PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)    do { ASMAtomicWriteU64(&(Pte).uCareful, (Pte2).uCareful); } while (0)
+# define PGMSHWPTEPAE_SET_RO(Pte)               do { (Pte).uCareful &= ~(X86PGPAEUINT)X86_PTE_RW; } while (0)
+# define PGMSHWPTEPAE_SET_RW(Pte)               do { (Pte).uCareful |= X86_PTE_RW; } while (0)
+
+/**
+ * For making sure that u1Present and X86_PTE_P checks doesn't mistake
+ * invalid entries for present.
+ * @sa X86PTPAE.
+ */
+typedef struct PGMSHWPTPAE
+{
+    PGMSHWPTEPAE    a[X86_PG_PAE_ENTRIES];
+} PGMSHWPTPAE;
+
+#else
+typedef X86PTEPAE               PGMSHWPTEPAE;
+typedef X86PTPAE                PGMSHWPTPAE;
+# define PGMSHWPTEPAE_IS_P(Pte)                 ( (Pte).n.u1Present )
+# define PGMSHWPTEPAE_IS_RW(Pte)                ( (Pte).n.u1Write )
+# define PGMSHWPTEPAE_IS_US(Pte)                ( (Pte).n.u1User )
+# define PGMSHWPTEPAE_IS_A(Pte)                 ( (Pte).n.u1Accessed )
+# define PGMSHWPTEPAE_IS_D(Pte)                 ( (Pte).n.u1Dirty )
+# define PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)       ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
+# define PGMSHWPTEPAE_IS_P_RW(Pte)              ( ((Pte).u & (X86_PTE_P | X86_PTE_RW)) == (X86_PTE_P | X86_PTE_RW) )
+# define PGMSHWPTEPAE_GET_LOG(Pte)              ( (Pte).u )
+# define PGMSHWPTEPAE_GET_HCPHYS(Pte)           ( (Pte).u & X86_PTE_PAE_PG_MASK )
+# define PGMSHWPTEPAE_GET_U(Pte)                ( (Pte).u ) /**< Use with care. */
+# define PGMSHWPTEPAE_SET(Pte, uVal)            do { (Pte).u = (uVal); } while (0)
+# define PGMSHWPTEPAE_SET2(Pte, Pte2)           do { (Pte).u = (Pte2).u; } while (0)
+# define PGMSHWPTEPAE_ATOMIC_SET(Pte, uVal)     do { ASMAtomicWriteU64(&(Pte).u, (uVal)); } while (0)
+# define PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)    do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
+# define PGMSHWPTEPAE_SET_RO(Pte)               do { (Pte).u &= ~(X86PGPAEUINT)X86_PTE_RW; } while (0)
+# define PGMSHWPTEPAE_SET_RW(Pte)               do { (Pte).u |= X86_PTE_RW; } while (0)
+
+#endif
+
+/** Pointer to a shadow PAE PTE. */
+typedef PGMSHWPTEPAE           *PPGMSHWPTEPAE;
+/** Pointer to a const shadow PAE PTE. */
+typedef PGMSHWPTEPAE const     *PCPGMSHWPTEPAE;
+
+/** Pointer to a shadow PAE page table. */
+typedef PGMSHWPTPAE            *PPGMSHWPTPAE;
+/** Pointer to a const shadow PAE page table. */
+typedef PGMSHWPTPAE const      *PCPGMSHWPTPAE;
+/** @} */
+
 
 /** Size of the GCPtrConflict array in PGMMAPPING.
…
         R3PTRTYPE(PX86PT)           pPTR3;
         /** The HC virtual address of the two PAE page table. (i.e 1024 entries instead of 512) */
-        R3PTRTYPE(PX86PTPAE)        paPaePTsR3;
+        R3PTRTYPE(PPGMSHWPTPAE)     paPaePTsR3;
         /** The RC virtual address of the 32-bit page table. */
         RCPTRTYPE(PX86PT)           pPTRC;
         /** The RC virtual address of the two PAE page table. */
-        RCPTRTYPE(PX86PTPAE)        paPaePTsRC;
+        RCPTRTYPE(PPGMSHWPTPAE)     paPaePTsRC;
         /** The R0 virtual address of the 32-bit page table. */
         R0PTRTYPE(PX86PT)           pPTR0;
         /** The R0 virtual address of the two PAE page table. */
-        R0PTRTYPE(PX86PTPAE)        paPaePTsR0;
+        R0PTRTYPE(PPGMSHWPTPAE)     paPaePTsR0;
     } aPTs[1];
 } PGMMAPPING;
…
  *          PGMPOOL_TD_CREFS_SHIFT. */
 #define PGMPOOL_TD_CREFS_MASK       0x3
-/** The cRef value used to indiciate that the idx is the head of a
+/** The cRefs value used to indiciate that the idx is the head of a
  * physical cross reference list. */
 #define PGMPOOL_TD_CREFS_PHYSEXT    PGMPOOL_TD_CREFS_MASK
…
     RCPTRTYPE(PX86PTE)              paDynPageMap32BitPTEsGC;
     /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
-    RCPTRTYPE(PX86PTEPAE)           paDynPageMapPaePTEsGC;
+    RCPTRTYPE(PPGMSHWPTEPAE)        paDynPageMapPaePTEsGC;
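The point of the masked compare in PGMSHWPTEPAE_IS_P shows up in the PGMAllBth.h hunks further down, where the MMIO optimization deliberately builds an invalid entry as HCPhysInvMmioPg | X86_PTE_PAE_MBZ_MASK_NO_NX | X86_PTE_P. A small sketch of the distinction (illustrative only; it assumes, as the IS_P check itself does, that the NO_NX marker bits overlap X86_PTE_PAE_MBZ_MASK_NX):

    PGMSHWPTEPAE Pte;
    PGMSHWPTEPAE_SET(Pte, pVM->pgm.s.HCPhysInvMmioPg | X86_PTE_PAE_MBZ_MASK_NO_NX | X86_PTE_P);

    /* A naive test sees P=1 and would treat the marker as present... */
    Assert(PGMSHWPTEPAE_GET_U(Pte) & X86_PTE_P);
    /* ...while the accessor rejects it because must-be-zero bits are set too. */
    Assert(!PGMSHWPTEPAE_IS_P(Pte));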
trunk/src/VBox/VMM/PGMMap.cpp (r31123 → r31775)

         pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs);
         pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE);
-        pNew->aPTs[i].paPaePTsR3 = (PX86PTPAE)pbPTs;
+        pNew->aPTs[i].paPaePTsR3 = (PPGMSHWPTPAE)pbPTs;
         pNew->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pbPTs);
         pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs);
…
             while (cb > 0 && iPTE < RT_ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))
             {
-                if (!CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].n.u1Present)
+                PCPGMSHWPTEPAE pPte = &pCur->aPTs[iPT].CTXALLSUFF(paPaePTs)[iPTE / 512].a[iPTE % 512];
+                if (!PGMSHWPTEPAE_IS_P(*pPte))
                     return VERR_PAGE_NOT_PRESENT;
-                RTHCPHYS HCPhys = CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].u & X86_PTE_PAE_PG_MASK;
+                RTHCPHYS HCPhys = PGMSHWPTEPAE_GET_HCPHYS(*pPte);
 
                 /*
trunk/src/VBox/VMM/PGMPool.cpp (r31170 → r31775)

     {
         bool fFoundFirst = false;
-        PX86PTPAE pPT = (PX86PTPAE)pvShw;
+        PPGMSHWPTPAE pPT = (PPGMSHWPTPAE)pvShw;
         for (unsigned ptIndex = 0; ptIndex < RT_ELEMENTS(pPT->a); ptIndex++)
         {
…
                 fFoundFirst = true;
             }
-            if (pPT->a[ptIndex].n.u1Present)
+            if (PGMSHWPTEPAE_IS_P(pPT->a[ptIndex]))
             {
-                pgmPoolTracDerefGCPhysHint(pPool, pPage, pPT->a[ptIndex].u & X86_PTE_PAE_PG_MASK, NIL_RTGCPHYS);
+                pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pPT->a[ptIndex]), NIL_RTGCPHYS);
                 if (pPage->iFirstPresent == ptIndex)
                     pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
…
     if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
     {
-        PX86PTPAE pShwPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
+        PPGMSHWPTPAE pShwPT = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
         {
             PX86PTPAE pGstPT;
…
             for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
             {
-                if (pShwPT->a[j].n.u1Present)
+                if (PGMSHWPTEPAE_IS_P(pShwPT->a[j]))
                 {
                     RTHCPHYS HCPhys = NIL_RTHCPHYS;
                     rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[j].u & X86_PTE_PAE_PG_MASK, &HCPhys);
-                    if (    rc != VINF_SUCCESS
-                        ||  (pShwPT->a[j].u & X86_PTE_PAE_PG_MASK) != HCPhys)
+                    if (   rc != VINF_SUCCESS
+                        || PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[j]) != HCPhys)
                     {
                         if (fFirstMsg)
…
                             fFirstMsg = false;
                         }
-                        pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch HCPhys: rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, j, pGstPT->a[j].u, pShwPT->a[j].u, HCPhys);
+                        pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch HCPhys: rc=%Rrc idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, j, pGstPT->a[j].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), HCPhys);
                     }
-                    else
-                    if (    pShwPT->a[j].n.u1Write
-                        && !pGstPT->a[j].n.u1Write)
+                    else if (   PGMSHWPTEPAE_IS_RW(pShwPT->a[j])
+                             && !pGstPT->a[j].n.u1Write)
                     {
                         if (fFirstMsg)
…
                             fFirstMsg = false;
                         }
-                        pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch r/w gst/shw: idx=%d guest %RX64 shw=%RX64 vs %RHp\n", j, pGstPT->a[j].u, pShwPT->a[j].u, HCPhys);
+                        pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch r/w gst/shw: idx=%d guest %RX64 shw=%RX64 vs %RHp\n", j, pGstPT->a[j].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), HCPhys);
                     }
                 }
…
                 if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
                 {
-                    PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage);
+                    PPGMSHWPTPAE pShwPT2 = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage);
 
                     for (unsigned k = 0; k < RT_ELEMENTS(pShwPT->a); k++)
                     {
-                        if (    pShwPT2->a[k].n.u1Present
-                            &&  pShwPT2->a[k].n.u1Write
+                        if (   PGMSHWPTEPAE_IS_P_RW(pShwPT2->a[k])
 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                             && !pPage->fDirty
 # endif
-                            && ((pShwPT2->a[k].u & X86_PTE_PAE_PG_MASK) == HCPhysPT))
+                            && PGMSHWPTEPAE_GET_HCPHYS(pShwPT2->a[k]) == HCPhysPT)
                         {
                             if (fFirstMsg)
…
                                 fFirstMsg = false;
                             }
-                            pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch: r/w: GCPhys=%RGp idx=%d shw %RX64 %RX64\n", pTempPage->GCPhys, k, pShwPT->a[k].u, pShwPT2->a[k].u);
+                            pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch: r/w: GCPhys=%RGp idx=%d shw %RX64 %RX64\n", pTempPage->GCPhys, k, PGMSHWPTEPAE_GET_LOG(pShwPT->a[k]), PGMSHWPTEPAE_GET_LOG(pShwPT2->a[k]));
                         }
                     }
trunk/src/VBox/VMM/PGMShw.h (r31066 → r31775)

 
 #else
-# define SHWPT          X86PTPAE
-# define PSHWPT         PX86PTPAE
-# define SHWPTE         X86PTEPAE
-# define PSHWPTE        PX86PTEPAE
+# define SHWPT          PGMSHWPTPAE
+# define PSHWPT         PPGMSHWPTPAE
+# define SHWPTE         PGMSHWPTEPAE
+# define PSHWPTE        PPGMSHWPTEPAE
 # define SHWPD          X86PDPAE
 # define PSHWPD         PX86PDPAE
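PGMAllBth.h below touches shadow PTEs only through type-neutral SHW_PTE_* wrappers (SHW_PTE_IS_P, SHW_PTE_SET, SHW_PTE_LOG64, SHW_PTE_ATOMIC_SET2, ...), whose definitions are not part of this changeset. A hypothetical sketch of how such wrappers would forward to the PAE-safe accessors when SHWPTE is PGMSHWPTEPAE (for orientation only; the real definitions may differ):

    /* Hypothetical mapping, not from this diff: */
    # define SHW_PTE_IS_P(Pte)               PGMSHWPTEPAE_IS_P(Pte)
    # define SHW_PTE_GET_HCPHYS(Pte)         PGMSHWPTEPAE_GET_HCPHYS(Pte)
    # define SHW_PTE_LOG64(Pte)              PGMSHWPTEPAE_GET_LOG(Pte)
    # define SHW_PTE_SET(Pte, uNew)          PGMSHWPTEPAE_SET(Pte, uNew)
    # define SHW_PTE_ATOMIC_SET2(Pte, Pte2)  PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)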
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r31657 → r31775)

  * @param   pPage       The page in question.
  * @param   fPteSrc     The flags of the source PTE.
- * @param   pPteDst     The shadow PTE (output).
+ * @param   pPteDst     The shadow PTE (output).  This is temporary storage and
+ *                      does not need to be set atomically.
  */
 DECLINLINE(void) PGM_BTH_NAME(SyncHandlerPte)(PVM pVM, PCPGMPAGE pPage, uint32_t fPteSrc, PSHWPTE pPteDst)
…
         /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */
 #else
-        pPteDst->u = (fPteSrc & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
-                   | PGM_PAGE_GET_HCPHYS(pPage);
+        SHW_PTE_SET(*pPteDst,
+                    (fPteSrc & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
+                    | PGM_PAGE_GET_HCPHYS(pPage));
 #endif
     }
…
 # else
         /* Set high page frame bits that MBZ (bankers on PAE, CPU dependent on AMD64). */
-        pPteDst->u = pVM->pgm.s.HCPhysInvMmioPg | X86_PTE_PAE_MBZ_MASK_NO_NX | X86_PTE_P;
+        SHW_PTE_SET(*pPteDst, pVM->pgm.s.HCPhysInvMmioPg | X86_PTE_PAE_MBZ_MASK_NO_NX | X86_PTE_P);
 # endif
     }
…
     {
         LogFlow(("SyncHandlerPte: monitored page (%R[pgmpage]) -> mark not present\n", pPage));
-        pPteDst->u = 0;
+        SHW_PTE_SET(*pPteDst, 0);
     }
     /** @todo count these kinds of entries. */
…
     if (PGM_PAGE_IS_BALLOONED(pPage))
     {
-        Assert(!pPteDst->n.u1Present); /** @todo user tracking needs updating if this triggers. */
+        Assert(!SHW_PTE_IS_P(*pPteDst)); /** @todo user tracking needs updating if this triggers. */
         return;
     }
…
         LogFlow(("SyncPageWorker: page and or page directory not accessed -> mark not present\n"));
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,AccessedPage));
-        PteDst.u = 0;
+        SHW_PTE_SET(PteDst, 0);
     }
     /*
…
     {
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtyPage));
-        PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
-                 | PGM_PAGE_GET_HCPHYS(pPage)
-                 | PGM_PTFLAGS_TRACK_DIRTY;
+        SHW_PTE_SET(PteDst,
+                    (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
+                    | PGM_PAGE_GET_HCPHYS(pPage)
+                    | PGM_PTFLAGS_TRACK_DIRTY);
     }
     else
…
         /* PteDst.n.u1Size = 0 */
 #else
-        PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                 | PGM_PAGE_GET_HCPHYS(pPage);
+        SHW_PTE_SET(PteDst,
+                    (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+                    | PGM_PAGE_GET_HCPHYS(pPage));
 #endif
     }
…
      * Make sure only allocated pages are mapped writable.
      */
-    if (    PteDst.n.u1Write
-        &&  PteDst.n.u1Present
+    if (    SHW_PTE_IS_P_RW(PteDst)
         &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
     {
         /* Still applies to shared pages. */
         Assert(!PGM_PAGE_IS_ZERO(pPage));
-        PteDst.n.u1Write = 0;    /** @todo this isn't quite working yet. Why, isn't it? */
+        SHW_PTE_SET_RO(PteDst);  /** @todo this isn't quite working yet. Why, isn't it? */
         Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage]at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst));
     }
…
      * Keep user track up to date.
      */
-    if (PteDst.n.u1Present)
+    if (SHW_PTE_IS_P(PteDst))
     {
-        if (!pPteDst->n.u1Present)
+        if (!SHW_PTE_IS_P(*pPteDst))
             PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
-        else if ((pPteDst->u & SHW_PTE_PG_MASK) != (PteDst.u & SHW_PTE_PG_MASK))
+        else if (SHW_PTE_GET_HCPHYS(*pPteDst) != SHW_PTE_GET_HCPHYS(PteDst))
         {
-            Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
-            PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK, iPTDst);
+            Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", SHW_PTE_LOG64(*pPteDst), SHW_PTE_LOG64(PteDst)));
+            PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, SHW_PTE_GET_HCPHYS(*pPteDst), iPTDst);
             PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
         }
     }
-    else if (pPteDst->n.u1Present)
+    else if (SHW_PTE_IS_P(*pPteDst))
     {
-        Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
-        PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK, iPTDst);
+        Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", SHW_PTE_LOG64(*pPteDst)));
+        PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, SHW_PTE_GET_HCPHYS(*pPteDst), iPTDst);
     }
…
     pShwPage->fSeenNonGlobal = true;
 #endif
-    ASMAtomicWriteSize(pPteDst, PteDst.u);
+    SHW_PTE_ATOMIC_SET2(*pPteDst, PteDst);
     return;
 }
…
  * an empty entry, making sure to keep the user tracking up to date.
  */
-    if (pPteDst->n.u1Present)
-    {
-        Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
-        PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK, iPTDst);
-    }
-    ASMAtomicWriteSize(pPteDst, 0);
+    if (SHW_PTE_IS_P(*pPteDst))
+    {
+        Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", SHW_PTE_LOG64(*pPteDst)));
+        PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, SHW_PTE_GET_HCPHYS(*pPteDst), iPTDst);
+    }
+    SHW_PTE_ATOMIC_SET(*pPteDst, 0);
 }
…
         for (; iPTDst < iPTDstEnd; iPTDst++)
         {
-            if (!pPTDst->a[iPTDst].n.u1Present)
+            if (!SHW_PTE_IS_P(pPTDst->a[iPTDst]))
             {
                 GSTPTE PteSrc = pPTSrc->a[offPTSrc + iPTDst];
…
                       PteSrc.n.u1User & PdeSrc.n.u1User,
                       (uint64_t)PteSrc.u,
-                      (uint64_t)pPTDst->a[iPTDst].u,
-                      pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
+                      SHW_PTE_LOG64(pPTDst->a[iPTDst]),
+                      SHW_PTE_IS_TRACK_DIRTY(pPTDst->a[iPTDst]) ? " Track-Dirty" : ""));
             }
         }
…
                       PteSrc.n.u1User & PdeSrc.n.u1User,
                       (uint64_t)PteSrc.u,
-                      (uint64_t)pPTDst->a[iPTDst].u,
-                      pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
+                      SHW_PTE_LOG64(pPTDst->a[iPTDst]),
+                      SHW_PTE_IS_TRACK_DIRTY(pPTDst->a[iPTDst]) ? " Track-Dirty" : ""));
             }
         }
…
         {
             LogFlow(("PGM_GCPHYS_2_PTR %RGp failed with %Rrc\n", GCPhys, rc));
-            Assert(!pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK].n.u1Present);
+            Assert(!SHW_PTE_IS_P(pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK]));
         }
     }
…
                                              &PteDst);
             else
-                PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                         | PGM_PAGE_GET_HCPHYS(pPage);
+                SHW_PTE_SET(PteDst,
+                            (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+                            | PGM_PAGE_GET_HCPHYS(pPage));
 
             const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
-            if (    PteDst.n.u1Present
-                && !pPTDst->a[iPTDst].n.u1Present)
+            if (    SHW_PTE_IS_P(PteDst)
+                && !SHW_PTE_IS_P(pPTDst->a[iPTDst]))
                 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
 
             /* Make sure only allocated pages are mapped writable. */
-            if (    PteDst.n.u1Write
-                &&  PteDst.n.u1Present
+            if (    SHW_PTE_IS_P_RW(PteDst)
                 &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
             {
                 /* Still applies to shared pages. */
                 Assert(!PGM_PAGE_IS_ZERO(pPage));
-                PteDst.n.u1Write = 0;    /** @todo this isn't quite working yet... */
+                SHW_PTE_SET_RO(PteDst);  /** @todo this isn't quite working yet... */
                 Log3(("SyncPage: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, GCPtrPage));
             }
 
-            ASMAtomicWriteSize(&pPTDst->a[iPTDst], PteDst.u);
+            SHW_PTE_ATOMIC_SET2(pPTDst->a[iPTDst], PteDst);
 
             /*
…
         for (; iPTDst < iPTDstEnd; iPTDst++)
         {
-            if (!pPTDst->a[iPTDst].n.u1Present)
+            if (!SHW_PTE_IS_P(pPTDst->a[iPTDst]))
             {
                 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
…
                       PteSrc.n.u1User & PdeSrc.n.u1User,
                       (uint64_t)PteSrc.u,
-                      (uint64_t)pPTDst->a[iPTDst].u,
-                      pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
+                      SHW_PTE_LOG64(pPTDst->a[iPTDst]),
+                      SHW_PTE_IS_TRACK_DIRTY(pPTDst->a[iPTDst]) ? " Track-Dirty" : ""));
 
                 if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)))
…
             }
             else
-                Log4(("%RGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, pPTDst->a[iPTDst].u));
+                Log4(("%RGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, SHW_PTE_LOG64(pPTDst->a[iPTDst]) ));
         }
     }
…
                   PteSrc.n.u1User & PdeSrc.n.u1User,
                   (uint64_t)PteSrc.u,
-                  (uint64_t)pPTDst->a[iPTDst].u,
-                  pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
+                  SHW_PTE_LOG64(pPTDst->a[iPTDst]),
+                  SHW_PTE_IS_TRACK_DIRTY(pPTDst->a[iPTDst]) ? " Track-Dirty" : ""));
     }
     return VINF_SUCCESS;
…
             PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
             PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-            if (    pPteDst->n.u1Present
-                &&  pPteDst->n.u1Write)
+            if (SHW_PTE_IS_P_RW(*pPteDst))
             {
                 /* Stale TLB entry. */
…
     PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
     PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-    if (pPteDst->n.u1Present)      /** @todo Optimize accessed bit emulation? */
+    if (SHW_PTE_IS_P(*pPteDst))    /** @todo Optimize accessed bit emulation? */
     {
-        if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
+        if (SHW_PTE_IS_TRACK_DIRTY(*pPteDst))
         {
             PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
…
                     Assert(!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage));
                     /* Assuming write handlers here as the PTE is present (otherwise we wouldn't be here). */
-                    PteDst.n.u1Write = 0;
+                    SHW_PTE_SET_RO(PteDst);
                 }
                 else
…
                 }
                 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
-                    PteDst.n.u1Write = 1;
+                    SHW_PTE_SET_RW(PteDst);
                 else
                 {
                     /* Still applies to shared pages. */
                     Assert(!PGM_PAGE_IS_ZERO(pPage));
-                    PteDst.n.u1Write = 0;
+                    SHW_PTE_SET_RO(PteDst);
                 }
             }
         }
         else
-            PteDst.n.u1Write = 1;    /** @todo r=bird: This doesn't make sense to me. */
-
-        PteDst.n.u1Dirty    = 1;
-        PteDst.n.u1Accessed = 1;
-        PteDst.au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
-        ASMAtomicWriteSize(pPteDst, PteDst.u);
+            SHW_PTE_SET_RW(PteDst);  /** @todo r=bird: This doesn't make sense to me. */
+
+        SHW_PTE_SET(PteDst, (SHW_PTE_GET_U(PteDst) | X86_PTE_D | X86_PTE_A) & ~(uint64_t)PGM_PTFLAGS_TRACK_DIRTY);
+        SHW_PTE_ATOMIC_SET2(*pPteDst, PteDst);
         PGM_INVL_PG(pVCpu, GCPtrPage);
         return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
…
     /* Check for stale TLB entry; only applies to the SMP guest case. */
     if (   pVM->cCpus > 1
-        && pPteDst->n.u1Write == 1
-        && pPteDst->n.u1Accessed == 1)
+        && SHW_PTE_IS_RW(*pPteDst)
+        && SHW_PTE_IS_A(*pPteDst))
     {
         /* Stale TLB entry. */
…
                   PteSrc.n.u1User & PdeSrc.n.u1User,
                   (uint64_t)PteSrc.u,
-                  pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
+                  SHW_PTE_IS_TRACK_DIRTY(pPTDst->a[iPTDst]) ? " Track-Dirty" : "", SHW_PTE_LOG64(pPTDst->a[iPTDst]), iPTSrc, PdeSrc.au32[0],
                   (RTGCPHYS)((PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)) ));
     }
…
         /* Get address and flags from the source PDE. */
         SHWPTE PteDstBase;
-        PteDstBase.u = PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
+        SHW_PTE_SET(PteDstBase, PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT));
 
         /* Loop thru the entries in the shadow PT. */
…
             if (    PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                 && (    PGM_PAGE_IS_ZERO(pPage)
-                    ||  (   PteDstBase.n.u1Write
+                    ||  (   SHW_PTE_IS_RW(PteDstBase)
                          && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
 # ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES
…
                 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
                 {
+                    /** @todo call SyncHandlerPte!!
+                     *        FIXME FIXME FIXME */
                     if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
                     {
-                        PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
-                        PteDst.n.u1Write = 0;
+                        SHW_PTE_SET(PteDst, PGM_PAGE_GET_HCPHYS(pPage) | SHW_PTE_GET_U(PteDstBase));
+                        SHW_PTE_SET_RO(PteDst);
                     }
                     else
-                        PteDst.u = 0;
+                        SHW_PTE_SET(PteDst, 0);
                 }
                 else if (PGM_PAGE_IS_BALLOONED(pPage))
-                {
-                    /* Skip ballooned pages. */
-                    PteDst.u = 0;
-                }
+                    SHW_PTE_SET(PteDst, 0);  /* Handle ballooned pages at #PF time. */
 # ifndef IN_RING0
                 /*
…
                 else if (   !PdeSrc.n.u1User
                          && CSAMDoesPageNeedScanning(pVM, GCPtr | (iPTDst << SHW_PT_SHIFT)))
-                    PteDst.u = 0;
+                    SHW_PTE_SET(PteDst, 0);
 # endif
                 else
-                    PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
+                    SHW_PTE_SET(PteDst, PGM_PAGE_GET_HCPHYS(pPage) | SHW_PTE_GET_U(PteDstBase));
 
                 /* Only map writable pages writable. */
-                if (    PteDst.n.u1Write
-                    &&  PteDst.n.u1Present
+                if (    SHW_PTE_IS_P_RW(PteDst)
                     &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
                 {
                     /* Still applies to shared pages. */
                     Assert(!PGM_PAGE_IS_ZERO(pPage));
-                    PteDst.n.u1Write = 0;    /** @todo this isn't quite working yet... */
+                    SHW_PTE_SET_RO(PteDst);  /** @todo this isn't quite working yet... */
                     Log3(("SyncPT: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))));
                 }
 
-                if (PteDst.n.u1Present)
+                if (SHW_PTE_IS_P(PteDst))
                     PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
 
-                /* commit it */
+                /* commit it (not atomic, new table) */
                 pPTDst->a[iPTDst] = PteDst;
                 Log4(("SyncPT: BIG %RGv PteDst:{P=%d RW=%d U=%d raw=%08llx}%s\n",
-                      (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), PteDst.n.u1Present, PteDst.n.u1Write, PteDst.n.u1User, (uint64_t)PteDst.u,
-                      PteDst.u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
+                      (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), SHW_PTE_IS_P(PteDst), SHW_PTE_IS_RW(PteDst), SHW_PTE_IS_US(PteDst), SHW_PTE_LOG64(PteDst),
+                      SHW_PTE_IS_TRACK_DIRTY(PteDst) ? " Track-Dirty" : ""));
 
                 /* advance */
…
                 do
                 {
-                    pPTDst->a[iPTDst].u = 0;  /* MMIO or invalid page, we must handle them manually. */
+                    SHW_PTE_SET(pPTDst->a[iPTDst], 0);  /* Invalid page, we must handle them manually. */
                     GCPhys += PAGE_SIZE;
                     iPTDst++;
…
                 Log(("Invalid pages at %RGp (2)\n", GCPhys));
                 for ( ; iPTDst < RT_ELEMENTS(pPTDst->a); iPTDst++)
-                    pPTDst->a[iPTDst].u = 0;  /* MMIO or invalid page, we must handle them manually. */
+                    SHW_PTE_SET(pPTDst->a[iPTDst], 0);  /* Invalid page, we must handle them manually. */
             }
         } /* while more PTEs */
…
                 const SHWPTE PteDst = pPTDst->a[iPT];
 
-                /* skip not-present entries. */
-                if (!(PteDst.u & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY)))   /** @todo deal with ALL handlers and CSAM !P pages! */
+                /* skip not-present and dirty tracked entries. */
+                if (!(SHW_PTE_GET_U(PteDst) & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY)))   /** @todo deal with ALL handlers and CSAM !P pages! */
                     continue;
-                Assert(PteDst.n.u1Present);
+                Assert(SHW_PTE_IS_P(PteDst));
 
                 const GSTPTE PteSrc = pPTSrc->a[iPT + offPTSrc];
…
 # endif
                     AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
-                                     GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u, pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
+                                     GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst), pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
                                      (PdeSrc.u & GST_PDE_PG_MASK) + (iPT + offPTSrc)*sizeof(PteSrc)));
                     cErrors++;
…
                 /* match the physical addresses */
-                HCPhysShw = PteDst.u & SHW_PTE_PG_MASK;
+                HCPhysShw = SHW_PTE_GET_HCPHYS(PteDst);
                 GCPhysGst = PteSrc.u & GST_PTE_PG_MASK;
…
                     {
                         AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                         continue;
…
                     {
                         AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                         continue;
…
                     {
                         AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                         continue;
                     }
 # endif
-                    if (PteDst.n.u1Write)
+                    if (SHW_PTE_IS_RW(PteDst))
                     {
                         AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, GCPhysGst, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                     }
…
                 {
                     AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp pPhysPage:%R[pgmpage] GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
-                                     GCPtr + off, HCPhysShw, pPhysPage, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                     GCPtr + off, HCPhysShw, pPhysPage, GCPhysGst, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                     cErrors++;
                     continue;
…
                     if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
                     {
-                        if (PteDst.n.u1Write)
+                        if (SHW_PTE_IS_RW(PteDst))
                         {
                             AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
-                                             GCPtr + off, pPhysPage, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                             GCPtr + off, pPhysPage, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                             cErrors++;
                             continue;
…
                     else
                     {
-                        if (   PteDst.n.u1Present
+                        if (   SHW_PTE_IS_P(PteDst)
 # if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
                             && !PGM_PAGE_IS_MMIO(pPhysPage)
…
                         {
                             AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
-                                             GCPtr + off, pPhysPage, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                             GCPtr + off, pPhysPage, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                             cErrors++;
                             continue;
…
                 if (!PteSrc.n.u1Dirty && PteSrc.n.u1Write)
                 {
-                    if (PteDst.n.u1Write)
+                    if (SHW_PTE_IS_RW(PteDst))
                     {
                         AssertMsgFailed(("!DIRTY page at %RGv is writable! PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                         continue;
                     }
-                    if (!(PteDst.u & PGM_PTFLAGS_TRACK_DIRTY))
+                    if (!SHW_PTE_IS_TRACK_DIRTY(PteDst))
                     {
                         AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                         continue;
                     }
-                    if (PteDst.n.u1Dirty)
+                    if (SHW_PTE_IS_D(PteDst))
                     {
                         AssertMsgFailed(("!DIRTY page at %RGv is marked DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                     }
…
                     {
                         AssertMsgFailed(("!DIRTY page at %RGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                     }
…
 # endif
                 }
-                else if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
+                else if (SHW_PTE_IS_TRACK_DIRTY(PteDst))
                 {
                     /* access bit emulation (not implemented). */
-                    if (PteSrc.n.u1Accessed || PteDst.n.u1Present)
+                    if (PteSrc.n.u1Accessed || SHW_PTE_IS_P(PteDst))
                     {
                         AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                         continue;
                     }
-                    if (!PteDst.n.u1Accessed)
+                    if (!SHW_PTE_IS_A(PteDst))
                     {
                         AssertMsgFailed(("!ACCESSED page at %RGv is has the accessed bit set! PteSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                     }
…
                 }
 
-                if (   (PteSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
-                    && (PteSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags)
+                if (   (PteSrc.u & ~fIgnoreFlags) != (SHW_PTE_GET_U(PteDst) & ~fIgnoreFlags)
+                    && (PteSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (SHW_PTE_GET_U(PteDst) & ~fIgnoreFlags)
                    )
                 {
                     AssertMsgFailed(("Flags mismatch at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
-                                     GCPtr + off, (uint64_t)PteSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
-                                     fIgnoreFlags, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
+                                     GCPtr + off, (uint64_t)PteSrc.u & ~fIgnoreFlags, SHW_PTE_LOG64(PteDst) & ~fIgnoreFlags,
+                                     fIgnoreFlags, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst)));
                     cErrors++;
                     continue;
…
                 const SHWPTE PteDst = pPTDst->a[iPT];
 
-                if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
+                if (SHW_PTE_IS_TRACK_DIRTY(PteDst))
                 {
                     AssertMsgFailed(("The PTE at %RGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                     GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                     GCPtr + off, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                     cErrors++;
                 }
 
                 /* skip not-present entries. */
-                if (!PteDst.n.u1Present)     /** @todo deal with ALL handlers and CSAM !P pages! */
+                if (!SHW_PTE_IS_P(PteDst))   /** @todo deal with ALL handlers and CSAM !P pages! */
                     continue;
…
                 /* match the physical addresses */
-                HCPhysShw = PteDst.u & X86_PTE_PAE_PG_MASK;
+                HCPhysShw = SHW_PTE_GET_HCPHYS(PteDst);
 
 # ifdef IN_RING3
…
                     {
                         AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                         GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                     }
…
                 {
                     AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                     GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                     GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                     cErrors++;
                     continue;
…
                     {
                         AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                         GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                         continue;
                     }
 # endif
-                    if (PteDst.n.u1Write)
+                    if (SHW_PTE_IS_RW(PteDst))
                     {
                         AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                         GCPtr + off, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                         GCPtr + off, GCPhysGst, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                         cErrors++;
                     }
…
                 {
                     AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp pPhysPage=%R[pgmpage] GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                     GCPtr + off, HCPhysShw, pPhysPage, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                     GCPtr + off, HCPhysShw, pPhysPage, GCPhysGst, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                     cErrors++;
                     continue;
…
                     if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
                     {
-                        if (PteDst.n.u1Write)
+                        if (SHW_PTE_IS_RW(PteDst))
                         {
                             AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                             GCPtr + off, pPhysPage, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                             GCPtr + off, pPhysPage, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                             cErrors++;
                             continue;
…
                     else
                     {
-                        if (   PteDst.n.u1Present
+                        if (   SHW_PTE_IS_P(PteDst)
 # if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
                             && !PGM_PAGE_IS_MMIO(pPhysPage)
…
                         {
                             AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                             GCPtr + off, pPhysPage, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                             GCPtr + off, pPhysPage, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                             cErrors++;
                             continue;
…
                 }
 
-                if (   (PdeSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
-                    && (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags) /* lazy phys handler dereg. */
+                if (   (PdeSrc.u & ~fIgnoreFlags) != (SHW_PTE_GET_U(PteDst) & ~fIgnoreFlags)
+                    && (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (SHW_PTE_GET_U(PteDst) & ~fIgnoreFlags) /* lazy phys handler dereg. */
                    )
                 {
                     AssertMsgFailed(("Flags mismatch (BT) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
-                                     GCPtr + off, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
-                                     fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
+                                     GCPtr + off, (uint64_t)PdeSrc.u & ~fIgnoreFlags, SHW_PTE_LOG64(PteDst) & ~fIgnoreFlags,
+                                     fIgnoreFlags, (uint64_t)PdeSrc.u, SHW_PTE_LOG64(PteDst)));
                     cErrors++;
                     continue;
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp (r31402 → r31775)

 
         /* pae */
-        pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;
+        PGMSHWPTEPAE_SET(pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512], Pte.u);
 
         /* next */
…
     /*
      * Validate input.
      */
-    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
+    AssertMsg(!(fFlags & (X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)), ("fFlags=%#x\n", fFlags));
     Assert(cb);
…
 
             /* PAE */
-            pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
-            pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;
+            PPGMSHWPTEPAE pPtePae = &pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512];
+            PGMSHWPTEPAE_SET(*pPtePae,
+                             (  PGMSHWPTEPAE_GET_U(*pPtePae)
+                              & (fMask | X86_PTE_PAE_PG_MASK))
+                             | (fFlags & ~(X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)));
 
             /* invalidate tls */
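The read-modify-write above, spelled out (illustrative only; it assumes fMask is the AND-mask of flag bits to keep, which is how the code applies it — e.g. write-protecting an entry would pass fFlags = 0 and fMask = ~(uint64_t)X86_PTE_RW). The added ~X86_PTE_PAE_MBZ_MASK_NX term means callers can never set must-be-zero bits and accidentally turn a live entry into the "invalid" marker pattern:

    uint64_t const uOld = PGMSHWPTEPAE_GET_U(*pPtePae);
    uint64_t const uNew = (uOld & (fMask | X86_PTE_PAE_PG_MASK))                       /* keep address + retained flags */
                        | (fFlags & ~(X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)); /* never set address/MBZ bits    */
    PGMSHWPTEPAE_SET(*pPtePae, uNew);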
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r31715 → r31775)

 #include <iprt/asm-amd64-x86.h>
 #include <iprt/string.h>
-
-
-/*******************************************************************************
-*   Defined Constants And Macros                                               *
-*******************************************************************************/
-/**
- * Checks if a PAE PTE entry is actually present and not just invalid because
- * of the MMIO optimization.
- * @todo Move this to PGMInternal.h if necessary.
- */
-#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
-# define PGM_POOL_IS_PAE_PTE_PRESENT(Pte) \
-    ( ((Pte).u & (X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == X86_PTE_P )
-#else
-# define PGM_POOL_IS_PAE_PTE_PRESENT(Pte) \
-    ( (Pte).n.u1Present )
-#endif
-
-/**
- * Checks if a EPT PTE entry is actually present and not just invalid
- * because of the MMIO optimization.
- * @todo Move this to PGMInternal.h if necessary.
- */
-#define PGM_POOL_IS_EPT_PTE_PRESENT(Pte) \
-    ( (Pte).n.u1Present )
 
…
 #endif
 #if defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)
-static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT);
+static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT);
 #endif
…
     union
     {
-        void       *pv;
-        PX86PT      pPT;
-        PX86PTPAE   pPTPae;
-        PX86PD      pPD;
-        PX86PDPAE   pPDPae;
-        PX86PDPT    pPDPT;
-        PX86PML4    pPML4;
+        void           *pv;
+        PX86PT          pPT;
+        PPGMSHWPTPAE    pPTPae;
+        PX86PD          pPD;
+        PX86PDPAE       pPDPae;
+        PX86PDPT        pPDPT;
+        PX86PML4        pPML4;
     } uShw;
…
             const unsigned iShw = (off / sizeof(X86PTE)) & (X86_PG_PAE_ENTRIES - 1);
             LogFlow(("PGMPOOLKIND_PAE_PT_FOR_32BIT_PT iShw=%x\n", iShw));
-            if (PGM_POOL_IS_PAE_PTE_PRESENT(uShw.pPTPae->a[iShw]))
+            if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw]))
             {
                 X86PTE GstPte;
…
                 Log4(("pgmPoolMonitorChainChanging pae_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PG_MASK));
                 pgmPoolTracDerefGCPhysHint(pPool, pPage,
-                                           uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK,
+                                           PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]),
                                            GstPte.u & X86_PTE_PG_MASK,
                                            iShw);
-                ASMAtomicWriteSize(&uShw.pPTPae->a[iShw], 0);
+                PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw], 0);
             }
         }
…
             const unsigned iShw = off / sizeof(X86PTEPAE);
             STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
-            if (PGM_POOL_IS_PAE_PTE_PRESENT(uShw.pPTPae->a[iShw]))
+            if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw]))
             {
                 X86PTEPAE GstPte;
…
                 AssertRC(rc);
 
-                Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PAE_PG_MASK));
+                Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]), GstPte.u & X86_PTE_PAE_PG_MASK));
                 pgmPoolTracDerefGCPhysHint(pPool, pPage,
-                                           uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK,
+                                           PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]),
                                            GstPte.u & X86_PTE_PAE_PG_MASK,
                                            iShw);
-                ASMAtomicWriteSize(&uShw.pPTPae->a[iShw].u, 0);
+                PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw], 0);
             }
 
…
                 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPTPae->a));
 
-                if (PGM_POOL_IS_PAE_PTE_PRESENT(uShw.pPTPae->a[iShw2]))
+                if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw2]))
                 {
                     X86PTEPAE GstPte;
…
 # endif
                     AssertRC(rc);
-                    Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", uShw.pPTPae->a[iShw2].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PAE_PG_MASK));
+                    Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw2]), GstPte.u & X86_PTE_PAE_PG_MASK));
                     pgmPoolTracDerefGCPhysHint(pPool, pPage,
-                                               uShw.pPTPae->a[iShw2].u & X86_PTE_PAE_PG_MASK,
+                                               PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw2]),
                                                GstPte.u & X86_PTE_PAE_PG_MASK,
                                                iShw2);
-                    ASMAtomicWriteSize(&uShw.pPTPae->a[iShw2].u ,0);
+                    PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw2], 0);
                 }
             }
…
      * it's fairly safe to assume the guest is reusing the PT.
      */
-    if (PGM_POOL_IS_PAE_PTE_PRESENT(GstPte))
+    if (PGMSHWPTEPAE_IS_P(GstPte))
     {
         RTHCPHYS HCPhys = -1;
…
         void *pvGst;
         int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
-        pgmPoolTrackCheckPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
+        pgmPoolTrackCheckPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
         PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
         PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
…
  * @param   pGstPT      The guest page table.
  */
-static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT)
+static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT)
 {
     unsigned cErrors    = 0;
…
 #ifdef VBOX_STRICT
     for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
-        AssertMsg(!PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, pShwPT->a[i].u, pPage->iFirstPresent));
+        AssertMsg(!PGMSHWPTEPAE_IS_P(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), pPage->iFirstPresent));
 #endif
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
     {
-        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
+        if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
         {
             RTHCPHYS HCPhys = NIL_RTHCPHYS;
             int rc = PGMPhysGCPhys2HCPhys(pVM, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
             if (    rc != VINF_SUCCESS
-                ||  (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) != HCPhys)
+                ||  PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]) != HCPhys)
             {
-                Log(("rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, i, pGstPT->a[i].u, pShwPT->a[i].u, HCPhys));
+                Log(("rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, i, pGstPT->a[i].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), HCPhys));
                 LastPTE     = i;
                 LastRc      = rc;
…
                     if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
                     {
-                        PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pTempPage);
+                        PPGMSHWPTPAE pShwPT2 = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pTempPage);
 
                         for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
                         {
-                            if (    PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT2->a[j])
-                                &&  pShwPT2->a[j].n.u1Write
-                                &&  (pShwPT2->a[j].u & X86_PTE_PAE_PG_MASK) == HCPhysPT)
+                            if (   PGMSHWPTEPAE_IS_P_RW(pShwPT2->a[j])
+                                && PGMSHWPTEPAE_GET_HCPHYS(pShwPT2->a[j]) == HCPhysPT)
                             {
-                                Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, pShwPT->a[j].u, pShwPT2->a[j].u));
+                                Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), PGMSHWPTEPAE_GET_LOG(pShwPT2->a[j])));
                             }
                         }
…
         }
     }
-    AssertMsg(!cErrors, ("cErrors=%d: last rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", cErrors, LastRc, LastPTE, pGstPT->a[LastPTE].u, pShwPT->a[LastPTE].u, LastHCPhys));
+    AssertMsg(!cErrors, ("cErrors=%d: last rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", cErrors, LastRc, LastPTE, pGstPT->a[LastPTE].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[LastPTE]), LastHCPhys));
 }
 # endif /* VBOX_STRICT */
…
  * @param   pfFlush     Flush reused page table (out)
  */
-DECLINLINE(unsigned) pgmPoolTrackFlushPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT,
+DECLINLINE(unsigned) pgmPoolTrackFlushPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT,
                                                PCX86PTPAE pOldGstPT, bool fAllowRemoval, bool *pfFlush)
 {
…
 #ifdef VBOX_STRICT
     for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
-        AssertMsg(!PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, pShwPT->a[i].u, pPage->iFirstPresent));
+        AssertMsg(!PGMSHWPTEPAE_IS_P(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), pPage->iFirstPresent));
 #endif
     *pfFlush = false;
…
             }
         }
-        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
+        if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
         {
             /* If the old cached PTE is identical, then there's no need to flush the shadow copy. */
…
                 RTHCPHYS HCPhys = NIL_RTGCPHYS;
                 int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
-                AssertMsg(rc == VINF_SUCCESS && (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) == HCPhys, ("rc=%d guest %RX64 old %RX64 shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pOldGstPT->a[i].u, pShwPT->a[i].u, HCPhys));
+                AssertMsg(rc == VINF_SUCCESS && PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]) == HCPhys, ("rc=%d guest %RX64 old %RX64 shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pOldGstPT->a[i].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), HCPhys));
 #endif
-                uint64_t uHostAttr  = pShwPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
-                bool     fHostRW    = !!(pShwPT->a[i].u & X86_PTE_RW);
+                uint64_t uHostAttr  = PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
+                bool     fHostRW    = !!(PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & X86_PTE_RW);
                 uint64_t uGuestAttr = pGstPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
                 bool     fGuestRW   = !!(pGstPT->a[i].u & X86_PTE_RW);
…
             /* Something was changed, so flush it. */
             Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX64 hint=%RX64\n",
-                  i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
-            pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK, i);
-            ASMAtomicWriteSize(&pShwPT->a[i].u, 0);
+                  i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
+            pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK, i);
+            PGMSHWPTEPAE_ATOMIC_SET(pShwPT->a[i], 0);
         }
     }
…
     rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
     bool  fFlush;
-    unsigned cChanges = pgmPoolTrackFlushPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst,
+    unsigned cChanges = pgmPoolTrackFlushPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst,
                                                   (PCX86PTPAE)&pPool->aDirtyPages[idxSlot][0], fAllowRemoval, &fFlush);
     PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
…
 #ifdef VBOX_STRICT
     void *pvShw = PGMPOOL_PAGE_2_PTR(pVM, pPage);
-    pgmPoolTrackCheckPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
+    pgmPoolTrackCheckPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
     PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
 #endif
…
             break;
         default:
+            /* (shouldn't be here, will assert below) */
             STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
             break;
…
         {
             const uint64_t  u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
-            PX86PTPAE       pPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
+            PPGMSHWPTPAE    pPT = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
             uint64_t        u64OrMask  = 0;
             uint64_t        u64AndMask = 0;
…
             switch (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage))
             {
-                case PGM_PAGE_HNDL_PHYS_STATE_NONE:     /** No handler installed. */
-                case PGM_PAGE_HNDL_PHYS_STATE_DISABLED: /** Monitoring is temporarily disabled. */
+                case PGM_PAGE_HNDL_PHYS_STATE_NONE:     /* No handler installed. */
+                case PGM_PAGE_HNDL_PHYS_STATE_DISABLED: /* Monitoring is temporarily disabled. */
                     u64OrMask  = X86_PTE_RW;
                     u64AndMask = UINT64_MAX;
…
                     break;
 
-                case PGM_PAGE_HNDL_PHYS_STATE_WRITE:    /** Write access is monitored. */
+                case PGM_PAGE_HNDL_PHYS_STATE_WRITE:    /* Write access is monitored. */
                     u64OrMask  = 0;
                     u64AndMask = ~((uint64_t)X86_PTE_RW);
…
 
                 default:
+                    /* (shouldn't be here, will assert below) */
                     STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
                     break;
…
             }
 
-            if ((pPT->a[iPte].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
+            if ((PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
             {
                 X86PTEPAE Pte;
 
-                Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX64\n", iPte, pPT->a[iPte]));
-                Pte.u = (pPT->a[iPte].u & u64AndMask) | u64OrMask;
+                Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX64\n", iPte, PGMSHWPTEPAE_GET_LOG(pPT->a[iPte])));
+                Pte.u = (PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & u64AndMask) | u64OrMask;
                 if (Pte.u & PGM_PTFLAGS_TRACK_DIRTY)
                     Pte.n.u1Write = 0;    /* need to disallow writes when dirty bit tracking is still active. */
 
-                ASMAtomicWriteSize(&pPT->a[iPte].u, Pte.u);
+                PGMSHWPTEPAE_ATOMIC_SET(pPT->a[iPte], Pte.u);
                 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
                 return fRet;
…
 #ifdef LOG_ENABLED
             Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
-            Log(("Found %RX64 expected %RX64\n", pPT->a[iPte].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P), u64));
+            Log(("Found %RX64 expected %RX64\n", PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX), u64));
             for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPT->a); i++)
-                if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
+                if ((PGMSHWPTEPAE_GET_U(pPT->a[i]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
                     Log(("i=%d cFound=%d\n", i, ++cFound));
 #endif
-            AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u64=%RX64 poolkind=%x iPte=%d PT=%RX64\n", pPage->iFirstPresent, pPage->cPresent, u64, pPage->enmKind, iPte, pPT->a[iPte].u));
+            AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u64=%RX64 poolkind=%x iPte=%d PT=%RX64\n", pPage->iFirstPresent, pPage->cPresent, u64, pPage->enmKind, iPte, PGMSHWPTEPAE_GET_LOG(pPT->a[iPte])));
             PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
             break;
…
         }
 
-        /* AMD-V nested paging - @todo merge with EPT as we only check the parts that are identical. */
+        /* AMD-V nested paging */ /** @todo merge with EPT as we only check the parts that are identical. */
         case PGMPOOLKIND_PAE_PD_PHYS:
         {
…
  *
  * This is typically called when the host page backing the guest one has been
- * replaced or when the page protection was changed due to an access handler.
+ * replaced or when the page protection was changed due to a guest access
+ * caught by the monitoring.
  *
  * @returns VBox status code.
…
      * This is simple but not quite optimal solution.
      */
-    const uint64_t  u64   = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
-    const uint32_t  u32   = u64;
+    const uint64_t  u64   = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P; /** @todo drop X86_PTE_P here as we always test if present separately, anyway. */
+    const uint32_t  u32   = u64;                                        /** @todo move into the 32BIT_PT_xx case */
     unsigned        cLeft = pPool->cUsedPages;
     unsigned        iPage = pPool->cCurPages;
…
             case PGMPOOLKIND_PAE_PT_FOR_PHYS:
             {
-                unsigned  cPresent = pPage->cPresent;
-                PX86PTPAE pPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
+                unsigned     cPresent = pPage->cPresent;
+                PPGMSHWPTPAE pPT = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
-                    if (PGM_POOL_IS_PAE_PTE_PRESENT(pPT->a[i]))
+                    if (PGMSHWPTEPAE_IS_P(pPT->a[i]))
                     {
-                        if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
+                        if ((PGMSHWPTEPAE_GET_U(pPT->a[i]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
                         {
                             //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
-                            pPT->a[i].u = 0;
+                            PGMSHWPTEPAE_SET(pPT->a[i], 0); /// @todo why not atomic?
 
                             /* Update the counter as we're removing references. */
…
                 PEPTPT pPT = (PEPTPT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
-                    if (PGM_POOL_IS_EPT_PTE_PRESENT(pPT->a[i]))
+                    if (pPT->a[i].n.u1Present)
                     {
                         if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
…
  * @param   pGstPT      The guest page table (just a half one).
  */
-DECLINLINE(void) pgmPoolTrackDerefPTPae32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PT pGstPT)
+DECLINLINE(void) pgmPoolTrackDerefPTPae32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PT pGstPT)
 {
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
-        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
+        if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
         {
             Log4(("pgmPoolTrackDerefPTPae32Bit: i=%d pte=%RX64 hint=%RX32\n",
-                  i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK));
-            pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK, i);
+                  i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PG_MASK));
+            pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PG_MASK, i);
             if (!pPage->cPresent)
                 break;
…
  * @param   pGstPT      The guest page table.
  */
-DECLINLINE(void) pgmPoolTrackDerefPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT)
+DECLINLINE(void) pgmPoolTrackDerefPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT)
 {
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
-        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
+        if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
         {
             Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX32 hint=%RX32\n",
-                  i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
-            pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, i);
+                  i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
+            pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, i);
             if (!pPage->cPresent)
                 break;
…
  * @param   pShwPT      The shadow page table (mapping of the page).
  */
-DECLINLINE(void) pgmPoolTrackDerefPTPaeBig(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT)
+DECLINLINE(void) pgmPoolTrackDerefPTPaeBig(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT)
 {
     RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
-        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
+        if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
         {
             Log4(("pgmPoolTrackDerefPTPaeBig: i=%d pte=%RX64 hint=%RGp\n",
-                  i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, GCPhys));
-            pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, GCPhys, i);
+                  i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), GCPhys));
+            pgmPoolTracDerefGCPhys(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), GCPhys, i);
             if (!pPage->cPresent)
                 break;
…
     RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
-        if (PGM_POOL_IS_EPT_PTE_PRESENT(pShwPT->a[i]))
+        if (pShwPT->a[i].n.u1Present)
         {
             Log4(("pgmPoolTrackDerefPTEPT: i=%d pte=%RX64 GCPhys=%RX64\n",
…
             void *pvGst;
             int rc = PGM_GCPHYS_2_PTR_EX(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
-            pgmPoolTrackDerefPTPae32Bit(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PT)pvGst);
+            pgmPoolTrackDerefPTPae32Bit(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PT)pvGst);
             PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
             STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
…
             void *pvGst;
             int rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
-            pgmPoolTrackDerefPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
+            pgmPoolTrackDerefPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
             PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
             STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
…
         {
             STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
-            pgmPoolTrackDerefPTPaeBig(pPool, pPage, (PX86PTPAE)pvShw);
+            pgmPoolTrackDerefPTPaeBig(pPool, pPage, (PPGMSHWPTPAE)pvShw);
             STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
             break;
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r31178 r31775 31 31 #undef SHW_PD_MASK 32 32 #undef SHW_PTE_PG_MASK 33 #undef SHW_PTE_IS_P 34 #undef SHW_PTE_IS_RW 35 #undef SHW_PTE_IS_US 36 #undef SHW_PTE_IS_A 37 #undef SHW_PTE_IS_D 38 #undef SHW_PTE_IS_P_RW 39 #undef SHW_PTE_IS_TRACK_DIRTY 40 #undef SHW_PTE_GET_HCPHYS 41 #undef SHW_PTE_GET_U 42 #undef SHW_PTE_LOG64 43 #undef SHW_PTE_SET 44 #undef SHW_PTE_ATOMIC_SET 45 #undef SHW_PTE_ATOMIC_SET2 46 #undef SHW_PTE_SET_RO 47 #undef SHW_PTE_SET_RW 33 48 #undef SHW_PT_SHIFT 34 49 #undef SHW_PT_MASK … … 40 55 41 56 #if PGM_SHW_TYPE == PGM_TYPE_32BIT 42 # define SHWPT X86PT 43 # define PSHWPT PX86PT 44 # define SHWPTE X86PTE 45 # define PSHWPTE PX86PTE 46 # define SHWPD X86PD 47 # define PSHWPD PX86PD 48 # define SHWPDE X86PDE 49 # define PSHWPDE PX86PDE 50 # define SHW_PDE_PG_MASK X86_PDE_PG_MASK 51 # define SHW_PD_SHIFT X86_PD_SHIFT 52 # define SHW_PD_MASK X86_PD_MASK 53 # define SHW_TOTAL_PD_ENTRIES X86_PG_ENTRIES 54 # define SHW_PTE_PG_MASK X86_PTE_PG_MASK 55 # define SHW_PT_SHIFT X86_PT_SHIFT 56 # define SHW_PT_MASK X86_PT_MASK 57 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PD 57 # define SHWPT X86PT 58 # define PSHWPT PX86PT 59 # define SHWPTE X86PTE 60 # define PSHWPTE PX86PTE 61 # define SHWPD X86PD 62 # define PSHWPD PX86PD 63 # define SHWPDE X86PDE 64 # define PSHWPDE PX86PDE 65 # define SHW_PDE_PG_MASK X86_PDE_PG_MASK 66 # define SHW_PD_SHIFT X86_PD_SHIFT 67 # define SHW_PD_MASK X86_PD_MASK 68 # define SHW_TOTAL_PD_ENTRIES X86_PG_ENTRIES 69 # define SHW_PTE_PG_MASK X86_PTE_PG_MASK 70 # define SHW_PTE_IS_P(Pte) ( (Pte).n.u1Present ) 71 # define SHW_PTE_IS_RW(Pte) ( (Pte).n.u1Write ) 72 # define SHW_PTE_IS_US(Pte) ( (Pte).n.u1User ) 73 # define SHW_PTE_IS_A(Pte) ( (Pte).n.u1Accessed ) 74 # define SHW_PTE_IS_D(Pte) ( (Pte).n.u1Dirty ) 75 # define SHW_PTE_IS_P_RW(Pte) ( (Pte).n.u1Present && (Pte).n.u1Write ) 76 # define SHW_PTE_IS_TRACK_DIRTY(Pte) ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) ) 77 # define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PG_MASK ) 78 # define SHW_PTE_LOG64(Pte) ( (uint64_t)(Pte).u ) 79 # define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */ 80 # define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0) 81 # define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU32(&(Pte).u, (uNew)); } while (0) 82 # define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU32(&(Pte).u, (Pte2).u); } while (0) 83 # define SHW_PTE_SET_RO(Pte) do { (Pte).n.u1Write = 0; } while (0) 84 # define SHW_PTE_SET_RW(Pte) do { (Pte).n.u1Write = 1; } while (0) 85 # define SHW_PT_SHIFT X86_PT_SHIFT 86 # define SHW_PT_MASK X86_PT_MASK 87 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PD 58 88 59 89 #elif PGM_SHW_TYPE == PGM_TYPE_EPT 60 # define SHWPT EPTPT 61 # define PSHWPT PEPTPT 62 # define SHWPTE EPTPTE 63 # define PSHWPTE PEPTPTE 64 # define SHWPD EPTPD 65 # define PSHWPD PEPTPD 66 # define SHWPDE EPTPDE 67 # define PSHWPDE PEPTPDE 68 # define SHW_PDE_PG_MASK EPT_PDE_PG_MASK 69 # define SHW_PD_SHIFT EPT_PD_SHIFT 70 # define SHW_PD_MASK EPT_PD_MASK 71 # define SHW_PTE_PG_MASK EPT_PTE_PG_MASK 72 # define SHW_PT_SHIFT EPT_PT_SHIFT 73 # define SHW_PT_MASK EPT_PT_MASK 74 # define SHW_PDPT_SHIFT EPT_PDPT_SHIFT 75 # define SHW_PDPT_MASK EPT_PDPT_MASK 76 # define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK 77 # define SHW_TOTAL_PD_ENTRIES (EPT_PG_AMD64_ENTRIES*EPT_PG_AMD64_PDPE_ENTRIES) 78 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_NESTED_ROOT /* do not use! exception is real mode & protected mode without paging. 
*/ 90 # define SHWPT EPTPT 91 # define PSHWPT PEPTPT 92 # define SHWPTE EPTPTE 93 # define PSHWPTE PEPTPTE 94 # define SHWPD EPTPD 95 # define PSHWPD PEPTPD 96 # define SHWPDE EPTPDE 97 # define PSHWPDE PEPTPDE 98 # define SHW_PDE_PG_MASK EPT_PDE_PG_MASK 99 # define SHW_PD_SHIFT EPT_PD_SHIFT 100 # define SHW_PD_MASK EPT_PD_MASK 101 # define SHW_PTE_PG_MASK EPT_PTE_PG_MASK 102 # define SHW_PTE_IS_P(Pte) ( (Pte).n.u1Present ) /* Approximation, works for us. */ 103 # define SHW_PTE_IS_RW(Pte) ( (Pte).n.u1Write ) 104 # define SHW_PTE_IS_US(Pte) ( true ) 105 # define SHW_PTE_IS_A(Pte) ( true ) 106 # define SHW_PTE_IS_D(Pte) ( true ) 107 # define SHW_PTE_IS_P_RW(Pte) ( (Pte).n.u1Present && (Pte).n.u1Write ) 108 # define SHW_PTE_IS_TRACK_DIRTY(Pte) ( false ) 109 # define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PG_MASK ) 110 # define SHW_PTE_LOG64(Pte) ( (Pte).u ) 111 # define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */ 112 # define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0) 113 # define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU64(&(Pte).u, (uNew)); } while (0) 114 # define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0) 115 # define SHW_PTE_SET_RO(Pte) do { (Pte).n.u1Write = 0; } while (0) 116 # define SHW_PTE_SET_RW(Pte) do { (Pte).n.u1Write = 1; } while (0) 117 # define SHW_PT_SHIFT EPT_PT_SHIFT 118 # define SHW_PT_MASK EPT_PT_MASK 119 # define SHW_PDPT_SHIFT EPT_PDPT_SHIFT 120 # define SHW_PDPT_MASK EPT_PDPT_MASK 121 # define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK 122 # define SHW_TOTAL_PD_ENTRIES (EPT_PG_AMD64_ENTRIES*EPT_PG_AMD64_PDPE_ENTRIES) 123 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_NESTED_ROOT /* do not use! exception is real mode & protected mode without paging. */ 79 124 80 125 #else 81 # define SHWPT X86PTPAE 82 # define PSHWPT PX86PTPAE 83 # define SHWPTE X86PTEPAE 84 # define PSHWPTE PX86PTEPAE 85 # define SHWPD X86PDPAE 86 # define PSHWPD PX86PDPAE 87 # define SHWPDE X86PDEPAE 88 # define PSHWPDE PX86PDEPAE 89 # define SHW_PDE_PG_MASK X86_PDE_PAE_PG_MASK 90 # define SHW_PD_SHIFT X86_PD_PAE_SHIFT 91 # define SHW_PD_MASK X86_PD_PAE_MASK 92 # define SHW_PTE_PG_MASK X86_PTE_PAE_PG_MASK 93 # define SHW_PT_SHIFT X86_PT_PAE_SHIFT 94 # define SHW_PT_MASK X86_PT_PAE_MASK 126 # define SHWPT PGMSHWPTPAE 127 # define PSHWPT PPGMSHWPTPAE 128 # define SHWPTE PGMSHWPTEPAE 129 # define PSHWPTE PPGMSHWPTEPAE 130 # define SHWPD X86PDPAE 131 # define PSHWPD PX86PDPAE 132 # define SHWPDE X86PDEPAE 133 # define PSHWPDE PX86PDEPAE 134 # define SHW_PDE_PG_MASK X86_PDE_PAE_PG_MASK 135 # define SHW_PD_SHIFT X86_PD_PAE_SHIFT 136 # define SHW_PD_MASK X86_PD_PAE_MASK 137 # define SHW_PTE_PG_MASK X86_PTE_PAE_PG_MASK 138 # define SHW_PTE_IS_P(Pte) PGMSHWPTEPAE_IS_P(Pte) 139 # define SHW_PTE_IS_RW(Pte) PGMSHWPTEPAE_IS_RW(Pte) 140 # define SHW_PTE_IS_US(Pte) PGMSHWPTEPAE_IS_US(Pte) 141 # define SHW_PTE_IS_A(Pte) PGMSHWPTEPAE_IS_A(Pte) 142 # define SHW_PTE_IS_D(Pte) PGMSHWPTEPAE_IS_D(Pte) 143 # define SHW_PTE_IS_P_RW(Pte) PGMSHWPTEPAE_IS_P_RW(Pte) 144 # define SHW_PTE_IS_TRACK_DIRTY(Pte) PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte) 145 # define SHW_PTE_GET_HCPHYS(Pte) PGMSHWPTEPAE_GET_HCPHYS(Pte) 146 # define SHW_PTE_LOG64(Pte) PGMSHWPTEPAE_GET_LOG(Pte) 147 # define SHW_PTE_GET_U(Pte) PGMSHWPTEPAE_GET_U(Pte) /**< Use with care. 
*/ 148 # define SHW_PTE_SET(Pte, uNew) PGMSHWPTEPAE_SET(Pte, uNew) 149 # define SHW_PTE_ATOMIC_SET(Pte, uNew) PGMSHWPTEPAE_ATOMIC_SET(Pte, uNew) 150 # define SHW_PTE_ATOMIC_SET2(Pte, Pte2) PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2) 151 # define SHW_PTE_SET_RO(Pte) PGMSHWPTEPAE_SET_RO(Pte) 152 # define SHW_PTE_SET_RW(Pte) PGMSHWPTEPAE_SET_RW(Pte) 153 # define SHW_PT_SHIFT X86_PT_PAE_SHIFT 154 # define SHW_PT_MASK X86_PT_PAE_MASK 95 155 96 156 # if PGM_SHW_TYPE == PGM_TYPE_AMD64 97 # define SHW_PDPT_SHIFT X86_PDPT_SHIFT98 # define SHW_PDPT_MASK X86_PDPT_MASK_AMD6499 # define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK100 # define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES*X86_PG_AMD64_PDPE_ENTRIES)101 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_AMD64_CR3157 # define SHW_PDPT_SHIFT X86_PDPT_SHIFT 158 # define SHW_PDPT_MASK X86_PDPT_MASK_AMD64 159 # define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK 160 # define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES) 161 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_AMD64_CR3 102 162 103 163 # else /* 32 bits PAE mode */ 104 # define SHW_PDPT_SHIFT X86_PDPT_SHIFT105 # define SHW_PDPT_MASK X86_PDPT_MASK_PAE106 # define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK107 # define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)108 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PDPT164 # define SHW_PDPT_SHIFT X86_PDPT_SHIFT 165 # define SHW_PDPT_MASK X86_PDPT_MASK_PAE 166 # define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK 167 # define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES) 168 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PDPT 109 169 110 170 # endif … … 254 314 const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK; 255 315 SHWPTE Pte = pPT->a[iPt]; 256 if (! Pte.n.u1Present)316 if (!SHW_PTE_IS_P(Pte)) 257 317 return VERR_PAGE_NOT_PRESENT; 258 318 … … 264 324 if (pfFlags) 265 325 { 266 *pfFlags = ( Pte.u& ~SHW_PTE_PG_MASK)326 *pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK) 267 327 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US)); 268 328 # if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) /** @todo why do we have to check the guest state here? */ 269 329 /* The NX bit is determined by a bitwise OR between the PT and PD */ 270 if ((( Pte.u| Pde.u) & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))330 if (((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu)) 271 331 *pfFlags |= X86_PTE_PAE_NX; 272 332 # endif … … 274 334 275 335 if (pHCPhys) 276 *pHCPhys = Pte.u & SHW_PTE_PG_MASK;336 *pHCPhys = SHW_PTE_GET_HCPHYS(Pte); 277 337 278 338 return VINF_SUCCESS; … … 375 435 while (iPTE < RT_ELEMENTS(pPT->a)) 376 436 { 377 if ( pPT->a[iPTE].n.u1Present)437 if (SHW_PTE_IS_P(pPT->a[iPTE])) 378 438 { 379 439 SHWPTE const OrgPte = pPT->a[iPTE]; 380 440 SHWPTE NewPte; 381 441 382 NewPte.u = (OrgPte.u & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK); 383 Assert(NewPte.n.u1Present); 384 if (!NewPte.n.u1Present) 442 SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK)); 443 if (!SHW_PTE_IS_P(NewPte)) 385 444 { 386 445 /** @todo Some CSAM code path might end up here and upset … … 388 447 AssertFailed(); 389 448 } 390 else if ( NewPte.n.u1Write391 && ! 
OrgPte.n.u1Write449 else if ( SHW_PTE_IS_RW(NewPte) 450 && !SHW_PTE_IS_RW(OrgPte) 392 451 && !(fOpFlags & PGM_MK_PG_IS_MMIO2) ) 393 452 { … … 414 473 } 415 474 416 ASMAtomicWriteSize(&pPT->a[iPTE], NewPte.u);475 SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte); 417 476 # if PGM_SHW_TYPE == PGM_TYPE_EPT 418 477 HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr); -
trunk/src/VBox/VMM/VMMGC/PGMGCShw.h
r28800 r31775 50 50 # define SHW_PT_MASK X86_PT_MASK 51 51 #else 52 # define SHWPT X86PTPAE53 # define PSHWPT P X86PTPAE54 # define SHWPTE X86PTEPAE55 # define PSHWPTE P X86PTEPAE52 # define SHWPT PGMSHWPTPAE 53 # define PSHWPT PPGMSHWPTPAE 54 # define SHWPTE PGMSHWPTEPAE 55 # define PSHWPTE PPGMSHWPTEPAE 56 56 # define SHWPD X86PDPAE 57 57 # define PSHWPD PX86PDPAE -
trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp
r31445 r31775 1291 1291 pThis->paPages[iPage].uPte.pLegacy = &pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage]; 1292 1292 else 1293 pThis->paPages[iPage].uPte.pPae = &pVM->pgm.s.paDynPageMapPaePTEsGC[iPage];1293 pThis->paPages[iPage].uPte.pPae = (PX86PTEPAE)&pVM->pgm.s.paDynPageMapPaePTEsGC[iPage]; 1294 1294 } 1295 1295
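
The common thread of the pool-tracking hunks above is defensive access to shadow PAE page-table entries: the comparisons now mask in X86_PTE_PAE_MBZ_MASK_NX before testing X86_PTE_P, because an entry with must-be-zero bits set is not a valid present entry even if its P bit happens to be set. The new PGMSHWPTEPAE_IS_P / PGMSHWPTEPAE_GET_U / PGMSHWPTEPAE_GET_HCPHYS / PGMSHWPTEPAE_ATOMIC_SET accessors centralize that rule so callers can no longer fall back to a naive u1Present test. A minimal standalone sketch of the pattern follows; the DEMO_* names and the mask value are illustrative stand-ins, not the actual VMM definitions:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PTE_P         UINT64_C(0x0000000000000001)
#define DEMO_PTE_RW        UINT64_C(0x0000000000000002)
#define DEMO_PTE_MBZ_MASK  UINT64_C(0x7ff0000000000000)  /* illustrative MBZ bits */

typedef union DEMOSHWPTEPAE
{
    uint64_t uCareful;   /* no bit-field view: forces access through the macros */
} DEMOSHWPTEPAE;

/* Present only when P is set and every must-be-zero bit really is zero. */
#define DEMOSHWPTEPAE_IS_P(Pte) \
    ( ((Pte).uCareful & (DEMO_PTE_P | DEMO_PTE_MBZ_MASK)) == DEMO_PTE_P )

int main(void)
{
    DEMOSHWPTEPAE Pte;
    Pte.uCareful = DEMO_PTE_P | DEMO_PTE_RW;
    printf("normal entry:  %d\n", DEMOSHWPTEPAE_IS_P(Pte));           /* prints 1 */

    /* An entry parked with an MBZ bit set must not read as present,
       even though a naive (u & P) test would claim it is. */
    Pte.uCareful |= UINT64_C(1) << 62;
    printf("invalid entry: %d (naive test: %d)\n",
           DEMOSHWPTEPAE_IS_P(Pte), !!(Pte.uCareful & DEMO_PTE_P));   /* prints 0, 1 */
    return 0;
}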
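PGMAllShw.h is a template compiled once per shadow paging mode, so its function bodies must work against the 32-bit, PAE and EPT entry layouts alike. That is why the hunks above replace direct field access such as Pte.n.u1Present and ASMAtomicWriteSize(&pPT->a[iPTE], NewPte.u) with SHW_PTE_IS_P and SHW_PTE_ATOMIC_SET2, which each mode maps onto its own entry type (plain X86PTE, EPTPTE, or the wrapped PGMSHWPTEPAE). A compressed sketch of that dispatch, with made-up MODE_* names standing in for the real PGM_SHW_TYPE machinery:

#include <stdint.h>
#include <stddef.h>

/* Two toy "modes"; the template logic below compiles against either. */
#define MODE_PLAIN 1

#if MODE_PLAIN
typedef struct { uint32_t u; } MODEPTE;
# define MODE_PTE_IS_P(Pte)       ( (Pte).u & 1u )
# define MODE_PTE_SET(Pte, uNew)  do { (Pte).u = (uint32_t)(uNew); } while (0)
#else
typedef struct { uint64_t uCareful; } MODEPTE;   /* wrapped, like PGMSHWPTEPAE */
# define MODE_PTE_IS_P(Pte)       ( (Pte).uCareful & 1u /* plus MBZ check */ )
# define MODE_PTE_SET(Pte, uNew)  do { (Pte).uCareful = (uNew); } while (0)
#endif

/* Mode-independent code: compiles unchanged against either PTE layout. */
static size_t countPresent(const MODEPTE *paPtes, size_t cPtes)
{
    size_t cPresent = 0;
    for (size_t i = 0; i < cPtes; i++)
        if (MODE_PTE_IS_P(paPtes[i]))
            cPresent++;
    return cPresent;
}

int main(void)
{
    MODEPTE aPtes[3] = { {0}, {0}, {0} };
    MODE_PTE_SET(aPtes[1], 0x5000u | 1u);    /* one present entry */
    return countPresent(aPtes, 3) == 1 ? 0 : 1;
}

The *_ATOMIC_SET2 forms seen in the hunks copy a whole entry with a single ASMAtomicWriteU32/U64, presumably so a CPU walking the same table concurrently never observes a torn PTE.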