Changeset 9682 in vbox
- Timestamp: Jun 13, 2008 1:27:40 PM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
--- trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r9596)
+++ trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r9682)
@@ -723,13 +723,15 @@
 #ifndef IN_GC
 /**
- * Gets the SHADOW page directory pointer for the specified address. Allocates
- * backing pages in case the PDPT or page dirctory is missing.
+ * Syncs the SHADOW page directory pointer for the specified address. Allocates
+ * backing pages in case the PDPT or PML4 entry is missing.
  *
  * @returns VBox status.
  * @param   pVM         VM handle.
  * @param   GCPtr       The address.
+ * @param   pGstPml4e   Guest PML4 entry
+ * @param   pGstPdpe    Guest PDPT entry
  * @param   ppPD        Receives address of page directory
  */
-PGMDECL(int) PGMShwGetAllocLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPAE *ppPD)
+PGMDECL(int) PGMShwSyncLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
 {
     PPGM pPGM = &pVM->pgm.s;
@@ -756,8 +758,11 @@
 
     /* The PDPT was cached or created; hook it up now. */
-    pPml4e->u |= pShwPage->Core.Key;
+    pPml4e->u |= pShwPage->Core.Key
+               | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
 }
 else
 {
+    AssertMsg((pGstPml4e->u & (X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX)) == (pPml4e->u & (X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX)), ("pGstMpl4e.u=%RX64 pPml4e->u=%RX64\n", pGstPml4e->u, pPml4e->u));
+
     pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
     AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
@@ -784,8 +789,11 @@
 
     /* The PD was cached or created; hook it up now. */
-    pPdpe->u |= pShwPage->Core.Key;
+    pPdpe->u |= pShwPage->Core.Key
+              | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
 }
 else
 {
+    AssertMsg((pGstPdpe->u & (X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX)) == (pPdpe->u & (X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX)), ("pGstPdpe.u=%RX64 pPdpe->u=%RX64\n", pGstPdpe->u, pPdpe->u));
+
     pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
     AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
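What the new hook-up logic does: the shadow PML4E/PDPTE takes its physical address from the shadow page pool page, and now also inherits the guest entry's control bits (present, read/write, user/supervisor, no-execute, accessed), with the guest's physical address, AVL, PCD and PWT bits masked out first. The standalone sketch below illustrates that bit arithmetic; it is an illustration rather than part of the changeset, and the mask values are simplified stand-ins for the real X86_PML4E_* constants.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the X86_PML4E_* constants (illustrative only). */
    #define PML4E_PG_MASK   UINT64_C(0x000ffffffffff000)  /* bits 12..51: physical address */
    #define PML4E_AVL_MASK  UINT64_C(0x0000000000000e00)  /* bits 9..11: available to software */
    #define PML4E_PCD       UINT64_C(0x0000000000000010)  /* bit 4: page-level cache disable */
    #define PML4E_PWT       UINT64_C(0x0000000000000008)  /* bit 3: page-level write-through */

    int main(void)
    {
        /* Hypothetical guest PML4E: guest-physical PDPT at 0x123456000 with P|RW|US set. */
        uint64_t uGstPml4e = UINT64_C(0x123456000) | UINT64_C(0x7);

        /* Hypothetical host-physical address of the shadow PDPT (pShwPage->Core.Key). */
        uint64_t HCPhysShwPdpt = UINT64_C(0x456789000);

        /* Hook up the shadow entry: shadow address plus the guest's control bits,
           with the guest address, AVL, PCD and PWT bits stripped first. */
        uint64_t uShwPml4e = HCPhysShwPdpt
                           | (uGstPml4e & ~(PML4E_PG_MASK | PML4E_AVL_MASK | PML4E_PCD | PML4E_PWT));

        /* Prints 0x456789007: the shadow address carrying the guest's P|RW|US bits. */
        printf("shadow PML4E = %#" PRIx64 "\n", uShwPml4e);
        return 0;
    }

The new AssertMsg in the cached-entry branch checks exactly these inherited bits (P, RW, US, NX) for consistency between the guest and shadow entries.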
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r9620)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r9682)
@@ -110,10 +110,10 @@
 # elif PGM_GST_TYPE == PGM_TYPE_AMD64
     unsigned iPDSrc;
-    PX86PML4E pPml4e;
-    X86PDPE Pdpe;
+    PX86PML4E pPml4eSrc;
+    X86PDPE PdpeSrc;
     PGSTPD pPDSrc;
 
-    pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4e, &Pdpe, &iPDSrc);
-    Assert(pPml4e);
+    pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
+    Assert(pPml4eSrc);
 # endif
     /* Quick check for a valid guest trap. */
@@ -149,6 +149,16 @@
     const unsigned iPDDst = (((RTGCUINTPTR)pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
     PX86PDPAE pPDDst;
+# if PGM_GST_TYPE == PGM_TYPE_PROT
+    /* AMD-V nested paging */
+    X86PML4E Pml4eSrc;
+    X86PDPE PdpeSrc;
+    PX86PML4E pPml4eSrc = &Pml4eSrc;
+
+    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
+    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
+    PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
+# endif
 
-    rc = PGMShwGetAllocLongModePDPtr(pVM, (RTGCUINTPTR)pvFault, &pPDDst);
+    rc = PGMShwSyncLongModePDPtr(pVM, (RTGCUINTPTR)pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
@@ -1455,12 +1465,9 @@
     PX86PDPAE pPDDst;
     X86PDEPAE PdeDst;
+    PX86PDPT pPdpt;
 
-    int rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
-    if (rc != VINF_SUCCESS)
-    {
-        AssertMsg(rc == VINF_PGM_SYNC_CR3, ("Unexpected rc=%Vrc\n", rc));
-        return rc;
-    }
-    Assert(pPDDst);
+    int rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdpt, &pPDDst);
+    AssertRCReturn(rc, rc);
+    Assert(pPDDst && pPdpt);
     PdeDst = pPDDst->a[iPDDst];
 # endif
@@ -2148,5 +2155,6 @@
     const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     PX86PDPAE pPDDst;
-    rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
+    PX86PDPT pPdpt;
+    rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdpt, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
@@ -2595,7 +2603,7 @@
 # elif PGM_GST_TYPE == PGM_TYPE_AMD64
     unsigned iPDSrc;
-    PX86PML4E pPml4e;
-    X86PDPE Pdpe;
-    PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4e, &Pdpe, &iPDSrc);
+    PX86PML4E pPml4eSrc;
+    X86PDPE PdpeSrc;
+    PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
     if (!pPDSrc)
         return VINF_SUCCESS; /* not present */
@@ -2625,5 +2633,16 @@
     X86PDEPAE PdeDst;
 
-    int rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
+# if PGM_GST_TYPE == PGM_TYPE_PROT
+    /* AMD-V nested paging */
+    X86PML4E Pml4eSrc;
+    X86PDPE PdpeSrc;
+    PX86PML4E pPml4eSrc = &Pml4eSrc;
+
+    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
+    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
+    PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
+# endif
+
+    int rc = PGMShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
@@ -2706,7 +2725,7 @@
 # elif PGM_GST_TYPE == PGM_TYPE_AMD64
     unsigned iPDSrc;
-    PX86PML4E pPml4e;
-    X86PDPE Pdpe;
-    PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4e, &Pdpe, &iPDSrc);
+    PX86PML4E pPml4eSrc;
+    X86PDPE PdpeSrc;
+    PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
     if (!pPDSrc)
     {
@@ -2733,5 +2752,16 @@
     PX86PDEPAE pPdeDst;
 
-    rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
+# if PGM_GST_TYPE == PGM_TYPE_PROT
+    /* AMD-V nested paging */
+    X86PML4E Pml4eSrc;
+    X86PDPE PdpeSrc;
+    PX86PML4E pPml4eSrc = &Pml4eSrc;
+
+    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
+    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
+    PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
+# endif
+
+    rc = PGMShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
@@ -2950,10 +2980,10 @@
     for (uint64_t iPML4E = 0; iPML4E < X86_PG_PAE_ENTRIES; iPML4E++)
     {
-        /* Shadow PML4E present? */
-        if (pVM->pgm.s.CTXMID(p,PaePML4)->a[iPML4E].n.u1Present)
+        /* Guest PML4E not present (anymore). */
+        if (!pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPML4E].n.u1Present)
         {
             /** @todo this is not efficient; figure out if we can reuse the existing cached version */
-            /* Guest PML4E not present (anymore). */
-            if (!pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPML4E].n.u1Present)
+            /* Shadow PML4E present? */
+            if (pVM->pgm.s.CTXMID(p,PaePML4)->a[iPML4E].n.u1Present)
             {
                 /* Shadow PML4 present, so free all pdpt & pd entries. */
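The new PGM_GST_TYPE == PGM_TYPE_PROT blocks handle AMD-V nested paging, where the guest runs without long-mode paging structures of its own: since there is no real guest PML4E/PDPTE to hand to PGMShwSyncLongModePDPtr, permissive dummy entries are synthesized, and, as the diff's comment notes, access control is enforced at the page table level instead. Below is a self-contained sketch of that pattern; it is an illustration with simplified bit definitions, not the VirtualBox sources.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* x86 long-mode paging-entry control bits (illustrative definitions). */
    #define ENTRY_P   (UINT64_C(1) << 0)   /* present */
    #define ENTRY_RW  (UINT64_C(1) << 1)   /* writable */
    #define ENTRY_US  (UINT64_C(1) << 2)   /* user-mode accessible */
    #define ENTRY_A   (UINT64_C(1) << 5)   /* accessed */
    #define ENTRY_NX  (UINT64_C(1) << 63)  /* no-execute */

    /* Synthesize a dummy guest entry for a paging level that has no real guest
       counterpart. This mirrors the bits the changeset sets (P | RW | US | NX | A);
       actual access control happens in the page table entries. */
    static uint64_t FakeGuestEntry(void)
    {
        return ENTRY_P | ENTRY_RW | ENTRY_US | ENTRY_NX | ENTRY_A;
    }

    int main(void)
    {
        uint64_t Pml4eSrc = FakeGuestEntry();  /* stands in for the guest PML4E */
        uint64_t PdpeSrc  = FakeGuestEntry();  /* stands in for the guest PDPTE */

        printf("fake PML4E = %#" PRIx64 ", fake PDPE = %#" PRIx64 "\n",
               Pml4eSrc, PdpeSrc);
        return 0;
    }

With these dummies in place, the same sync call serves both real AMD64 guests and nested-paging guests, so the upper paging levels need no special-casing.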