Changeset 16232 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jan 26, 2009 2:09:56 PM (16 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 3 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r16203 r16232 72 72 DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD); 73 73 DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD); 74 DECLINLINE(int) pgmShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);75 74 DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD); 76 75 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY 77 76 DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde); 77 DECLINLINE(int) pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD); 78 78 #endif 79 79 … … 809 809 810 810 /** 811 * Syncs the SHADOW page directory pointer for the specified address.812 *813 * Allocates backing pages in case the PDPT entry is missing.814 *815 * @returns VBox status.816 * @param pVM VM handle.817 * @param GCPtr The address.818 * @param pGstPdpe Guest PDPT entry819 * @param ppPD Receives address of page directory820 * @remarks Unused.821 */822 DECLINLINE(int) pgmShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)823 {824 PPGM pPGM = &pVM->pgm.s;825 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);826 PPGMPOOLPAGE pShwPage;827 int rc;828 829 Assert(!HWACCMIsNestedPagingActive(pVM));830 831 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;832 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);833 PX86PDPE pPdpe = &pPdpt->a[iPdPt];834 835 /* Allocate page directory if not present. */836 if ( !pPdpe->n.u1Present837 && !(pPdpe->u & X86_PDPE_PG_MASK))838 {839 PX86PDPE pPdptGst = pgmGstGetPaePDPEPtr(pPGM, GCPtr);840 841 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));842 /* Create a reference back to the PDPT by using the index in its shadow page. 
*/843 rc = pgmPoolAlloc(pVM, pPdptGst->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);844 if (rc == VERR_PGM_POOL_FLUSHED)845 {846 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);847 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);848 return VINF_PGM_SYNC_CR3;849 }850 AssertRCReturn(rc, rc);851 }852 else853 {854 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);855 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);856 }857 /* The PD was cached or created; hook it up now. */858 pPdpe->u |= pShwPage->Core.Key859 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));860 861 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);862 return VINF_SUCCESS;863 }864 865 866 /**867 811 * Gets the SHADOW page directory pointer for the specified address. 868 812 * … … 898 842 899 843 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY 844 845 /** 846 * Gets the shadow page directory for the specified address, PAE. 847 * 848 * @returns Pointer to the shadow PD. 849 * @param pVM VM handle. 850 * @param GCPtr The address. 851 * @param pGstPdpe Guest PDPT entry 852 * @param ppPD Receives address of page directory 853 */ 854 DECLINLINE(int) pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD) 855 { 856 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE; 857 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s); 858 PX86PDPE pPdpe = &pPdpt->a[iPdPt]; 859 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 860 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM); 861 PPGMPOOLPAGE pShwPage; 862 int rc; 863 864 /* Allocate page directory if not present. */ 865 if ( !pPdpe->n.u1Present 866 && !(pPdpe->u & X86_PDPE_PG_MASK)) 867 { 868 if (!fNestedPaging) 869 { 870 Assert(pGstPdpe); 871 Assert(!(pPdpe->u & X86_PDPE_PG_MASK)); 872 /* Create a reference back to the PDPT by using the index in its shadow page. 
*/ 873 rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage); 874 } 875 else 876 { 877 /* AMD-V nested paging. (Intel EPT never comes here) */ 878 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT; 879 880 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_PAE_PD_PHYS_PROT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage); 881 } 882 883 if (rc == VERR_PGM_POOL_FLUSHED) 884 { 885 Log(("pgmShwSyncPaePDPtr: PGM pool flushed -> signal sync cr3\n")); 886 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL); 887 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); 888 return VINF_PGM_SYNC_CR3; 889 } 890 AssertRCReturn(rc, rc); 891 } 892 else 893 { 894 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK); 895 AssertReturn(pShwPage, VERR_INTERNAL_ERROR); 896 } 897 /* The PD was cached or created; hook it up now. */ 898 pPdpe->u |= pShwPage->Core.Key 899 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT)); 900 901 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage); 902 return VINF_SUCCESS; 903 } 904 900 905 /** 901 906 * Gets the pointer to the shadow page directory entry for an address, PAE. -
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r16203 r16232 109 109 # if PGM_GST_TYPE == PGM_TYPE_PAE 110 110 unsigned iPDSrc; 111 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY 112 X86PDPE PdpeSrc; 113 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, &PdpeSrc); 114 # else 111 115 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, NULL); 116 # endif 112 117 113 118 # elif PGM_GST_TYPE == PGM_TYPE_AMD64 … … 120 125 Assert(pPml4eSrc); 121 126 # endif 122 /* Quick check for a valid guest trap. */ 127 128 /* Quick check for a valid guest trap. (PAE & AMD64) */ 123 129 if (!pPDSrc) 124 130 { … … 146 152 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 147 153 const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK; /* pPDDst index, not used with the pool. */ 154 155 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY 156 # if PGM_GST_TYPE != PGM_TYPE_PAE 157 PX86PDPAE pPDDst; 158 X86PDPE PdpeSrc; 159 160 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */ 161 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A; 162 # endif 163 rc = pgmShwSyncPaePDPtr(pVM, pvFault, &PdpeSrc, &pPDDst); 164 if (rc != VINF_SUCCESS) 165 { 166 AssertRC(rc); 167 return rc; 168 } 169 Assert(pPDDst); 170 171 # else 148 172 PX86PDPAE pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, pvFault); 149 173 … … 153 177 if (!pPdptDst->a[iPdpt].n.u1Present) 154 178 pPdptDst->a[iPdpt].n.u1Present = 1; 179 # endif 155 180 156 181 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 -
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r16203 r16232 419 419 Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s)); 420 420 VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3); 421 STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict)); 421 422 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw)); 422 423 } … … 432 433 { 433 434 Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s)); 435 STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict)); 434 436 VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3); 435 437 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2)); … … 508 510 Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s)); 509 511 VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3); 512 STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict)); 510 513 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw)); 511 514 } … … 543 546 Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s)); 544 547 VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3); 548 STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict)); 545 549 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2)); 546 550 } … … 579 583 { 580 584 Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s)); 585 STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict)); 581 586 VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3); 582 587 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw)); … … 593 598 { 594 599 Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s)); 600 STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict)); 595 601 VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3); 596 602 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
Note: See TracChangeset for help on using the changeset viewer.