Timestamp: Apr 8, 2010 1:29:25 PM (15 years ago)
Files changed: 1 edited
Legend: Unmodified / Added / Removed
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r27976 r28090 147 147 const unsigned iPDSrc = 0; 148 148 # endif /* !PGM_WITH_PAGING */ 149 150 # if !defined(PGM_WITHOUT_MAPPINGS) && ((PGM_GST_TYPE == PGM_TYPE_32BIT) || (PGM_GST_TYPE == PGM_TYPE_PAE)) 151 /* 152 * Check for write conflicts with our hypervisor mapping early on. If the guest happens to access a non-present page, 153 * where our hypervisor is currently mapped, then we'll create a #PF storm in the guest. 154 */ 155 if ((uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW)) 156 { 157 pgmLock(pVM); 158 # if PGM_SHW_TYPE == PGM_TYPE_32BIT 159 const unsigned iPDDst = pvFault >> SHW_PD_SHIFT; 160 PX86PD pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s); 161 # else /* PGM_SHW_TYPE == PGM_TYPE_PAE */ 162 const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK; /* pPDDst index, not used with the pool. */ 163 164 PX86PDPAE pPDDst; 165 # if PGM_GST_TYPE != PGM_TYPE_PAE 166 X86PDPE PdpeSrc; 167 168 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */ 169 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */ 170 # endif 171 int rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst); 172 if (rc != VINF_SUCCESS) 173 { 174 pgmUnlock(pVM); 175 AssertRC(rc); 176 return rc; 177 } 178 Assert(pPDDst); 179 # endif 180 if (pPDDst->a[iPDDst].u & PGM_PDFLAGS_MAPPING) 181 { 182 pgmUnlock(pVM); 183 /* Force a CR3 sync to check for conflicts and emulate the instruction. */ 184 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 185 return VINF_EM_RAW_EMULATE_INSTR; 186 } 187 pgmUnlock(pVM); 188 } 189 # endif 149 190 150 191 /* First check for a genuine guest page fault. */
Note: See TracChangeset for help on using the changeset viewer.