Changeset 31593 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Aug 12, 2010 12:52:52 AM
- svn:sync-xref-src-repo-rev: 64700
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 3 edited
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
Diff from r31591 to r31593:

  /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
…
  * @param   ppStat      Which sub-sample to attribute this call to.
  */
-static int iomInterpretMOVS(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
+static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
 {
     /*
…
     RTGCPHYS    Phys = GCPhysFault;
     int         rc;
-    if (uErrorCode & X86_TRAP_PF_RW)
+    if (fWriteAccess)
     {
         /*
…
  * @returns VBox status code (appropriate for GC return).
  * @param   pVM         VM Handle.
- * @param   uErrorCode  CPU Error code.
+ * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
+ *                      any error code (the EPT misconfig hack).
  * @param   pCtxCore    Trap register frame.
  * @param   GCPhysFault The GC physical address corresponding to pvFault.
  * @param   pvUser      Pointer to the MMIO ring-3 range entry.
  */
-static int iomMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
+static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
 {
     /* Take the IOM lock before performing any MMIO. */
…
     STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
     Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
-         GCPhysFault, (uint32_t)uErrorCode, (RTGCPTR)pCtxCore->rip));
+         GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

     PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
…
 #ifndef IN_RING3
     /*
-     * Should we defer the request right away?
-     */
-    if (uErrorCode & X86_TRAP_PF_RW
-        ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
-        : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3)
+     * Should we defer the request right away?  This isn't usually the case, so
+     * do the simple test first and the try deal with uErrorCode being N/A.
+     */
+    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
+                        || !pRange->CTX_SUFF(pfnReadCallback))
+                    && (  uErrorCode == UINT32_MAX
+                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
+                        : uErrorCode & X86_TRAP_PF_RW
+                        ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
+                        : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
+                       )
+                   )
+       )
     {
         if (uErrorCode & X86_TRAP_PF_RW)
…
         STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
         iomUnlock(pVM);
-        return (uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ);
+        return VINF_IOM_HC_MMIO_READ_WRITE;
     }
 #endif /* !IN_RING3 */
…
         {
             STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
-            if (uErrorCode & X86_TRAP_PF_RW)
+            AssertMsg(uErrorCode == UINT32_MAX || DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->param1.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags), pDis->param2.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param2.flags), uErrorCode));
+            if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
+                ? uErrorCode & X86_TRAP_PF_RW
+                : DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags))
                 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
             else
…
         case OP_MOVSWD:
         {
+            if (uErrorCode == UINT32_MAX)
+                return VINF_IOM_HC_MMIO_READ_WRITE;
             STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
             PSTAMPROFILE pStat = NULL;
-            rc = iomInterpretMOVS(pVM, uErrorCode, pCtxCore, GCPhysFault, pDis, pRange, &pStat);
+            rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
             STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
             break;
…
         case OP_LODSB:
         case OP_LODSWD:
-            Assert(!(uErrorCode & X86_TRAP_PF_RW));
+            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
             STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
             rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
…
         case OP_CMP:
-            Assert(!(uErrorCode & X86_TRAP_PF_RW));
+            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
             STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
             rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
…
         case OP_TEST:
-            Assert(!(uErrorCode & X86_TRAP_PF_RW));
+            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
             STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
             rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
…
         case OP_BT:
-            Assert(!(uErrorCode & X86_TRAP_PF_RW));
+            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
             STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
             rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
…
     LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
              GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
-    if (!pvUser)
-    {
-        int rc = iomLock(pVM);
-        pvUser = iomMMIOGetRange(&pVM->iom.s, GCPhysFault);
-        iomUnlock(pVM);
-    }
-    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, uErrorCode, pCtxCore, GCPhysFault, pvUser);
+    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
     return VBOXSTRICTRC_VAL(rcStrict);
 }
…
         return VINF_IOM_HC_MMIO_READ_WRITE;
 #endif
-    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, uErrorCode, pCtxCore, GCPhysFault, iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
+    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
     iomUnlock(pVM);
     return VBOXSTRICTRC_VAL(rcStrict);
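The substance of this file's change: iomMMIOHandler can now be reached without a #PF error code (uErrorCode == UINT32_MAX, the EPT misconfig path). For OP_MOV the read/write direction is then inferred from the disassembled instruction via DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags); string instructions such as MOVS simply defer to ring-3 with VINF_IOM_HC_MMIO_READ_WRITE, and iomInterpretMOVS takes a plain fWriteAccess bool instead of the error code. Below is a minimal, self-contained model of that direction test; the MODEL_ names and the helper are illustrative stand-ins, not VirtualBox code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_TRAP_PF_RW 0x2u  /* bit 1 of a real x86 #PF error code */

/* Stand-in for DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags): true when the
 * first (destination) operand of the faulting MOV is a memory reference,
 * i.e. the instruction writes to the MMIO page. */
static bool param1_is_memory(bool fDstIsMem)
{
    return fDstIsMem;
}

/* Decide read vs. write the way the patched handler does: trust the #PF
 * error code when there is one, otherwise ask the disassembler. */
static bool is_mmio_write(uint32_t uErrorCode, bool fDstIsMem)
{
    if (uErrorCode != UINT32_MAX)                /* genuine #PF */
        return (uErrorCode & MODEL_TRAP_PF_RW) != 0;
    return param1_is_memory(fDstIsMem);          /* EPT misconfig: no error code */
}

int main(void)
{
    printf("#PF write fault         -> %d\n", is_mmio_write(0x7, true));         /* 1 */
    printf("#PF read fault          -> %d\n", is_mmio_write(0x5, false));        /* 0 */
    printf("EPT misconfig, mem dest -> %d\n", is_mmio_write(UINT32_MAX, true));  /* 1 */
    printf("EPT misconfig, reg dest -> %d\n", is_mmio_write(UINT32_MAX, false)); /* 0 */
    return 0;
}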
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
Diff from r31565 to r31593:

  * Deal with a guest page fault.
  *
+ * The caller has taken the PGM lock.
+ *
  * @returns Strict VBox status code.
  *
…
  * @param   pPage           The guest page at @a pvFault.
  * @param   pGstWalk        The guest page table walk result.
+ * @param   pfLockTaken     PGM lock taken here or not (out).  This is true
+ *                          when we're called.
  */
 static VBOXSTRICTRC PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame,
+                                                                RTGCPTR pvFault, PPGMPAGE pPage, bool *pfLockTaken
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-                                                                RTGCPTR pvFault, PPGMPAGE pPage, PGSTPTWALK pGstWalk)
-# else
-                                                                RTGCPTR pvFault, PPGMPAGE pPage)
-# endif
+                                                                , PGSTPTWALK pGstWalk
+# endif
+                                                                )
 {
 # if !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
…
                  pvFault, GCPhysFault, pPage, uErr, pCur->enmType));

-# if defined(IN_RC) || defined(IN_RING0) /** @todo remove this */
     if (pCur->CTX_SUFF(pfnHandler))
     {
-        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+        PPGMPOOL pPool  = pVM->pgm.s.CTX_SUFF(pPool);
+        void    *pvUser = pCur->CTX_SUFF(pvUser);
 # ifdef IN_RING0
         PFNPGMR0PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
…
         PFNPGMRCPHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
 # endif
-        bool fLeaveLock = (pfnHandler != pPool->CTX_SUFF(pfnAccessHandler));
-        void *pvUser = pCur->CTX_SUFF(pvUser);

         STAM_PROFILE_START(&pCur->Stat, h);
-        if (fLeaveLock)
-            pgmUnlock(pVM); /** @todo: Not entirely safe. */
+        if (pfnHandler != pPool->CTX_SUFF(pfnAccessHandler))
+        {
+            pgmUnlock(pVM);
+            *pfLockTaken = false;
+        }

         rc = pfnHandler(pVM, uErr, pRegFrame, pvFault, GCPhysFault, pvUser);
-        if (fLeaveLock)
-            pgmLock(pVM);
+
 # ifdef VBOX_WITH_STATISTICS
+        pgmLock(pVM);
         pCur = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
         if (pCur)
             STAM_PROFILE_STOP(&pCur->Stat, h);
-# else
-        pCur = NULL; /* might be invalid by now. */
+        pgmUnlock(pVM);
 # endif
-
     }
     else
-# endif /* IN_RC || IN_RING0 */
         rc = VINF_EM_RAW_EMULATE_INSTR;

     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eHandlersPhysical);
+    if (uErr & X86_TRAP_PF_RSVD) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eHandlersPhysical);
     STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2HndPhys; });
     return rc;
…
 # ifdef IN_RC
         STAM_PROFILE_START(&pCur->Stat, h);
+        RTGCPTR                     GCPtrStart = pCur->Core.Key;
+        CTX_MID(PFNPGM,VIRTHANDLER) pfnHandler = pCur->CTX_SUFF(pfnHandler);
         pgmUnlock(pVM);
-        rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
-        pgmLock(pVM);
-        STAM_PROFILE_STOP(&pCur->Stat, h);
+        *pfLockTaken = false;
+
+        rc = pfnHandler(pVM, uErr, pRegFrame, pvFault, GCPtrStart, pvFault - GCPtrStart);
+
+# ifdef VBOX_WITH_STATISTICS
+        pgmLock(pVM);
+        pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
+        if (pCur)
+            STAM_PROFILE_STOP(&pCur->Stat, h);
+        pgmUnlock(pVM);
+# endif
 # else
         rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
…
             Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == (pGstWalk->Core.GCPhys & X86_PTE_PAE_PG_MASK));
 # ifdef IN_RC
-            RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
-            Assert(off < pCur->cb);
-            STAM_PROFILE_START(&pCur->Stat, h);
-            pgmUnlock(pVM);
-            rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, off);
-            pgmLock(pVM);
-            STAM_PROFILE_STOP(&pCur->Stat, h);
+            STAM_PROFILE_START(&pCur->Stat, h);
+            RTGCPTR                     GCPtrStart = pCur->Core.Key;
+            CTX_MID(PFNPGM,VIRTHANDLER) pfnHandler = pCur->CTX_SUFF(pfnHandler);
+            pgmUnlock(pVM);
+            *pfLockTaken = false;
+
+            RTGCPTR off = (iPage << PAGE_SHIFT)
+                        + (pvFault    & PAGE_OFFSET_MASK)
+                        - (GCPtrStart & PAGE_OFFSET_MASK);
+            Assert(off < pCur->cb);
+            rc = pfnHandler(pVM, uErr, pRegFrame, pvFault, GCPtrStart, off);
+
+# ifdef VBOX_WITH_STATISTICS
+            pgmLock(pVM);
+            pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtrStart);
+            if (pCur)
+                STAM_PROFILE_STOP(&pCur->Stat, h);
+            pgmUnlock(pVM);
+# endif
 # else
             rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
…
         return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
     }
-# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-
-# ifdef PGM_WITH_MMIO_OPTIMIZATIONS
-    /*
-     * If it is a reserved bit fault we know that it is an MMIO or access
-     * handler related fault and can skip the dirty page stuff below.
-     */
-    if (uErr & X86_TRAP_PF_RSVD)
-    {
-        /** @todo This is not complete code. take locks */
-        Assert(uErr & X86_TRAP_PF_P);
-        PPGMPAGE pPage;
-        /** @todo Only all physical access handlers here, so optimize further. */
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        rc = pgmPhysGetPageEx(&pVM->pgm.s, GstWalk.Core.GCPhys, &pPage);
-        if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,
-                                                                                 &GstWalk));
-# else
-        rc = pgmPhysGetPageEx(&pVM->pgm.s, (RTGCPHYS)pvFault, &pPage);
-        if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage));
-# endif
-    }
-# endif /* PGM_WITH_MMIO_OPTIMIZATIONS */
-
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)

     /*
      * Set the accessed and dirty flags.
…
     *pfLockTaken = true;
     pgmLock(pVM);
+
+# ifdef PGM_WITH_MMIO_OPTIMIZATIONS
+    /*
+     * If it is a reserved bit fault we know that it is an MMIO (access
+     * handler) related fault and can skip some 200 lines of code.
+     */
+    if (uErr & X86_TRAP_PF_RSVD)
+    {
+        Assert(uErr & X86_TRAP_PF_P);
+        PPGMPAGE pPage;
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        rc = pgmPhysGetPageEx(&pVM->pgm.s, GstWalk.Core.GCPhys, &pPage);
+        if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,
+                                                                                 pfLockTaken, &GstWalk));
+        rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
+# else
+        rc = pgmPhysGetPageEx(&pVM->pgm.s, (RTGCPHYS)pvFault, &pPage);
+        if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,
+                                                                                 pfLockTaken));
+        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, 1, uErr);
+# endif
+        AssertRC(rc);
+    }
+# endif /* PGM_WITH_MMIO_OPTIMIZATIONS */

     /*
…
         if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, &GstWalk));
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken,
+                                                                                 &GstWalk));
 # else
-            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage));
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken));
 # endif

…
 #ifdef PGM_WITH_MMIO_OPTIMIZATIONS
 # if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
-    else if (   PGM_PAGE_IS_MMIO(pPage)
-# if PGM_SHW_TYPE != PGM_TYPE_EPT
-             && (   (fPteSrc & (X86_PTE_RW /*| X86_PTE_D | X86_PTE_A*/ | X86_PTE_US)) /* #PF handles D & A first. */
-                 == (X86_PTE_RW /*| X86_PTE_D | X86_PTE_A*/)
-                 || BTH_IS_NP_ACTIVE(pVM) )
-# endif
+    else if (   PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
+             && (   BTH_IS_NP_ACTIVE(pVM)
+                 || (fPteSrc & (X86_PTE_RW | X86_PTE_US)) == X86_PTE_RW) /** @todo remove X86_PTE_US */
 # if PGM_SHW_TYPE == PGM_TYPE_AMD64
              && pVM->pgm.s.fLessThan52PhysicalAddressBits
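The thread running through this file's hunks is lock discipline: Trap0eHandlerDoAccessHandlers is entered with the PGM lock held, and when it forwards to an external handler it now drops the lock, records that through the new bool *pfLockTaken out parameter, and only retakes the lock for statistics, re-looking the handler up since it may have been deregistered in the meantime. A toy sketch of that contract, with a pthread mutex standing in for the PGM lock (illustrative only, not VirtualBox code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t g_pgmLock = PTHREAD_MUTEX_INITIALIZER;

/* Entered with the lock held and *pfLockTaken == true.  Before handing
 * control to external code we drop the lock and report that through
 * *pfLockTaken so the caller does not unlock a lock it no longer owns. */
static int callAccessHandler(bool *pfLockTaken)
{
    pthread_mutex_unlock(&g_pgmLock);
    *pfLockTaken = false;

    int rc = 0;                       /* ... external handler runs unlocked ... */

    /* Statistics only: retake the lock and re-look-up the handler, since it
     * may have been deregistered while we ran unlocked (this mirrors the
     * pgmHandlerPhysicalLookup / RTAvlroGCPtrRangeGet re-lookups above). */
    pthread_mutex_lock(&g_pgmLock);
    /* ... lookup + profile-stop equivalent would go here ... */
    pthread_mutex_unlock(&g_pgmLock);
    return rc;
}

int main(void)
{
    bool fLockTaken = true;
    pthread_mutex_lock(&g_pgmLock);

    int rc = callAccessHandler(&fLockTaken);

    if (fLockTaken)                   /* only release what we still own */
        pthread_mutex_unlock(&g_pgmLock);
    printf("rc=%d fLockTaken=%d\n", rc, fLockTaken);
    return 0;
}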
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
Diff from r31446 to r31593:

 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
…
 #include <iprt/asm-amd64-x86.h>
 #include <iprt/string.h>
+
+
+/*******************************************************************************
+*   Defined Constants And Macros                                               *
+*******************************************************************************/
+/**
+ * Checks if a PAE PTE entry is actually present and not just invalid because
+ * of the MMIO optimization.
+ * @todo Move this to PGMInternal.h if necessary.
+ */
+#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
+# define PGM_POOL_IS_PAE_PTE_PRESENT(Pte) \
+    ( ((Pte).u & (X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == X86_PTE_P )
+#else
+# define PGM_POOL_IS_PAE_PTE_PRESENT(Pte) \
+    ( (Pte).n.u1Present )
+#endif
+
+/**
+ * Checks if a EPT PTE entry is actually present and not just invalid
+ * because of the MMIO optimization.
+ * @todo Move this to PGMInternal.h if necessary.
+ */
+#define PGM_POOL_IS_EPT_PTE_PRESENT(Pte) \
+    ( (Pte).n.u1Present )
…
                 const unsigned iShw = (off / sizeof(X86PTE)) & (X86_PG_PAE_ENTRIES - 1);
                 LogFlow(("PGMPOOLKIND_PAE_PT_FOR_32BIT_PT iShw=%x\n", iShw));
-                if (uShw.pPTPae->a[iShw].n.u1Present)
+                if (PGM_POOL_IS_PAE_PTE_PRESENT(uShw.pPTPae->a[iShw]))
                 {
                     X86PTE GstPte;
…
                 const unsigned iShw = off / sizeof(X86PTEPAE);
                 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
-                if (uShw.pPTPae->a[iShw].n.u1Present)
+                if (PGM_POOL_IS_PAE_PTE_PRESENT(uShw.pPTPae->a[iShw]))
                 {
                     X86PTEPAE GstPte;
…
                     AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPTPae->a));

-                    if (uShw.pPTPae->a[iShw2].n.u1Present)
+                    if (PGM_POOL_IS_PAE_PTE_PRESENT(uShw.pPTPae->a[iShw2]))
                     {
                         X86PTEPAE GstPte;
…
      * it's fairly safe to assume the guest is reusing the PT.
      */
-    if (GstPte.n.u1Present)
+    if (PGM_POOL_IS_PAE_PTE_PRESENT(GstPte))
     {
         RTHCPHYS HCPhys = -1;
…
 #ifdef VBOX_STRICT
     for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
-        AssertMsg(!pShwPT->a[i].n.u1Present, ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, pShwPT->a[i].u, pPage->iFirstPresent));
+        AssertMsg(!PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, pShwPT->a[i].u, pPage->iFirstPresent));
 #endif
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
     {
-        if (pShwPT->a[i].n.u1Present)
+        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
         {
             RTHCPHYS HCPhys = NIL_RTHCPHYS;
…
             for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
             {
-                if (    pShwPT2->a[j].n.u1Present
-                    &&  pShwPT2->a[j].n.u1Write
-                    &&  ((pShwPT2->a[j].u & X86_PTE_PAE_PG_MASK) == HCPhysPT))
+                if (   PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT2->a[j])
+                    && pShwPT2->a[j].n.u1Write
+                    && (pShwPT2->a[j].u & X86_PTE_PAE_PG_MASK) == HCPhysPT)
                 {
                     Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, pShwPT->a[j].u, pShwPT2->a[j].u));
…
 #ifdef VBOX_STRICT
     for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
-        AssertMsg(!pShwPT->a[i].n.u1Present, ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, pShwPT->a[i].u, pPage->iFirstPresent));
+        AssertMsg(!PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, pShwPT->a[i].u, pPage->iFirstPresent));
 #endif
     *pfFlush = false;
…
             }
         }
-        if (pShwPT->a[i].n.u1Present)
+        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
         {
             /* If the old cached PTE is identical, then there's no need to flush the shadow copy. */
…
     }

-    if ((pPT->a[iPte].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
+    if ((pPT->a[iPte].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
     {
         X86PTEPAE Pte;
…
     Log(("Found %RX64 expected %RX64\n", pPT->a[iPte].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P), u64));
     for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
-        if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
+        if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
         {
             Log(("i=%d cRefs=%d\n", i, cRefs--));
…
             PX86PTPAE pPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
             for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
-                if (pPT->a[i].n.u1Present)
+                if (PGM_POOL_IS_PAE_PTE_PRESENT(pPT->a[i]))
                 {
-                    if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
+                    if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
                     {
                         //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
…
             PEPTPT pPT = (PEPTPT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
             for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
-                if (pPT->a[i].n.u1Present)
+                if (PGM_POOL_IS_EPT_PTE_PRESENT(pPT->a[i]))
                 {
                     if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
…
 {
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
-        if (pShwPT->a[i].n.u1Present)
+        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
         {
             Log4(("pgmPoolTrackDerefPTPae32Bit: i=%d pte=%RX64 hint=%RX32\n",
…
 {
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
-        if (pShwPT->a[i].n.u1Present)
+        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
         {
             Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX32 hint=%RX32\n",
…
     RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
-        if (pShwPT->a[i].n.u1Present)
+        if (PGM_POOL_IS_PAE_PTE_PRESENT(pShwPT->a[i]))
         {
             Log4(("pgmPoolTrackDerefPTPaeBig: i=%d pte=%RX64 hint=%RGp\n",
…
     RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
     for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
-        if (pShwPT->a[i].n.u1Present)
+        if (PGM_POOL_IS_EPT_PTE_PRESENT(pShwPT->a[i]))
        {
             Log4(("pgmPoolTrackDerefPTEPT: i=%d pte=%RX64 GCPhys=%RX64\n",
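Context for the new macros: under PGM_WITH_MMIO_OPTIMIZATIONS the pool writes shadow PAE PTEs that keep X86_PTE_P set but also set must-be-zero bits (X86_PTE_PAE_MBZ_MASK_NX), so any guest access faults with X86_TRAP_PF_RSVD and the trap handler in PGMAllBth.h can short-circuit straight to the MMIO path. A bare .n.u1Present test would mistake such a forged entry for a live mapping, which is why every present check above now goes through PGM_POOL_IS_PAE_PTE_PRESENT and the page-mask compares gain X86_PTE_PAE_MBZ_MASK_NX. A toy model with an invented bit layout (not the real mask; not VirtualBox code):

#include <stdint.h>
#include <stdio.h>

#define MODEL_PTE_P      0x1ULL        /* present bit */
#define MODEL_PAE_MBZ_NX (1ULL << 62)  /* stand-in for a bit out of
                                          X86_PTE_PAE_MBZ_MASK_NX */

/* Model of PGM_POOL_IS_PAE_PTE_PRESENT: present only if P is set AND the
 * must-be-zero bits really are zero. */
static int pteReallyPresent(uint64_t uPte)
{
    return (uPte & (MODEL_PTE_P | MODEL_PAE_MBZ_NX)) == MODEL_PTE_P;
}

int main(void)
{
    uint64_t uNormal = 0x12345000ULL | MODEL_PTE_P;
    uint64_t uMmio   = uNormal | MODEL_PAE_MBZ_NX;    /* forged MMIO entry */

    printf("normal PTE:      present=%d\n", pteReallyPresent(uNormal));         /* 1 */
    printf("forged MMIO PTE: present=%d\n", pteReallyPresent(uMmio));           /* 0 */
    printf("naive P-bit test on forged PTE: %d\n", (int)(uMmio & MODEL_PTE_P)); /* 1, i.e. wrong */
    return 0;
}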