Changeset 31081 in vbox for trunk/src/VBox/VMM
- Timestamp: Jul 24, 2010 8:47:10 PM (14 years ago)
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/PGMInternal.h
r31080 → r31081

@@ -2327 +2327 @@
     /** Set if it involves a gigantic page (1 GB). */
     bool fGigantPage;
-#if 0
-    /** Set if write access was attempted and not possible. */
-    bool fWriteError;
-    /** Set if execute access was attempted and not possible. */
-    bool fExecuteError;
-#endif
-    /** Unused. */
-    bool afUnused[3];
+    /** The effect X86_PTE_US flag for the address. */
+    bool fEffectiveUS;
+    /** The effect X86_PTE_RW flag for the address. */
+    bool fEffectiveRW;
+    /** The effect X86_PTE_NX flag for the address. */
+    bool fEffectiveNX;
 } PGMPTWALKCORE;

@@ -3639 +3637 @@
 int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);

-int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PCX86PDPE pGstPdpe, PX86PDPAE *ppPD);
+int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);

 int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd);
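The three new fEffective* fields capture the permissions that actually apply to the translated address once every paging level has been combined; the page-fault code in PGMAllBth.h below tests them against the #PF error-code bits. As a quick illustration, here is a standalone sketch of that test (not part of the changeset: the EXAMPLEWALKCORE type, the EX_PF_* constants and the function name are invented for the example, and the real VirtualBox structures carry much more state):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the walk-result core; the real PGMPTWALKCORE has many more fields. */
typedef struct EXAMPLEWALKCORE
{
    bool fEffectiveUS;   /* user bit ANDed over all paging levels */
    bool fEffectiveRW;   /* write bit ANDed over all paging levels */
    bool fEffectiveNX;   /* no-execute bit ORed over all paging levels (when NX is enabled) */
} EXAMPLEWALKCORE;

/* Architectural #PF error-code bits. */
#define EX_PF_RW 0x02u   /* the access was a write */
#define EX_PF_US 0x04u   /* the access came from user mode */
#define EX_PF_ID 0x10u   /* the access was an instruction fetch */

/* Returns true if the fault violates the guest's own permissions and must be reflected to it. */
static bool exampleIsGuestProtFault(const EXAMPLEWALKCORE *pWalk, uint32_t uErr, bool fWriteProtect)
{
    if ((uErr & EX_PF_RW) && !pWalk->fEffectiveRW && ((uErr & EX_PF_US) || fWriteProtect))
        return true;   /* write to a read-only mapping (CR0.WP guards ring-0 writes) */
    if ((uErr & EX_PF_US) && !pWalk->fEffectiveUS)
        return true;   /* user-mode access to a supervisor-only mapping */
    if ((uErr & EX_PF_ID) && pWalk->fEffectiveNX)
        return true;   /* instruction fetch from a no-execute mapping */
    return false;
}

Hedged as it is, the sketch mirrors the condition the changeset adds to Trap0eHandler: a write to a non-writable mapping, a user access to a supervisor mapping, or an instruction fetch from a no-execute mapping is handed back to the guest.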
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r31080 → r31081

@@ -71 +71 @@
 DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
 #ifndef IN_RC
-static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PCX86PML4E pGstPml4e, PCX86PDPE pGstPdpe, PX86PDPAE *ppPD);
-static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
+static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
+static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
 #endif

@@ -914 +914 @@
  * @param   pVCpu       The VMCPU handle.
  * @param   GCPtr       The address.
- * @param   pGstPdpe    Guest PDPT entry
+ * @param   uGstPdpe    Guest PDPT entry.
  * @param   ppPD        Receives address of page directory
  */
-int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PCX86PDPE pGstPdpe, PX86PDPAE *ppPD)
+int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
 {
     const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;

@@ -935 +935 @@
     RTGCPTR64   GCPdPt;
     PGMPOOLKIND enmKind;
-    Assert(pGstPdpe);
-    X86PDPE GstPdpe = *pGstPdpe;

 # if defined(IN_RC)

@@ -953 +951 @@
     if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
     {
-        if (!GstPdpe.n.u1Present)
+        if (!(uGstPdpe & X86_PDPE_P))
         {
             /* PD not present; guest must reload CR3 to change it.

@@ -960 +958 @@
             Assert(!HWACCMIsEnabled(pVM));

-            GCPdPt  = GstPdpe.u & X86_PDPE_PG_MASK;
+            GCPdPt  = uGstPdpe & X86_PDPE_PG_MASK;
             enmKind = PGMPOOLKIND_PAE_PD_PHYS;
-            GstPdpe.n.u1Present = 1;
+            uGstPdpe |= X86_PDPE_P;
         }
         else
         {
-            GCPdPt  = GstPdpe.u & X86_PDPE_PG_MASK;
+            GCPdPt  = uGstPdpe & X86_PDPE_PG_MASK;
             enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
         }

@@ -983 +981 @@
     /* The PD was cached or created; hook it up now. */
     pPdpe->u |= pShwPage->Core.Key
-             | (GstPdpe.u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
+             | (uGstPdpe & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));

 # if defined(IN_RC)

@@ -1052 +1050 @@
  * @param   pVCpu       VMCPU handle.
  * @param   GCPtr       The address.
- * @param   pGstPml4e   Guest PML4 entry
- * @param   pGstPdpe    Guest PDPT entry
+ * @param   uGstPml4e   Guest PML4 entry
+ * @param   uGstPdpe    Guest PDPT entry
  * @param   ppPD        Receives address of page directory
  */
-static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PCX86PML4E pGstPml4e, PCX86PDPE pGstPdpe, PX86PDPAE *ppPD)
+static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
 {
     PPGMCPU pPGM = &pVCpu->pgm.s;

@@ -1086 +1084 @@
     else
     {
-        Assert(pGstPml4e && pGstPdpe);
-
-        GCPml4  = pGstPml4e->u & X86_PML4E_PG_MASK;
+        GCPml4  = uGstPml4e & X86_PML4E_PG_MASK;
         enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
     }

@@ -1105 +1101 @@
     /* The PDPT was cached or created; hook it up now. */
     pPml4e->u |= pShwPage->Core.Key
-              | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
+              | (uGstPml4e & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));

     const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;

@@ -1126 +1122 @@
     else
     {
-        Assert(pGstPdpe);
-
-        GCPdPt  = pGstPdpe->u & X86_PDPE_PG_MASK;
+        GCPdPt  = uGstPdpe & X86_PDPE_PG_MASK;
         enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
     }

@@ -1145 +1139 @@
     /* The PD was cached or created; hook it up now. */
     pPdpe->u |= pShwPage->Core.Key
-             | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
+             | (uGstPdpe & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));

     *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
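pgmShwSyncPaePDPtr and pgmShwSyncLongModePDPtr now receive the guest entry as a raw X86PGPAEUINT value rather than through a pointer, which also removes the Assert(pGstPdpe) guards and the local X86PDPE copy. The following minimal sketch (all names invented for the example, not the real VirtualBox API) shows the caller-side effect of the by-value form, visible in PGMAllBth.h below where constants such as X86_PDPE_P are passed directly:

#include <stdint.h>

typedef uint64_t EXPGPAEUINT;            /* stand-in for X86PGPAEUINT (a raw 64-bit paging entry) */
#define EX_PDPE_P UINT64_C(0x0001)       /* present bit, as in X86_PDPE_P */

/* Old shape: the callee takes a pointer, so every caller needs an addressable entry. */
static int exSyncPDPtrOld(const EXPGPAEUINT *puGstPdpe)
{
    return (*puGstPdpe & EX_PDPE_P) ? 0 : -1;
}

/* New shape: the entry travels by value and may simply be a constant expression. */
static int exSyncPDPtrNew(EXPGPAEUINT uGstPdpe)
{
    return (uGstPdpe & EX_PDPE_P) ? 0 : -1;
}

int exCaller(void)
{
    EXPGPAEUINT uTmp = EX_PDPE_P;            /* the pointer form forces a temporary... */
    int rc = exSyncPDPtrOld(&uTmp);
    if (rc == 0)
        rc = exSyncPDPtrNew(EX_PDPE_P);      /* ...the by-value form does not. */
    return rc;
}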
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r31069 → r31081

@@ -36 +36 @@
 PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
 PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
-PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
+PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
 PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);

@@ -73 +73 @@
 #endif

+/* enables the new code. */
+#define PGM_WITH_GST_WALK

 #ifndef IN_RING3
+
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+/**
+ * Deal with a guest page fault.
+ *
+ * @returns Strict VBox status code.
+ * @retval  VINF_EM_RAW_GUEST_TRAP
+ * @retval  VINF_EM_RAW_EMULATE_INSTR
+ *
+ * @param   pVCpu       The current CPU.
+ * @param   pGstWalk    The guest page table walk result.
+ * @param   uErr        The error code.
+ */
+PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPU pVCpu, PGSTPTWALK pGstWalk, RTGCUINT uErr)
+{
+# if !defined(PGM_WITHOUT_MAPPINGS) && (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE)
+    /*
+     * Check for write conflicts with our hypervisor mapping.
+     *
+     * If the guest happens to access a non-present page, where our hypervisor
+     * is currently mapped, then we'll create a #PF storm in the guest.
+     */
+    if (   (uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW)
+        && MMHyperIsInsideArea(pVCpu->CTX_SUFF(pVM), pGstWalk->Core.GCPtr))
+    {
+        /* Force a CR3 sync to check for conflicts and emulate the instruction. */
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
+        return VINF_EM_RAW_EMULATE_INSTR;
+    }
+# endif
+
+    /*
+     * Calc the error code for the guest trap.
+     */
+    uint32_t uNewErr = GST_IS_NX_ACTIVE(pVCpu)
+                     ? uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID)
+                     : uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
+    if (pGstWalk->Core.fBadPhysAddr)
+    {
+        uNewErr |= X86_TRAP_PF_RSVD | X86_TRAP_PF_P;
+        Assert(!pGstWalk->Core.fNotPresent);
+    }
+    else if (!pGstWalk->Core.fNotPresent)
+        uNewErr |= X86_TRAP_PF_P;
+    TRPMSetErrorCode(pVCpu, uNewErr);
+
+    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
+    return VINF_EM_RAW_GUEST_TRAP;
+}
+# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+#endif /* PGM_WITH_GST_WALK */
+
+
 /**
  * #PF Handler for raw-mode guest execution.

@@ -89 +146 @@
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+#ifdef PGM_WITH_GST_WALK
+    int rc;
+#endif

     *pfLockTaken = false;

@@ -100 +160 @@
           && PGM_SHW_TYPE != PGM_TYPE_NESTED \
           && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
+
+#ifdef PGM_WITH_GST_WALK
+
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+    /*
+     * Walk the guest page translation tables and check if it's a guest fault.
+     */
+    GSTPTWALK GstWalk;
+    rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &GstWalk);
+    if (RT_FAILURE_NP(rc))
+        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+    AssertMsg(GstWalk.Pml4e.u == GstWalk.pPml4e->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pml4e.u, (uint64_t)GstWalk.pPml4e->u));
+# endif
+# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
+    AssertMsg(GstWalk.Pdpe.u == GstWalk.pPdpe->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pdpe.u, (uint64_t)GstWalk.pPdpe->u));
+# endif
+    AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
+    AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
+
+    if (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID))
+    {
+        if (    (   (uErr & X86_TRAP_PF_RW)
+                 && !GstWalk.Core.fEffectiveRW
+                 && (   (uErr & X86_TRAP_PF_US)
+                     || CPUMIsGuestR0WriteProtEnabled(pVCpu)) )
+            ||  ((uErr & X86_TRAP_PF_US) && !GstWalk.Core.fEffectiveUS)
+            ||  ((uErr & X86_TRAP_PF_ID) && GstWalk.Core.fEffectiveNX)
+           )
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
+    }
+
+    /*
+     * Set the accessed and dirty flags.
+     */
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+    GstWalk.Pml4e.u   |= X86_PML4E_A;
+    GstWalk.pPml4e->u |= X86_PML4E_A;
+    GstWalk.Pdpe.u    |= X86_PDPE_A;
+    GstWalk.pPdpe->u  |= X86_PDPE_A;
+# endif
+    if (GstWalk.Core.fBigPage)
+    {
+        Assert(GstWalk.Pde.b.u1Size);
+        if (uErr & X86_TRAP_PF_RW)
+        {
+            GstWalk.Pde.u   |= X86_PDE4M_A | X86_PDE4M_D;
+            GstWalk.pPde->u |= X86_PDE4M_A | X86_PDE4M_D;
+        }
+        else
+        {
+            GstWalk.Pde.u   |= X86_PDE4M_A;
+            GstWalk.pPde->u |= X86_PDE4M_A;
+        }
+    }
+    else
+    {
+        Assert(!GstWalk.Pde.b.u1Size);
+        GstWalk.Pde.u   |= X86_PDE_A;
+        GstWalk.pPde->u |= X86_PDE_A;
+        if (uErr & X86_TRAP_PF_RW)
+        {
+# ifdef VBOX_WITH_STATISTICS
+            if (!GstWalk.Pte.n.u1Dirty)
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtiedPage));
+            else
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageAlreadyDirty));
+# endif
+            GstWalk.Pte.u   |= X86_PTE_A | X86_PTE_D;
+            GstWalk.pPte->u |= X86_PTE_A | X86_PTE_D;
+        }
+        else
+        {
+            GstWalk.Pte.u   |= X86_PTE_A;
+            GstWalk.pPte->u |= X86_PTE_A;
+        }
+        Assert(GstWalk.Pte.u == GstWalk.pPte->u);
+    }
+    AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u,
+              ("%RX64 %RX64 pPte=%p pPde=%p Pte=%RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u, GstWalk.pPte, GstWalk.pPde, (uint64_t)GstWalk.pPte->u));
+# else  /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+    GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A};
+# endif /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+
+#else  /* !PGM_WITH_GST_WALK */

 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE

@@ -188 +333 @@
     }
 # endif /* PGM_WITH_PAGING */
+#endif /* !PGM_WITH_GST_WALK */

     /* Take the big lock now. */

@@ -196 +342 @@
      * Fetch the guest PDE, PDPE and PML4E.
      */
+#ifndef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     GSTPDE PdeSrc = pPDSrc->a[iPDSrc];

@@ -207 +354 @@
 # endif

+#endif /* !PGM_WITH_GST_WALK */
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     const unsigned iPDDst = pvFault >> SHW_PD_SHIFT;

@@ -215 +363 @@

     PX86PDPAE pPDDst;
+#ifdef PGM_WITH_GST_WALK
+# if PGM_GST_TYPE == PGM_TYPE_PAE
+    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, GstWalk.Pdpe.u, &pPDDst);
+# else
+    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, X86_PDPE_P, &pPDDst);    /* RW, US and A are reserved in PAE mode. */
+# endif
+#else
 # if PGM_GST_TYPE != PGM_TYPE_PAE
     X86PDPE PdpeSrc;

@@ -221 +376 @@
     PdpeSrc.u = X86_PDPE_P;    /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
 # endif
-    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
+    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, PdpeSrc.u, &pPDDst);
+#endif
     if (rc != VINF_SUCCESS)
     {

@@ -232 +388 @@
     const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
     PX86PDPAE pPDDst;
+#ifdef PGM_WITH_GST_WALK
+# if PGM_GST_TYPE == PGM_TYPE_PROT  /* (AMD-V nested paging) */
+    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A,
+                                 X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A, &pPDDst);
+# else
+    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, GstWalk.Pml4e.u, GstWalk.Pdpe.u, &pPDDst);
+# endif
+#else
 # if PGM_GST_TYPE == PGM_TYPE_PROT
     /* AMD-V nested paging */

@@ -243 +407 @@
 # endif

-    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
+    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc->u, PdpeSrc.u, &pPDDst);
+#endif /* !PGM_WITH_GST_WALK */
     if (rc != VINF_SUCCESS)
     {

@@ -273 +438 @@
          */
         STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
+#ifdef PGM_WITH_GST_WALK
+        rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], GstWalk.pPde, pvFault);
+#else
         rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
+#endif
         STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
         if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)

@@ -282 +451 @@
             return VINF_SUCCESS;
         }
+#ifdef PGM_WITH_GST_WALK
+        AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
+        AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
+#endif
     }

@@ -292 +465 @@
      * A common case is the not-present error caused by lazy page table syncing.
      *
-     * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
-     * so we can safely assume that the shadow PT is present when calling SyncPage later.
+     * It is IMPORTANT that we weed out any access to non-present shadow PDEs
+     * here so we can safely assume that the shadow PT is present when calling
+     * SyncPage later.
      *
      * On failure, we ASSUME that SyncPT is out of memory or detected some kind

@@ -300 +474 @@
      *
      */
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+    Assert(GstWalk.Pde.n.u1Present);
+# endif
+#else
     Assert(PdeSrc.n.u1Present);
+#endif
     if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
         &&  !pPDDst->a[iPDDst].n.u1Present

@@ -307 +487 @@
         STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
         STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        LogFlow(("=>SyncPT %04x = %08RX64\n", (pvFault >> GST_PD_SHIFT) & GST_PD_MASK, (uint64_t)GstWalk.Pde.u));
+        rc = PGM_BTH_NAME(SyncPT)(pVCpu, (pvFault >> GST_PD_SHIFT) & GST_PD_MASK, GstWalk.pPd, pvFault);
+# else
+        LogFlow(("=>SyncPT pvFault=%RGv\n", pvFault));
+        rc = PGM_BTH_NAME(SyncPT)(pVCpu, 0, NULL, pvFault);
+# endif
+#else /* !PGM_WITH_GST_WALK */
         LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
         rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
+#endif /* !PGM_WITH_GST_WALK */
         if (RT_SUCCESS(rc))
         {

@@ -314 +504 @@
             return rc;
         }
+#ifdef PGM_WITH_GST_WALK
+        Log(("SyncPT: %RGv failed!! rc=%Rrc\n", pvFault, rc));
+#else
         Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
+#endif
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
         STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);

@@ -345 +539 @@
             unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
             while (iPT-- > 0)
+#ifdef PGM_WITH_GST_WALK
+                if (GstWalk.pPde[iPT].n.u1Present)
+#else
                 if (pPDSrc->a[iPDSrc + iPT].n.u1Present)
+#endif
                 {
                     STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eConflicts);

@@ -400 +598 @@
      * in page tables which the guest believes to be present.
      */
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+    Assert(GstWalk.Pde.n.u1Present);
+# endif
+#else
     Assert(PdeSrc.n.u1Present);
+#endif
     {
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
+#if 1
+        RTGCPHYS GCPhys3;
+        if (   GstWalk.Pde.b.u1Size && GST_IS_PSE_ACTIVE(pVCpu))
+            GCPhys3 = GST_GET_PDE_BIG_PG_GCPHYS(pVM, GstWalk.Pde)
+                    | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
+        else
+            GCPhys3 = GstWalk.Pte.u & GST_PTE_PG_MASK;
+        Assert(GCPhys3 == GCPhys);
+#endif
+# else
+        RTGCPHYS GCPhys = (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
+# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+#else
         RTGCPHYS GCPhys = NIL_RTGCPHYS;


@@ -424 +644 @@
         GCPhys = (RTGCPHYS)(pvFault & ~PAGE_OFFSET_MASK);
 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+#endif /* !PGM_WITH_GST_WALK */

         /*
          * If we have a GC address we'll check if it has any flags set.
          */
+#ifndef PGM_WITH_GST_WALK
         if (GCPhys != NIL_RTGCPHYS)
+#endif
         {
             STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);

@@ -458 +681 @@
                     &&  !(uErr & X86_TRAP_PF_P))
                 {
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
+# else
+                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
+# endif
+#else
                     rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#endif
                     if (    RT_FAILURE(rc)
                         ||  !(uErr & X86_TRAP_PF_RW)

@@ -526 +757 @@
                     &&  !(uErr & X86_TRAP_PF_P))
                 {
+#ifdef PGM_WITH_GST_WALK
+                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#else
                     rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#endif
                     if (    RT_FAILURE(rc)
                         ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE

@@ -623 +858 @@
                 &&  !(uErr & X86_TRAP_PF_P))
             {
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+                rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
+# else
+                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
+# endif
+#else
                 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#endif
                 if (    RT_FAILURE(rc)
                     ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE

@@ -737 +980 @@
                 uint64_t fPageGst2;
                 PGMGstGetPage(pVCpu, pvFault, &fPageGst2, &GCPhys2);
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+                Log(("Page out of sync: %RGv eip=%08x PdeSrc.US=%d fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
+                     pvFault, pRegFrame->eip, GstWalk.Pde.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
+# else
+                Log(("Page out of sync: %RGv eip=%08x fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
+                     pvFault, pRegFrame->eip, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
+# endif
+#else
                 Log(("Page out of sync: %RGv eip=%08x PdeSrc.n.u1User=%d fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
                      pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
+#endif
 # endif /* LOG_ENABLED */


@@ -769 +1022 @@
                      */
                     LogFlow(("CSAM ring 3 job\n"));
+#ifdef PGM_WITH_GST_WALK
+                    int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
+#else
                     int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
+#endif
                     AssertRC(rc2);


@@ -818 +1075 @@
             }
 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+            rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
+# else
+            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
+# endif
+#else
             rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#endif
             if (RT_SUCCESS(rc))
             {

@@ -888 +1153 @@
                  * page is not present, which is not true in this case.
                  */
+#ifdef PGM_WITH_GST_WALK
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+                rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
+# else
+                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, 1, uErr);
+# endif
+#else
                 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
+#endif
                 if (RT_SUCCESS(rc))
                 {

@@ -906 +1179 @@
                 {
                     rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys2);
-                    AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%d fPageGst=%RX64\n"));
+#if defined(PGM_WITH_GST_WALK) && PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+                    AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%Rrc fPageGst=%RX64 EffRW=%d EffUS=%d uErr=%RGp cr4=%RX64 pvFault=%RGv\n", rc, fPageGst, GstWalk.Core.fEffectiveRW, GstWalk.Core.fEffectiveUS, uErr, CPUMGetGuestCR0(pVCpu), pvFault ));
+#else
+                    AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst));
+#endif
                     LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys2, (uint64_t)fPageGst));

@@ -953 +1230 @@
 # endif /* PGM_OUT_OF_SYNC_IN_GC */
         }
+#ifndef PGM_WITH_GST_WALK
         else /* GCPhys == NIL_RTGCPHYS */
         {

@@ -970 +1248 @@
              */
         }
+#endif
     }
+    /** @todo This point is never really reached. Clean up later! */

 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)

@@ -2437 +2717 @@
  * @param   GCPtrPage   Guest context page address.
  */
-PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
+PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);

@@ -2506 +2786 @@
     if (pPdeDst->n.u1Present)
     {
-        PGSTPTE         pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
-        const GSTPTE    PteSrc  = *pPteSrc;
+        GSTPTE const   *pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
+        const GSTPTE    PteSrc  = *pPteSrc;

 #ifndef IN_RING0

@@ -3348 +3628 @@
     PdpeSrc.u = X86_PDPE_P;    /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
 # endif
-    rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, &PdpeSrc, &pPDDst);
+    rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, PdpeSrc.u, &pPDDst);
     if (rc != VINF_SUCCESS)
     {

@@ -3374 +3654 @@
 # endif

-    rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
+    rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc->u, PdpeSrc.u, &pPDDst);
     if (rc != VINF_SUCCESS)
     {

@@ -3510 +3790 @@
     PdpeSrc.u = X86_PDPE_P;    /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
 # endif
-    rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, &PdpeSrc, &pPDDst);
+    rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, PdpeSrc.u, &pPDDst);
     if (rc != VINF_SUCCESS)
     {

@@ -3534 +3814 @@
 # endif

-    rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
+    rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc->u, PdpeSrc.u, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r30896 → r31081

@@ -164 +164 @@
             return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);

-            pWalk->Core.GCPhys     = GST_GET_PDE_BIG_PG_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
-                                   | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
-            pWalk->Core.fBigPage   = true;
-            pWalk->Core.fSucceeded = true;
+            pWalk->Core.GCPhys       = GST_GET_PDE_BIG_PG_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
+                                     | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
+            uint8_t fEffectiveXX     = (uint8_t)pWalk->Pde.u
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+                                     & (uint8_t)pWalk->Pde.u
+                                     & (uint8_t)pWalk->Pml4e.u
+# endif
+                                     ;
+            pWalk->Core.fEffectiveRW = !!(fEffectiveXX & X86_PTE_RW);
+            pWalk->Core.fEffectiveUS = !!(fEffectiveXX & X86_PTE_US);
+# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
+            pWalk->Core.fEffectiveNX = (   pWalk->Pde.n.u1NoExecute
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+                                        || pWalk->Pde.n.u1NoExecute
+                                        || pWalk->Pml4e.n.u1NoExecute
+# endif
+                                       ) && GST_IS_NX_ACTIVE(pVCpu);
+# else
+            pWalk->Core.fEffectiveNX = false;
+# endif
+            pWalk->Core.fBigPage     = true;
+            pWalk->Core.fSucceeded   = true;
             return VINF_SUCCESS;
         }

@@ -196 +214 @@
      * We're done.
      */
-    pWalk->Core.GCPhys     = (Pte.u & GST_PDE_PG_MASK)
-                           | (GCPtr & PAGE_OFFSET_MASK);
-    pWalk->Core.fSucceeded = true;
+    pWalk->Core.GCPhys       = (Pte.u & GST_PDE_PG_MASK)
+                             | (GCPtr & PAGE_OFFSET_MASK);
+    uint8_t fEffectiveXX     = (uint8_t)pWalk->Pte.u
+                             & (uint8_t)pWalk->Pde.u
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+                             & (uint8_t)pWalk->Pde.u
+                             & (uint8_t)pWalk->Pml4e.u
+# endif
+                             ;
+    pWalk->Core.fEffectiveRW = !!(fEffectiveXX & X86_PTE_RW);
+    pWalk->Core.fEffectiveUS = !!(fEffectiveXX & X86_PTE_US);
+# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
+    pWalk->Core.fEffectiveNX = (   pWalk->Pte.n.u1NoExecute
+                                || pWalk->Pde.n.u1NoExecute
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+                                || pWalk->Pde.n.u1NoExecute
+                                || pWalk->Pml4e.n.u1NoExecute
+# endif
+                               ) && GST_IS_NX_ACTIVE(pVCpu);
+# else
+    pWalk->Core.fEffectiveNX = false;
+# endif
+    pWalk->Core.fSucceeded   = true;
     return VINF_SUCCESS;
 }
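The walker computes the effective permissions by combining all levels of the translation: RW and US only remain set if every entry grants them (a bitwise AND), while NX takes effect if any entry sets it (a logical OR) and NX is actually enabled. A standalone sketch of that combination rule, reduced to raw 64-bit entries with the architectural bit positions (names invented for the example, not the VirtualBox API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EX_PTE_RW UINT64_C(0x0002)      /* writable */
#define EX_PTE_US UINT64_C(0x0004)      /* user accessible */
#define EX_PTE_NX (UINT64_C(1) << 63)   /* no execute */

typedef struct EXEFFECTIVE
{
    bool fEffectiveRW;
    bool fEffectiveUS;
    bool fEffectiveNX;
} EXEFFECTIVE;

/* paEntries[0..cLevels-1] are the raw entries from the top level down to the final PTE (or big PDE). */
static EXEFFECTIVE exampleComputeEffective(const uint64_t *paEntries, size_t cLevels, bool fNxActive)
{
    uint64_t fAnd = ~UINT64_C(0);   /* RW and US survive only if every level grants them */
    bool     fNx  = false;          /* NX applies if any level sets it */
    for (size_t i = 0; i < cLevels; i++)
    {
        fAnd &= paEntries[i];
        fNx   = fNx || (paEntries[i] & EX_PTE_NX) != 0;
    }
    EXEFFECTIVE Res;
    Res.fEffectiveRW = (fAnd & EX_PTE_RW) != 0;
    Res.fEffectiveUS = (fAnd & EX_PTE_US) != 0;
    Res.fEffectiveNX = fNx && fNxActive;
    return Res;
}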
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r30889 → r31081

@@ -298 +298 @@
                 GstPdpe.u = X86_PDPE_P;
             }
-            int rc = pgmShwSyncPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT, &GstPdpe, &pShwPaePd);
+            int rc = pgmShwSyncPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT, GstPdpe.u, &pShwPaePd);
             AssertFatalRC(rc);
         }