Changeset 19772 in vbox
- Timestamp: May 18, 2009 10:05:40 AM
- svn:sync-xref-src-repo-rev: 47404
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/DBGFSym.cpp
(r19649 → r19772)

Around line 27:

     *******************************************************************************/
     #define LOG_GROUP LOG_GROUP_DBGF
    -#if defined(RT_OS_WINDOWS) && 0//defined(DEBUG_bird) // enabled this is you want to debug win32 guests, the hypervisor of EFI.
    +#if defined(RT_OS_WINDOWS) && 1 //defined(DEBUG_bird) // enabled this is you want to debug win32 guests, the hypervisor of EFI.
     # include <Windows.h>
     # define _IMAGEHLP64
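The only functional change above is the flip from "&& 0" to "&& 1", which turns the otherwise compiled-out dbghelp-based block into live code on Windows builds. A minimal standalone sketch of that compile-time toggle idiom (hypothetical names, not VirtualBox code):

    /* toggle_demo.c - illustrative only; MY_OS_WINDOWS and loadSymbols() are made up. */
    #include <stdio.h>

    #define MY_OS_WINDOWS 1

    #if defined(MY_OS_WINDOWS) && 1   /* flip the 1 to 0 to compile the stub path instead */
    static void loadSymbols(void) { printf("dbghelp-backed symbol loading compiled in\n"); }
    #else
    static void loadSymbols(void) { printf("stub: symbol loading disabled\n"); }
    #endif

    int main(void)
    {
        loadSymbols();
        return 0;
    }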
trunk/src/VBox/VMM/PGM.cpp
(r19769 → r19772)

Around line 1793:

     PGM_REG_COUNTER(&pPGM->StatRZDirtyPageSkipped, "/PGM/CPU%d/RZ/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
     PGM_REG_COUNTER(&pPGM->StatRZDirtyPageTrap, "/PGM/CPU%d/RZ/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
    -PGM_REG_COUNTER(&pPGM->StatRZDirtyPageStale, "/PGM/CPU%d/RZ/DirtyPage/Stale", "The number of traps generated for dirty bit tracking (stale tlb entries).");
     PGM_REG_COUNTER(&pPGM->StatRZDirtiedPage, "/PGM/CPU%d/RZ/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
     PGM_REG_COUNTER(&pPGM->StatRZDirtyTrackRealPF, "/PGM/CPU%d/RZ/DirtyPage/RealPF", "The number of real pages faults during dirty bit tracking.");

Around line 1838:

     PGM_REG_COUNTER(&pPGM->StatR3DirtyPageSkipped, "/PGM/CPU%d/R3/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
     PGM_REG_COUNTER(&pPGM->StatR3DirtyPageTrap, "/PGM/CPU%d/R3/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
    -PGM_REG_COUNTER(&pPGM->StatR3DirtyPageStale, "/PGM/CPU%d/R3/DirtyPage/Stale", "The number of traps generated for dirty bit tracking (stale tlb entries).");
     PGM_REG_COUNTER(&pPGM->StatR3DirtiedPage, "/PGM/CPU%d/R3/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
     PGM_REG_COUNTER(&pPGM->StatR3DirtyTrackRealPF, "/PGM/CPU%d/R3/DirtyPage/RealPF", "The number of real pages faults during dirty bit tracking.");
trunk/src/VBox/VMM/PGMInternal.h
(r19769 → r19772)

Around line 2786:

     STAMCOUNTER StatRZDirtyPageSkipped; /**< RC/R0: The number of pages already dirty or readonly. */
     STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
    -STAMCOUNTER StatRZDirtyPageStale; /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
     STAMCOUNTER StatRZDirtyTrackRealPF; /**< RC/R0: The number of real pages faults during dirty bit tracking. */
     STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */

Around line 2831:

     STAMCOUNTER StatR3DirtyPageSkipped; /**< R3: The number of pages already dirty or readonly. */
     STAMCOUNTER StatR3DirtyPageTrap; /**< R3: The number of traps generated for dirty bit tracking. */
    -STAMCOUNTER StatR3DirtyPageStale; /**< R3: The number of traps generated for dirty bit tracking (stale TLB entries). */
     STAMCOUNTER StatR3DirtyTrackRealPF; /**< R3: The number of real pages faults during dirty bit tracking. */
     STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
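This hunk removes the storage for the counter whose registration was removed from PGM.cpp above; the two go together, since PGM_REG_COUNTER registers the STAMCOUNTER field under a per-CPU path such as "/PGM/CPU%d/RZ/DirtyPage/Stale". As a rough standalone illustration of that register-by-formatted-name pattern (made-up types and functions, not the actual STAM API):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for STAMCOUNTER: a named hit counter. */
    typedef struct STATCOUNTER { char szName[64]; uint64_t cHits; } STATCOUNTER;

    static STATCOUNTER g_aCounters[16];
    static unsigned    g_cCounters;

    /* Hypothetical stand-in for PGM_REG_COUNTER(): expands a per-CPU path and registers it. */
    static STATCOUNTER *statRegister(const char *pszNameFmt, unsigned iCpu)
    {
        STATCOUNTER *pCounter = &g_aCounters[g_cCounters++];
        snprintf(pCounter->szName, sizeof(pCounter->szName), pszNameFmt, iCpu);
        pCounter->cHits = 0;
        return pCounter;
    }

    int main(void)
    {
        STATCOUNTER *pDirtyTrap = statRegister("/PGM/CPU%d/RZ/DirtyPage/Trap", 0);
        pDirtyTrap->cHits++;    /* roughly what a counter-increment macro boils down to */
        printf("%s = %llu\n", pDirtyTrap->szName, (unsigned long long)pDirtyTrap->cHits);
        return 0;
    }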
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
(r19771 → r19772)

Around lines 150-280 (Trap0eHandler): the shadow page directory lookup, the CheckPageFault call and the "lazy page table syncing" block are moved back in front of the guest PDE fetch and out of the PGM lock. The old code held pgmLock(pVM) around a "{ /* Force the shadow pointers to go out of scope after releasing the lock. */ ... }" scope containing the same statements, with an extra pgmUnlock(pVM) before every error return and a final pgmUnlock(pVM) at the end of the scope; those lock calls, the scope braces and the old "/* Fetch the guest PDE */" comment are removed. The relocated block reads:

    # if PGM_SHW_TYPE == PGM_TYPE_32BIT
        const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
        PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);

    # elif PGM_SHW_TYPE == PGM_TYPE_PAE
        const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */

        PX86PDPAE       pPDDst;
    # if PGM_GST_TYPE != PGM_TYPE_PAE
        X86PDPE         PdpeSrc;

        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
        PdpeSrc.u  = X86_PDPE_P;    /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
    # endif
        rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);

    # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
        const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PX86PDPAE       pPDDst;
    # if PGM_GST_TYPE == PGM_TYPE_PROT
        /* AMD-V nested paging */
        X86PML4E        Pml4eSrc;
        X86PDPE         PdpeSrc;
        PX86PML4E       pPml4eSrc = &Pml4eSrc;

        /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
        Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
        PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
    # endif

        rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);

    # elif PGM_SHW_TYPE == PGM_TYPE_EPT
        const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PEPTPD          pPDDst;

        rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
    # endif

    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
        /*
         * If we successfully correct the write protection fault due to dirty bit
         * tracking, or this page fault is a genuine one, then return immediately.
         */
        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
        rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
        if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
            ||  rc == VINF_EM_RAW_GUEST_TRAP)
        {
            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
                         = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
            LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
            return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
        }

        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
    # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

        /*
         * A common case is the not-present error caused by lazy page table syncing.
         *
         * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
         * so we can safely assume that the shadow PT is present when calling SyncPage later.
         *
         * On failure, we ASSUME that SyncPT is out of memory or detected some kind
         * of mapping conflict and defer to SyncCR3 in R3.
         * (Again, we do NOT support access handlers for non-present guest pages.)
         */

The guest PDE fetch (#if PGM_WITH_PAGING ... GSTPDE PdeSrc = pPDSrc->a[iPDSrc]; ... PdeSrc.n.u1User = 1; #endif) is unchanged, and the not-present/SyncPT path that follows it now takes the lock only around SyncPT:

        if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
            &&  !pPDDst->a[iPDDst].n.u1Present
            &&  PdeSrc.n.u1Present
            )

        {
            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
            STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
            LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
            pgmLock(pVM);
            rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
            pgmUnlock(pVM);
            if (RT_SUCCESS(rc))
            {
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
                return rc;
            }
            Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
            return VINF_PGM_SYNC_CR3;
        }

(Previously the lock was already held at this point, pgmUnlock(pVM) directly followed the SyncPT call, and the enclosing scope ended with pgmUnlock(pVM).)

Around lines 1078-1300 (InvalidatePage) and around line 1905: shadow PML4/PDPT/PD entries are again cleared with a plain write instead of ASMAtomicWriteSize(), e.g.

         pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
    -    ASMAtomicWriteSize(pPml4eDst, 0);
    +    pPml4eDst->u = 0;
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
         PGM_INVL_GUEST_TLBS();

The same one-line replacement is made for pPml4eDst and pPdpeDst in the InvalidatePagePDNPs, InvalidatePagePDOutOfSync and InvalidatePagePDNAs cases, for pPdeDst in the InvalidatePagePDOutOfSync, InvalidatePagePDNAs, InvalidatePage4MBPages and InvalidatePagePDNPs cases, and for the pPdeDst cleared after pgmPoolFreeByPage() around line 1905.

Around line 2077 (CheckPageFault): the lock-owner assertion is removed.

     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    -Assert(PGMIsLockOwner(pVM));
    -
     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
     LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));

Around line 2158 (CheckPageFault, big-page dirty tracking): the separate stale-TLB branch and its DirtyPageStale counter are dropped and the TRACK_DIRTY test is folded back into the presence check. Before:

        pPdeSrc->b.u1Dirty = 1;

        if (pPdeDst->n.u1Present)
        {
            if (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY)
            {
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
                Assert(pPdeSrc->b.u1Write);

                /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
                 *       fault again and take this path to only invalidate the entry.
                 */
                pPdeDst->n.u1Write    = 1;
                pPdeDst->n.u1Accessed = 1;
                pPdeDst->au32[0]     &= ~PGM_PDFLAGS_TRACK_DIRTY;
            }
            else
            {
                /* Stale TLB entry. */
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
            }
            PGM_INVL_BIG_PG(GCPtrPage);
            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);

After:

        pPdeSrc->b.u1Dirty = 1;

        if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
        {
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));

            Assert(pPdeSrc->b.u1Write);

            pPdeDst->n.u1Write    = 1;
            pPdeDst->n.u1Accessed = 1;
            pPdeDst->au32[0]     &= ~PGM_PDFLAGS_TRACK_DIRTY;
            PGM_INVL_BIG_PG(GCPtrPage);
            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);

Around line 2258 (CheckPageFault, 4KB-page dirty tracking): the same change at the PTE level. Before:

        PSHWPT  pPTDst  = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
        PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
        if (pPteDst->n.u1Present)       /** @todo Optimize accessed bit emulation? */
        {
            if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
            {
                LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
    # ifdef VBOX_STRICT
                PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
                if (pPage)
                    AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
                              ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
    # endif
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));

                Assert(pPteSrc->n.u1Write);

                /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
                 *       fault again and take this path to only invalidate the entry.
                 */
                pPteDst->n.u1Write    = 1;
                pPteDst->n.u1Dirty    = 1;
                pPteDst->n.u1Accessed = 1;
                pPteDst->au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
            }
            else
            {
                /* Stale TLB entry. */
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
            }
            PGM_INVL_PG(GCPtrPage);

After:

        PSHWPT  pPTDst  = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
        PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
        if (    pPteDst->n.u1Present    /** @todo Optimize accessed bit emulation? */
            &&  (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
        {
            LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
    # ifdef VBOX_STRICT
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
            if (pPage)
                AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
                          ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
    # endif
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));

            Assert(pPteSrc->n.u1Write);

            pPteDst->n.u1Write    = 1;
            pPteDst->n.u1Dirty    = 1;
            pPteDst->n.u1Accessed = 1;
            pPteDst->au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
            PGM_INVL_PG(GCPtrPage);

Around lines 3112-3226: the PGM lock is no longer held across the whole function; the pgmLock(pVM) after "int rc = VINF_SUCCESS;", the pgmUnlock(pVM) calls in the two pgmShwSync*PDPtr error paths and before the final return, and the one in the !pPdeDst->n.u1Present error path are removed, and the lock is instead taken just around SyncPT:

     if (!pPdeDst->n.u1Present)
     {
    +    pgmLock(pVM);
         rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
    +    pgmUnlock(pVM);
    +    AssertRC(rc);
         if (rc != VINF_SUCCESS)
         {
             ...
             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
     # endif
    -        pgmUnlock(pVM);
    -        AssertRC(rc);
             return rc;
         }
     }

Two preprocessor directives in the same region are also reflowed to "#else" and "#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */".
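The CheckPageFault hunks above drop the separate stale-TLB statistics and fold the PGM_PDFLAGS_TRACK_DIRTY / PGM_PTFLAGS_TRACK_DIRTY test back into the presence check. The underlying technique is dirty-bit emulation: the shadow entry is deliberately kept write-protected and tagged with a tracking flag, and the first write fault propagates the accessed/dirty bits, makes the shadow entry writable and clears the flag. A minimal standalone sketch of that pattern (simplified entry layout and flag values, not the VirtualBox definitions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified page-table entry bits (illustrative values, not the x86 layout). */
    #define PTE_PRESENT      0x001u
    #define PTE_WRITE        0x002u
    #define PTE_ACCESSED     0x004u
    #define PTE_DIRTY        0x008u
    #define PTE_TRACK_DIRTY  0x100u   /* software flag: write-protected only for dirty tracking */

    /* Returns true if a write fault was fully handled by dirty-bit tracking. */
    static bool handleWriteFault(uint32_t *pShwPte, uint32_t *pGstPte)
    {
        if ((*pShwPte & PTE_PRESENT) && (*pShwPte & PTE_TRACK_DIRTY))
        {
            /* First write to the page: propagate A/D bits to the guest entry,
               make the shadow entry writable and drop the tracking flag.
               A real implementation would also invalidate the TLB entry here. */
            *pGstPte |= PTE_ACCESSED | PTE_DIRTY;
            *pShwPte |= PTE_WRITE | PTE_ACCESSED | PTE_DIRTY;
            *pShwPte &= ~PTE_TRACK_DIRTY;
            return true;
        }
        return false;   /* genuine protection fault (or a stale TLB entry) */
    }

    int main(void)
    {
        uint32_t gst = PTE_PRESENT | PTE_WRITE;         /* guest marks the page writable */
        uint32_t shw = PTE_PRESENT | PTE_TRACK_DIRTY;   /* shadow copy kept read-only    */

        int fHandled = handleWriteFault(&shw, &gst);
        printf("first write:  handled=%d shw=%#x gst=%#x\n", fHandled, (unsigned)shw, (unsigned)gst);
        fHandled = handleWriteFault(&shw, &gst);
        printf("second write: handled=%d (tracking flag already cleared)\n", fHandled);
        return 0;
    }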
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
(r19709 → r19772)

Around line 672:

     Assert(!pTimer->offPrev);
     Assert(!pTimer->offNext);
    +/*
     AssertMsg(  pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
               || pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
               || u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
               ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
    +*/
     pTimer->u64Expire = u64Expire;
     TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
(r19746 → r19772)

Around line 609:

     else
     {
    -    LogFlow(("INJ-EI: %x at %RGv\n", iGate, (RTGCPTR)pCtx->rip));
    +    Log(("CPU%d: INJ-EI: %x at %RGv\n", pVCpu->idCpu, iGate, (RTGCPTR)pCtx->rip));
         Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
         Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || pCtx->eflags.u32 & X86_EFL_IF);

Around line 1077:

     u32TrapMask = HWACCM_VMX_TRAP_MASK;
    -#ifndef DEBUG
    +//#ifndef DEBUG
     if (pVM->hwaccm.s.fNestedPaging)
         u32TrapMask &= ~RT_BIT(X86_XCPT_PF); /* no longer need to intercept #PF. */
    -#endif
    +//#endif

     /* Also catch floating point exceptions as we need to report them to the guest in a different way. */

Around line 2876:

     errCode |= X86_TRAP_PF_P;

    -Log(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode));
    +LogFlow(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode));

     /* GCPhys contains the guest physical address of the page fault. */
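The middle hunk makes the removal of the #PF intercept unconditional whenever nested paging is enabled (the old #ifndef DEBUG guard kept #PF intercepted in debug builds). A rough standalone sketch of that exception-bitmap adjustment (illustrative constants; the real mask, RT_BIT() and X86_XCPT_PF come from the VirtualBox headers):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_BIT(bit)        (1u << (bit))
    #define MY_XCPT_UD         6                                            /* #UD vector */
    #define MY_XCPT_PF         14                                           /* #PF vector */
    #define MY_BASE_TRAP_MASK  (MY_BIT(MY_XCPT_PF) | MY_BIT(MY_XCPT_UD))    /* stand-in for HWACCM_VMX_TRAP_MASK */

    static uint32_t buildTrapMask(int fNestedPaging)
    {
        uint32_t u32TrapMask = MY_BASE_TRAP_MASK;
        if (fNestedPaging)
            u32TrapMask &= ~MY_BIT(MY_XCPT_PF);   /* hardware walks the guest tables; no need to intercept #PF */
        return u32TrapMask;
    }

    int main(void)
    {
        printf("shadow paging trap mask: %#x\n", (unsigned)buildTrapMask(0));
        printf("nested paging trap mask: %#x\n", (unsigned)buildTrapMask(1));
        return 0;
    }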