Changeset 65504 in vbox for trunk/src/VBox/VMM

- Timestamp: Jan 29, 2017 11:54:25 AM
- Location: trunk/src/VBox/VMM
- Files: 9 edited
Legend:
- lines prefixed with '-' were removed in r65504
- lines prefixed with '+' were added in r65504
- unprefixed lines are unmodified context
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
(r65466 → r65504)

          PdeSrc.u = 0;
  # endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
- const bool fWasBigPage = RT_BOOL(PdeDst.u & PGM_PDFLAGS_BIG_PAGE);
  const bool fIsBigPage  = PdeSrc.b.u1Size && GST_IS_PSE_ACTIVE(pVCpu);
- if (fWasBigPage != fIsBigPage)
-     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped));

  # ifdef IN_RING3
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r65466 r65504 74 74 * @param pWalk Where to return the walk result. This is always set. 75 75 */ 76 DECLINLINE(int)PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)76 static int PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk) 77 77 { 78 78 int rc; … … 93 93 # endif 94 94 95 uint32_t register fEffective = X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | 1;96 95 { 97 96 # if PGM_GST_TYPE == PGM_TYPE_AMD64 … … 100 99 */ 101 100 rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4); 102 if (RT_SUCCESS(rc)) { /* probable */ } 103 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc); 104 101 if (RT_FAILURE(rc)) 102 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc); 103 104 PX86PML4 register pPml4 = pWalk->pPml4; 105 X86PML4E register Pml4e; 105 106 PX86PML4E register pPml4e; 106 pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK]; 107 X86PML4E register Pml4e;107 108 pWalk->pPml4e = pPml4e = &pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK]; 108 109 pWalk->Pml4e.u = Pml4e.u = pPml4e->u; 109 110 if (Pml4e.n.u1Present) { /* probable */ } 111 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4); 112 113 if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ } 114 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4); 115 116 pWalk->Core.fEffective = fEffective = ((uint32_t)Pml4e.u & (X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A)) 117 | ((uint32_t)(Pml4e.u >> 63) ^ 1) /*NX */; 110 if (!Pml4e.n.u1Present) 111 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4); 112 if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, Pml4e))) 113 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4); 118 114 119 115 /* … … 121 117 */ 122 118 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pWalk->pPdpt); 123 if (RT_ SUCCESS(rc)) { /* probable */ }124 elsereturn PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);119 if (RT_FAILURE(rc)) 120 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc); 125 121 126 122 # elif PGM_GST_TYPE == PGM_TYPE_PAE 127 123 rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt); 128 if (RT_ SUCCESS(rc)) { /* probable */ }129 elsereturn PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);124 if (RT_FAILURE(rc)) 125 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); 130 126 # endif 131 127 } 132 128 { 133 129 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 130 PX86PDPT register pPdpt = pWalk->pPdpt; 134 131 PX86PDPE register pPdpe; 135 pWalk->pPdpe = pPdpe = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];136 132 X86PDPE register Pdpe; 133 134 pWalk->pPdpe = pPdpe = &pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK]; 137 135 pWalk->Pdpe.u = Pdpe.u = pPdpe->u; 138 139 if (Pdpe.n.u1Present) { /* probable */ } 140 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3); 141 142 if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ } 143 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3); 144 145 # if PGM_GST_TYPE == PGM_TYPE_AMD64 146 pWalk->Core.fEffective = fEffective &= ((uint32_t)Pdpe.u & (X86_PDPE_RW | X86_PDPE_US | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A)) 147 | ((uint32_t)(Pdpe.u >> 63) ^ 1) /*NX */; 148 # else 149 pWalk->Core.fEffective = fEffective = X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A 150 | ((uint32_t)Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD)) 151 | ((uint32_t)(Pdpe.u >> 63) ^ 1) /*NX */; 152 # endif 136 
if (!Pdpe.n.u1Present) 137 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3); 138 if (RT_UNLIKELY(!GST_IS_PDPE_VALID(pVCpu, Pdpe))) 139 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3); 153 140 154 141 /* … … 156 143 */ 157 144 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pWalk->pPd); 158 if (RT_ SUCCESS(rc)) { /* probable */ }159 elsereturn PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);145 if (RT_FAILURE(rc)) 146 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc); 160 147 # elif PGM_GST_TYPE == PGM_TYPE_32BIT 161 148 rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd); 162 if (RT_SUCCESS(rc)) { /* probable */ } 163 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); 164 # endif 165 } 166 { 149 if (RT_FAILURE(rc)) 150 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); 151 # endif 152 } 153 { 154 PGSTPD register pPd = pWalk->pPd; 167 155 PGSTPDE register pPde; 168 pWalk->pPde = pPde = &pWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];169 156 GSTPDE Pde; 157 158 pWalk->pPde = pPde = &pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK]; 170 159 pWalk->Pde.u = Pde.u = pPde->u; 171 if ( Pde.n.u1Present) { /* probable */ }172 elsereturn PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);160 if (!Pde.n.u1Present) 161 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2); 173 162 if (Pde.n.u1Size && GST_IS_PSE_ACTIVE(pVCpu)) 174 163 { 175 if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ } 176 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 177 178 /* 179 * We're done. 180 */ 181 # if PGM_GST_TYPE == PGM_TYPE_32BIT 182 fEffective &= Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A); 183 # else 184 fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A)) 185 | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */; 186 # endif 187 fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G); 188 fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT; 189 pWalk->Core.fEffective = fEffective; 190 191 pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW); 192 pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US); 193 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 194 pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu); 195 # else 196 pWalk->Core.fEffectiveNX = false; 197 # endif 198 pWalk->Core.fBigPage = true; 199 pWalk->Core.fSucceeded = true; 164 if (RT_UNLIKELY(!GST_IS_BIG_PDE_VALID(pVCpu, Pde))) 165 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 200 166 201 167 pWalk->Core.GCPhys = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde) 202 168 | (GCPtr & GST_BIG_PAGE_OFFSET_MASK); 203 169 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys); 170 uint8_t fEffectiveXX = (uint8_t)pWalk->Pde.u 171 # if PGM_GST_TYPE == PGM_TYPE_AMD64 172 & (uint8_t)pWalk->Pdpe.u 173 & (uint8_t)pWalk->Pml4e.u 174 # endif 175 ; 176 pWalk->Core.fEffectiveRW = !!(fEffectiveXX & X86_PTE_RW); 177 pWalk->Core.fEffectiveUS = !!(fEffectiveXX & X86_PTE_US); 178 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 179 pWalk->Core.fEffectiveNX = ( pWalk->Pde.n.u1NoExecute 180 # if PGM_GST_TYPE == PGM_TYPE_AMD64 181 || pWalk->Pdpe.lm.u1NoExecute 182 || pWalk->Pml4e.n.u1NoExecute 183 # endif 184 ) && GST_IS_NX_ACTIVE(pVCpu); 185 # else 186 pWalk->Core.fEffectiveNX = false; 187 # endif 188 pWalk->Core.fBigPage = true; 189 pWalk->Core.fSucceeded = true; 204 190 return 
VINF_SUCCESS; 205 191 } … … 207 193 if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde))) 208 194 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 209 # if PGM_GST_TYPE == PGM_TYPE_32BIT210 pWalk->Core.fEffective = fEffective &= Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);211 # else212 pWalk->Core.fEffective = fEffective &= ((uint32_t)Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))213 | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;214 # endif215 195 216 196 /* … … 218 198 */ 219 199 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt); 220 if (RT_SUCCESS(rc)) { /* probable */ } 221 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc); 222 } 223 { 200 if (RT_FAILURE(rc)) 201 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc); 202 } 203 { 204 PGSTPT register pPt = pWalk->pPt; 224 205 PGSTPTE register pPte; 225 pWalk->pPte = pPte = &pWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];226 206 GSTPTE register Pte; 207 208 pWalk->pPte = pPte = &pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK]; 227 209 pWalk->Pte.u = Pte.u = pPte->u; 228 229 if (Pte.n.u1Present) { /* probable */ } 230 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1); 231 232 if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ } 233 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1); 210 if (!Pte.n.u1Present) 211 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1); 212 if (RT_UNLIKELY(!GST_IS_PTE_VALID(pVCpu, Pte))) 213 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1); 234 214 235 215 /* 236 216 * We're done. 237 217 */ 238 # if PGM_GST_TYPE == PGM_TYPE_32BIT 239 fEffective &= Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);240 # else 241 fEffective &= ((uint32_t)Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A))242 | ((uint32_t)(Pte.u >> 63) ^ 1) /*NX */; 243 # endif 244 fEffective |= (uint32_t)Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);245 pWalk->Core.fEffective = fEffective; 246 247 pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);248 pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);218 pWalk->Core.GCPhys = GST_GET_PDE_GCPHYS(Pte) 219 | (GCPtr & PAGE_OFFSET_MASK); 220 uint8_t fEffectiveXX = (uint8_t)pWalk->Pte.u 221 & (uint8_t)pWalk->Pde.u 222 # if PGM_GST_TYPE == PGM_TYPE_AMD64 223 & (uint8_t)pWalk->Pdpe.u 224 & (uint8_t)pWalk->Pml4e.u 225 # endif 226 ; 227 pWalk->Core.fEffectiveRW = !!(fEffectiveXX & X86_PTE_RW); 228 pWalk->Core.fEffectiveUS = !!(fEffectiveXX & X86_PTE_US); 249 229 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 250 pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu); 230 pWalk->Core.fEffectiveNX = ( pWalk->Pte.n.u1NoExecute 231 || pWalk->Pde.n.u1NoExecute 232 # if PGM_GST_TYPE == PGM_TYPE_AMD64 233 || pWalk->Pdpe.lm.u1NoExecute 234 || pWalk->Pml4e.n.u1NoExecute 235 # endif 236 ) && GST_IS_NX_ACTIVE(pVCpu); 251 237 # else 252 238 pWalk->Core.fEffectiveNX = false; 253 239 # endif 254 240 pWalk->Core.fSucceeded = true; 255 256 pWalk->Core.GCPhys = GST_GET_PDE_GCPHYS(Pte)257 | (GCPtr & PAGE_OFFSET_MASK);258 241 return VINF_SUCCESS; 259 242 } -
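The PGMAllGst.h hunk above drops the running fEffective accumulator and instead derives the effective access rights once the walk has succeeded: the low bits of the PML4E/PDPE/PDE/PTE are AND-ed together (RW and US must be granted at every level), while the per-level no-execute bits are OR-ed and gated by GST_IS_NX_ACTIVE. As a rough, self-contained illustration of that rule only, here is a minimal sketch in plain C; the type and function names are invented for the example and are not part of the VirtualBox sources.

    /* Illustrative sketch only (not VirtualBox source): combining the
     * effective RW/US/NX attributes of a 4-level page-table walk.
     * The parameters are assumed to be raw 64-bit table entries. */
    #include <stdbool.h>
    #include <stdint.h>

    #define PT_RW  (UINT64_C(1) << 1)   /* writable        */
    #define PT_US  (UINT64_C(1) << 2)   /* user accessible */
    #define PT_NX  (UINT64_C(1) << 63)  /* no-execute      */

    typedef struct
    {
        bool fRW;
        bool fUS;
        bool fNX;
    } EFFPERMS;

    /* RW and US must be set at every level; NX is sticky if set at any level
     * and only meaningful when NX is enabled (EFER.NXE). */
    static EFFPERMS CalcEffective(uint64_t uPml4e, uint64_t uPdpe,
                                  uint64_t uPde, uint64_t uPte, bool fNxActive)
    {
        uint64_t const fAnd = uPml4e & uPdpe & uPde & uPte;
        uint64_t const fOr  = uPml4e | uPdpe | uPde | uPte;
        EFFPERMS Perms;
        Perms.fRW = (fAnd & PT_RW) != 0;
        Perms.fUS = (fAnd & PT_US) != 0;
        Perms.fNX = fNxActive && (fOr & PT_NX) != 0;
        return Perms;
    }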
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r65502 r65504 685 685 } 686 686 687 # ifndef IN_RING3687 # ifndef IN_RING3 688 688 689 689 /** … … 697 697 * @param offFault The access offset. 698 698 */ 699 DECLINLINE(bool) pgm RZPoolMonitorIsForking(PPGMPOOL pPool, PDISCPUSTATE pDis, unsigned offFault)699 DECLINLINE(bool) pgmPoolMonitorIsForking(PPGMPOOL pPool, PDISCPUSTATE pDis, unsigned offFault) 700 700 { 701 701 /* … … 720 720 ) 721 721 { 722 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor Pf,Fork)); RT_NOREF_PV(pPool);722 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,Fork)); RT_NOREF_PV(pPool); 723 723 return true; 724 724 } … … 740 740 * @remark The REP prefix check is left to the caller because of STOSD/W. 741 741 */ 742 DECLINLINE(bool) pgm RZPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault)743 { 744 # 742 DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault) 743 { 744 #ifndef IN_RC 745 745 /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */ 746 746 if ( HMHasPendingIrq(pVM) … … 748 748 { 749 749 /* Fault caused by stack writes while trying to inject an interrupt event. */ 750 Log(("pgm RZPoolMonitorIsReused: reused %RGv for interrupt stack (rsp=%RGv).\n", pvFault, pRegFrame->rsp));750 Log(("pgmPoolMonitorIsReused: reused %RGv for interrupt stack (rsp=%RGv).\n", pvFault, pRegFrame->rsp)); 751 751 return true; 752 752 } 753 # 753 #else 754 754 NOREF(pVM); NOREF(pvFault); 755 # 755 #endif 756 756 757 757 LogFlow(("Reused instr %RGv %d at %RGv param1.fUse=%llx param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->uOpcode, pvFault, pDis->Param1.fUse, pDis->Param1.Base.idxGenReg)); … … 765 765 /* call implies the actual push of the return address faulted */ 766 766 case OP_CALL: 767 Log4(("pgm RZPoolMonitorIsReused: CALL\n"));767 Log4(("pgmPoolMonitorIsReused: CALL\n")); 768 768 return true; 769 769 case OP_PUSH: 770 Log4(("pgm RZPoolMonitorIsReused: PUSH\n"));770 Log4(("pgmPoolMonitorIsReused: PUSH\n")); 771 771 return true; 772 772 case OP_PUSHF: 773 Log4(("pgm RZPoolMonitorIsReused: PUSHF\n"));773 Log4(("pgmPoolMonitorIsReused: PUSHF\n")); 774 774 return true; 775 775 case OP_PUSHA: 776 Log4(("pgm RZPoolMonitorIsReused: PUSHA\n"));776 Log4(("pgmPoolMonitorIsReused: PUSHA\n")); 777 777 return true; 778 778 case OP_FXSAVE: 779 Log4(("pgm RZPoolMonitorIsReused: FXSAVE\n"));779 Log4(("pgmPoolMonitorIsReused: FXSAVE\n")); 780 780 return true; 781 781 case OP_MOVNTI: /* solaris - block_zero_no_xmm */ 782 Log4(("pgm RZPoolMonitorIsReused: MOVNTI\n"));782 Log4(("pgmPoolMonitorIsReused: MOVNTI\n")); 783 783 return true; 784 784 case OP_MOVNTDQ: /* solaris - hwblkclr & hwblkpagecopy */ 785 Log4(("pgm RZPoolMonitorIsReused: MOVNTDQ\n"));785 Log4(("pgmPoolMonitorIsReused: MOVNTDQ\n")); 786 786 return true; 787 787 case OP_MOVSWD: … … 793 793 Assert(pDis->uCpuMode == DISCPUMODE_64BIT); 794 794 795 Log(("pgm RZPoolMonitorIsReused: OP_STOSQ\n"));795 Log(("pgmPoolMonitorIsReused: OP_STOSQ\n")); 796 796 return true; 797 797 } … … 806 806 && (pDis->Param1.Base.idxGenReg == DISGREG_ESP)) 807 807 { 808 Log4(("pgm RZPoolMonitorIsReused: ESP\n"));808 Log4(("pgmPoolMonitorIsReused: ESP\n")); 809 809 return true; 810 810 } … … 819 819 if ( (((uintptr_t)pvFault + cbWrite) >> X86_PAGE_SHIFT) != ((uintptr_t)pvFault >> X86_PAGE_SHIFT) ) 820 820 { 821 Log4(("pgm RZPoolMonitorIsReused: cross page write\n"));821 Log4(("pgmPoolMonitorIsReused: cross page write\n")); 822 822 return true; 823 823 } … … 
828 828 if (cbWrite >= 8 && ((uintptr_t)pvFault & 7) != 0) 829 829 { 830 Log4(("pgm RZPoolMonitorIsReused: Unaligned 8+ byte write\n"));830 Log4(("pgmPoolMonitorIsReused: Unaligned 8+ byte write\n")); 831 831 return true; 832 832 } … … 850 850 * @todo VBOXSTRICTRC 851 851 */ 852 static int pgm RZPoolAccessPfHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,853 852 static int pgmPoolAccessPfHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis, 853 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault) 854 854 { 855 855 NOREF(pVM); NOREF(GCPhysFault); … … 871 871 if (rc == VINF_SUCCESS) 872 872 rc = VBOXSTRICTRC_VAL(rc2); 873 # 873 #ifndef IN_RING3 874 874 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); 875 # 875 #endif 876 876 } 877 877 else if (rc2 == VERR_EM_INTERPRETER) 878 878 { 879 # 879 #ifdef IN_RC 880 880 if (PATMIsPatchGCAddr(pVM, pRegFrame->eip)) 881 881 { 882 LogFlow(("pgm RZPoolAccessPfHandlerFlush: Interpretation failed for patch code %04x:%RGv, ignoring.\n",882 LogFlow(("pgmPoolAccessPfHandlerPTWorker: Interpretation failed for patch code %04x:%RGv, ignoring.\n", 883 883 pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->eip)); 884 884 rc = VINF_SUCCESS; 885 STAM_COUNTER_INC(&pPool->StatMonitor PfRZIntrFailPatch2);885 STAM_COUNTER_INC(&pPool->StatMonitorRZIntrFailPatch2); 886 886 } 887 887 else 888 # 888 #endif 889 889 { 890 890 rc = VINF_EM_RAW_EMULATE_INSTR; 891 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor Pf,EmulateInstr));891 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr)); 892 892 } 893 893 } … … 897 897 AssertMsgFailed(("%Rrc\n", VBOXSTRICTRC_VAL(rc2))); /* ASSUMES no complicated stuff here. */ 898 898 899 LogFlow(("pgm RZPoolAccessPfHandlerFlush: returns %Rrc (flushed)\n", rc));899 LogFlow(("pgmPoolAccessPfHandlerPT: returns %Rrc (flushed)\n", rc)); 900 900 return rc; 901 901 } … … 914 914 * @param pvFault The fault address. 
915 915 */ 916 DECLINLINE(int) pgm RZPoolAccessPfHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,917 916 DECLINLINE(int) pgmPoolAccessPfHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis, 917 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault) 918 918 { 919 919 unsigned uIncrement = pDis->Param1.cb; … … 923 923 Assert(pRegFrame->rcx <= 0x20); 924 924 925 # 925 #ifdef VBOX_STRICT 926 926 if (pDis->uOpMode == DISCPUMODE_32BIT) 927 927 Assert(uIncrement == 4); 928 928 else 929 929 Assert(uIncrement == 8); 930 # 931 932 Log3(("pgm RZPoolAccessPfHandlerSTOSD\n"));930 #endif 931 932 Log3(("pgmPoolAccessPfHandlerSTOSD\n")); 933 933 934 934 /* … … 949 949 while (pRegFrame->rcx) 950 950 { 951 # 951 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 952 952 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 953 953 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement); 954 954 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 955 # 955 #else 956 956 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement); 957 # 958 # 957 #endif 958 #ifdef IN_RC 959 959 *(uint32_t *)(uintptr_t)pu32 = pRegFrame->eax; 960 # 960 #else 961 961 PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->rax, uIncrement); 962 # 962 #endif 963 963 pu32 += uIncrement; 964 964 GCPhysFault += uIncrement; … … 968 968 pRegFrame->rip += pDis->cbInstr; 969 969 970 LogFlow(("pgm RZPoolAccessPfHandlerSTOSD: returns\n"));970 LogFlow(("pgmPoolAccessPfHandlerSTOSD: returns\n")); 971 971 return VINF_SUCCESS; 972 972 } … … 987 987 * @param pfReused Reused state (in/out) 988 988 */ 989 DECLINLINE(int) pgm RZPoolAccessPfHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,990 991 { 992 Log3(("pgm RZPoolAccessPfHandlerSimple\n"));989 DECLINLINE(int) pgmPoolAccessPfHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis, 990 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault, bool *pfReused) 991 { 992 Log3(("pgmPoolAccessPfHandlerSimple\n")); 993 993 NOREF(pVM); 994 994 NOREF(pfReused); /* initialized by caller */ … … 1004 1004 * Clear all the pages. ASSUMES that pvFault is readable. 
1005 1005 */ 1006 # 1006 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 1007 1007 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 1008 # 1008 #endif 1009 1009 1010 1010 uint32_t cbWrite = DISGetParamSize(pDis, &pDis->Param1); … … 1023 1023 } 1024 1024 1025 # 1025 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 1026 1026 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 1027 # 1027 #endif 1028 1028 1029 1029 /* … … 1035 1035 else if (rc == VERR_EM_INTERPRETER) 1036 1036 { 1037 LogFlow(("pgm RZPoolAccessPfHandlerSimple: Interpretation failed for %04x:%RGv - opcode=%d\n",1037 LogFlow(("pgmPoolAccessPfHandlerPTWorker: Interpretation failed for %04x:%RGv - opcode=%d\n", 1038 1038 pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->uOpcode)); 1039 1039 rc = VINF_EM_RAW_EMULATE_INSTR; 1040 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor Pf,EmulateInstr));1041 } 1042 1043 # 1040 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr)); 1041 } 1042 1043 #if 0 /* experimental code */ 1044 1044 if (rc == VINF_SUCCESS) 1045 1045 { … … 1069 1069 } 1070 1070 } 1071 # 1072 1073 LogFlow(("pgm RZPoolAccessPfHandlerSimple: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));1071 #endif 1072 1073 LogFlow(("pgmPoolAccessPfHandlerSimple: returns %Rrc\n", VBOXSTRICTRC_VAL(rc))); 1074 1074 return VBOXSTRICTRC_VAL(rc); 1075 1075 } … … 1082 1082 * @remarks The @a pvUser argument points to the PGMPOOLPAGE. 1083 1083 */ 1084 DECLEXPORT(VBOXSTRICTRC) pgm RZPoolAccessPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,1085 1086 { 1087 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)-> StatMonitorRZ, a);1084 DECLEXPORT(VBOXSTRICTRC) pgmPoolAccessPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, 1085 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser) 1086 { 1087 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a); 1088 1088 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 1089 1089 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser; … … 1092 1092 NOREF(uErrorCode); 1093 1093 1094 LogFlow(("pgm RZPoolAccessPfHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));1094 LogFlow(("pgmPoolAccessPfHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault)); 1095 1095 1096 1096 pgmLock(pVM); … … 1098 1098 { 1099 1099 /* Pool page changed while we were waiting for the lock; ignore. */ 1100 Log(("CPU%d: pgm RZPoolAccessPfHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhysFault), PHYS_PAGE_ADDRESS(pPage->GCPhys)));1101 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)-> StatMonitorPfRZ, &pPool->StatMonitorPfRZHandled, a);1100 Log(("CPU%d: pgmPoolAccessPfHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhysFault), PHYS_PAGE_ADDRESS(pPage->GCPhys))); 1101 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a); 1102 1102 pgmUnlock(pVM); 1103 1103 return VINF_SUCCESS; 1104 1104 } 1105 # 1105 #ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT 1106 1106 if (pPage->fDirty) 1107 1107 { … … 1110 1110 return VINF_SUCCESS; /* SMP guest case where we were blocking on the pgm lock while the same page was being marked dirty. 
*/ 1111 1111 } 1112 # 1113 1114 # 1112 #endif 1113 1114 #if 0 /* test code defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) */ 1115 1115 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT) 1116 1116 { … … 1122 1122 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw); 1123 1123 } 1124 # 1124 #endif 1125 1125 1126 1126 /* … … 1144 1144 Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX); 1145 1145 1146 # 1146 #ifdef IN_RING0 1147 1147 /* Maximum nr of modifications depends on the page type. */ 1148 1148 if ( pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT … … 1151 1151 else 1152 1152 cMaxModifications = 24; 1153 # 1153 #else 1154 1154 cMaxModifications = 48; 1155 # 1155 #endif 1156 1156 1157 1157 /* … … 1172 1172 if (pPage->cModifications >= cMaxModifications) 1173 1173 { 1174 STAM_COUNTER_INC(&pPool-> StatMonitorPfRZFlushReinit);1174 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushReinit)); 1175 1175 fForcedFlush = true; 1176 1176 } … … 1188 1188 || pgmPoolIsPageLocked(pPage) 1189 1189 ) 1190 && !(fReused = pgm RZPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault))1191 && !pgm RZPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))1190 && !(fReused = pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)) 1191 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK)) 1192 1192 { 1193 1193 /* … … 1196 1196 if (!(pDis->fPrefix & (DISPREFIX_REP | DISPREFIX_REPNE))) 1197 1197 { 1198 rc = pgm RZPoolAccessPfHandlerSimple(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault, &fReused);1198 rc = pgmPoolAccessPfHandlerSimple(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault, &fReused); 1199 1199 if (fReused) 1200 1200 goto flushPage; … … 1226 1226 } 1227 1227 1228 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)-> StatMonitorPfRZ, &pPool->StatMonitorPfRZHandled, a);1228 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a); 1229 1229 pgmUnlock(pVM); 1230 1230 return rc; … … 1267 1267 if (fValidStosd) 1268 1268 { 1269 rc = pgm RZPoolAccessPfHandlerSTOSD(pVM, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);1270 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)-> StatMonitorPfRZ, &pPool->StatMonitorPfRZRepStosd, a);1269 rc = pgmPoolAccessPfHandlerSTOSD(pVM, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault); 1270 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,RepStosd), a); 1271 1271 pgmUnlock(pVM); 1272 1272 return rc; … … 1275 1275 1276 1276 /* REP prefix, don't bother. */ 1277 STAM_COUNTER_INC(&pPool-> StatMonitorPfRZRepPrefix);1278 Log4(("pgm RZPoolAccessPfHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n",1277 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,RepPrefix)); 1278 Log4(("pgmPoolAccessPfHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n", 1279 1279 pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->uOpcode, pDis->fPrefix)); 1280 1280 fNotReusedNotForking = true; 1281 1281 } 1282 1282 1283 # 1283 #if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) && defined(IN_RING0) 1284 1284 /* E.g. Windows 7 x64 initializes page tables and touches some pages in the table during the process. This 1285 1285 * leads to pgm pool trashing and an excessive amount of write faults due to page monitoring. 
… … 1289 1289 && (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT || pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_32BIT_PT) 1290 1290 && ( fNotReusedNotForking 1291 || ( !pgm RZPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)1292 && !pgm RZPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))1291 || ( !pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault) 1292 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK)) 1293 1293 ) 1294 1294 ) … … 1342 1342 || rc == VERR_PAGE_NOT_PRESENT, 1343 1343 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc)); 1344 # 1344 # ifdef VBOX_STRICT 1345 1345 pPage->GCPtrDirtyFault = pvFault; 1346 # 1347 1348 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)-> StatMonitorPfRZ, a);1346 # endif 1347 1348 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a); 1349 1349 pgmUnlock(pVM); 1350 1350 return rc; … … 1352 1352 } 1353 1353 } 1354 # 1355 1356 STAM_COUNTER_INC(&pPool-> StatMonitorPfRZFlushModOverflow);1354 #endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */ 1355 1356 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushModOverflow)); 1357 1357 flushPage: 1358 1358 /* … … 1364 1364 * the reuse detection must be fixed. 1365 1365 */ 1366 rc = pgm RZPoolAccessPfHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);1366 rc = pgmPoolAccessPfHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault); 1367 1367 if ( rc == VINF_EM_RAW_EMULATE_INSTR 1368 1368 && fReused) … … 1372 1372 rc = VINF_SUCCESS; /* safe to restart the instruction. */ 1373 1373 } 1374 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)-> StatMonitorPfRZ, &pPool->StatMonitorPfRZFlushPage, a);1374 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,FlushPage), a); 1375 1375 pgmUnlock(pVM); 1376 1376 return rc; 1377 1377 } 1378 1378 1379 # endif /* !IN_RING3 */1379 # endif /* !IN_RING3 */ 1380 1380 1381 1381 /** … … 1390 1390 { 1391 1391 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 1392 STAM_PROFILE_START(&pPool-> CTX_SUFF_Z(StatMonitor), a);1392 STAM_PROFILE_START(&pPool->StatMonitorR3, a); 1393 1393 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser; 1394 1394 LogFlow(("PGM_ALL_CB_DECL: GCPhys=%RGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n", … … 1397 1397 NOREF(pvPhys); NOREF(pvBuf); NOREF(enmAccessType); 1398 1398 1399 /* 1400 * Make sure the pool page wasn't modified by a different CPU. 
1401 */ 1399 1402 pgmLock(pVM); 1400 1401 #ifdef VBOX_WITH_STATISTICS1402 /*1403 * Collect stats on the access.1404 */1405 AssertCompile(RT_ELEMENTS(pPool->CTX_MID_Z(aStatMonitor,Sizes)) == 19);1406 if (cbBuf <= 16 && cbBuf > 0)1407 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Sizes)[cbBuf - 1]);1408 else if (cbBuf >= 17 && cbBuf < 32)1409 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Sizes)[16]);1410 else if (cbBuf >= 32 && cbBuf < 64)1411 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Sizes)[17]);1412 else if (cbBuf >= 64)1413 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Sizes)[18]);1414 1415 uint8_t cbAlign;1416 switch (pPage->enmKind)1417 {1418 default:1419 cbAlign = 7;1420 break;1421 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:1422 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:1423 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:1424 case PGMPOOLKIND_32BIT_PD:1425 case PGMPOOLKIND_32BIT_PD_PHYS:1426 cbAlign = 3;1427 break;1428 }1429 AssertCompile(RT_ELEMENTS(pPool->CTX_MID_Z(aStatMonitor,Misaligned)) == 7);1430 if ((uint8_t)GCPhys & cbAlign)1431 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Misaligned)[((uint8_t)GCPhys & cbAlign) - 1]);1432 #endif1433 1434 /*1435 * Make sure the pool page wasn't modified by a different CPU.1436 */1437 1403 if (PHYS_PAGE_ADDRESS(GCPhys) == PHYS_PAGE_ADDRESS(pPage->GCPhys)) 1438 1404 { … … 1481 1447 } 1482 1448 1483 STAM_PROFILE_STOP_EX(&pPool-> CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,FlushPage), a);1449 STAM_PROFILE_STOP_EX(&pPool->StatMonitorR3, &pPool->StatMonitorR3FlushPage, a); 1484 1450 } 1485 1451 else … … 4611 4577 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++) 4612 4578 { 4579 Assert(!(pShwPD->a[i].u & RT_BIT_32(9))); 4613 4580 if ( pShwPD->a[i].n.u1Present 4614 4581 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING) … … 4651 4618 #endif 4652 4619 { 4653 Assert((pShwPD->a[i].u & (X86_PDE_PAE_MBZ_MASK_NX | UINT64_C(0x7ff0000000000 000))) == 0);4620 Assert((pShwPD->a[i].u & (X86_PDE_PAE_MBZ_MASK_NX | UINT64_C(0x7ff0000000000200))) == 0); 4654 4621 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PAE_PG_MASK); 4655 4622 if (pSubPage) -
trunk/src/VBox/VMM/VMMR3/PGM.cpp
(r65476 → r65504)

      PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNPs,       "/PGM/CPU%u/RZ/InvalidatePage/PDNPs",       "The number of times PGMInvalidatePage() was called for a not present page directory.");
      PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDOutOfSync, "/PGM/CPU%u/RZ/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
-     PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePageSizeChanges, "/PGM/CPU%u/RZ/InvalidatePage/SizeChanges", "The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB).");
      PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePageSkipped,     "/PGM/CPU%u/RZ/InvalidatePage/Skipped",     "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
      PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncSupervisor,   "/PGM/CPU%u/RZ/OutOfSync/SuperVisor",       "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
  …
      PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNPs,       "/PGM/CPU%u/R3/InvalidatePage/PDNPs",       "The number of times PGMInvalidatePage() was called for a not present page directory.");
      PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDOutOfSync, "/PGM/CPU%u/R3/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
-     PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePageSizeChanges, "/PGM/CPU%u/R3/InvalidatePage/SizeChanges", "The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB).");
      PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePageSkipped,     "/PGM/CPU%u/R3/InvalidatePage/Skipped",     "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
      PGM_REG_COUNTER(&pCpuStats->StatR3PageOutOfSyncSupervisor,   "/PGM/CPU%u/R3/OutOfSync/SuperVisor",       "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
  …
      pVCpu->pgm.s.pfnR3GstExit       = pModeData->pfnR3GstExit;
      pVCpu->pgm.s.pfnR3GstGetPage    = pModeData->pfnR3GstGetPage;
+     Assert(pVCpu->pgm.s.pfnR3GstGetPage);
      pVCpu->pgm.s.pfnR3GstModifyPage = pModeData->pfnR3GstModifyPage;
      pVCpu->pgm.s.pfnR3GstGetPDE     = pModeData->pfnR3GstGetPDE;
  …
      pVCpu->pgm.s.pfnR0GstModifyPage = pModeData->pfnR0GstModifyPage;
      pVCpu->pgm.s.pfnR0GstGetPDE     = pModeData->pfnR0GstGetPDE;
-     Assert(pVCpu->pgm.s.pfnR3GstGetPage);

      /* both */
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
(r65476 → r65504)

          Pde.b.u1PAT         ? "AT" : "--",
          Pde.b.u1NoExecute   ? "NX" : "--",
-         Pde.u & PGM_PDFLAGS_BIG_PAGE    ? 'b' : '-',
+         Pde.u & RT_BIT_64(9)            ? '1' : '0',
          Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
          Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
  …
          Pde.n.u1CacheDisable? "CD" : "--",
          Pde.n.u1NoExecute   ? "NX" : "--",
-         Pde.u & PGM_PDFLAGS_BIG_PAGE    ? 'b' : '-',
+         Pde.u & RT_BIT_64(9)            ? '1' : '0',
          Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
          Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
  …
          Pde.b.u1CacheDisable? "CD" : "--",
          Pde.b.u1PAT         ? "AT" : "--",
-         Pde.u & PGM_PDFLAGS_BIG_PAGE    ? 'b' : '-',
+         Pde.u & RT_BIT_32(9)            ? '1' : '0',
          Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
          Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
  …
          Pde.n.u1WriteThru   ? "WT" : "--",
          Pde.n.u1CacheDisable? "CD" : "--",
-         Pde.u & PGM_PDFLAGS_BIG_PAGE    ? 'b' : '-',
+         Pde.u & RT_BIT_32(9)            ? '1' : '0',
          Pde.u & PGM_PDFLAGS_MAPPING     ? 'm' : '-',
          Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r65502 r65504 283 283 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, 284 284 pgmPoolAccessHandler, 285 NULL, "pgmPoolAccessHandler", "pgm RZPoolAccessPfHandler",286 NULL, "pgmPoolAccessHandler", "pgm RZPoolAccessPfHandler",285 NULL, "pgmPoolAccessHandler", "pgmPoolAccessPfHandler", 286 NULL, "pgmPoolAccessHandler", "pgmPoolAccessPfHandler", 287 287 "Guest Paging Access Handler", 288 288 &pPool->hAccessHandlerType); … … 347 347 STAM_REG(pVM, &pPool->StatTrackLinearRamSearches, STAMTYPE_COUNTER, "/PGM/Pool/Track/LinearRamSearches", STAMUNIT_OCCURENCES, "The number of times we had to do linear ram searches."); 348 348 STAM_REG(pVM, &pPool->StamTrackPhysExtAllocFailures,STAMTYPE_COUNTER, "/PGM/Pool/Track/PhysExtAllocFailures", STAMUNIT_OCCURENCES, "The number of failing pgmPoolTrackPhysExtAlloc calls."); 349 350 STAM_REG(pVM, &pPool->StatMonitorPfRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 #PF access handler."); 351 STAM_REG(pVM, &pPool->StatMonitorPfRZEmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction."); 352 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler."); 353 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushReinit, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/FlushReinit", STAMUNIT_OCCURENCES, "Times we've detected a page table reinit."); 354 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushModOverflow,STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/FlushOverflow", STAMUNIT_OCCURENCES, "Counting flushes for pages that are modified too often."); 355 STAM_REG(pVM, &pPool->StatMonitorPfRZFork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork()."); 356 STAM_REG(pVM, &pPool->StatMonitorPfRZHandled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 #PF access we've handled (except REP STOSD)."); 357 STAM_REG(pVM, &pPool->StatMonitorPfRZIntrFailPatch1, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/IntrFailPatch1", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction."); 358 STAM_REG(pVM, &pPool->StatMonitorPfRZIntrFailPatch2, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/IntrFailPatch2", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction during flushing."); 359 STAM_REG(pVM, &pPool->StatMonitorPfRZRepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle."); 360 STAM_REG(pVM, &pPool->StatMonitorPfRZRepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled."); 361 362 STAM_REG(pVM, &pPool->StatMonitorRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM", STAMUNIT_TICKS_PER_CALL, "Profiling the regular access handler."); 363 STAM_REG(pVM, &pPool->StatMonitorRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the regular access handler."); 364 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[0], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size01", STAMUNIT_OCCURENCES, "Number of 1 byte accesses."); 365 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[1], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size02", 
STAMUNIT_OCCURENCES, "Number of 2 byte accesses."); 366 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[2], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size03", STAMUNIT_OCCURENCES, "Number of 3 byte accesses."); 367 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[3], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size04", STAMUNIT_OCCURENCES, "Number of 4 byte accesses."); 368 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[4], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size05", STAMUNIT_OCCURENCES, "Number of 5 byte accesses."); 369 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[5], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size06", STAMUNIT_OCCURENCES, "Number of 6 byte accesses."); 370 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[6], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size07", STAMUNIT_OCCURENCES, "Number of 7 byte accesses."); 371 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[7], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size08", STAMUNIT_OCCURENCES, "Number of 8 byte accesses."); 372 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[8], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size09", STAMUNIT_OCCURENCES, "Number of 9 byte accesses."); 373 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[9], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0a", STAMUNIT_OCCURENCES, "Number of 10 byte accesses."); 374 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[10], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0b", STAMUNIT_OCCURENCES, "Number of 11 byte accesses."); 375 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[11], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0c", STAMUNIT_OCCURENCES, "Number of 12 byte accesses."); 376 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[12], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0d", STAMUNIT_OCCURENCES, "Number of 13 byte accesses."); 377 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[13], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0e", STAMUNIT_OCCURENCES, "Number of 14 byte accesses."); 378 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[14], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0f", STAMUNIT_OCCURENCES, "Number of 15 byte accesses."); 379 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[15], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size10", STAMUNIT_OCCURENCES, "Number of 16 byte accesses."); 380 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[16], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size11-2f", STAMUNIT_OCCURENCES, "Number of 17-31 byte accesses."); 381 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[17], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size20-3f", STAMUNIT_OCCURENCES, "Number of 32-63 byte accesses."); 382 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[18], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size40+", STAMUNIT_OCCURENCES, "Number of 64+ byte accesses."); 383 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[0], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned1", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 1."); 384 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[1], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned2", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 2."); 385 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[2], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned3", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 3."); 386 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[3], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned4", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 4."); 387 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[4], STAMTYPE_PROFILE, 
"/PGM/Pool/Monitor/RZ/IEM/Misaligned5", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 5."); 388 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[5], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned6", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 6."); 389 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[6], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned7", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 7."); 390 391 STAM_REG(pVM, &pPool->StatMonitorRZFaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults."); 392 STAM_REG(pVM, &pPool->StatMonitorRZFaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults."); 393 STAM_REG(pVM, &pPool->StatMonitorRZFaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults."); 394 STAM_REG(pVM, &pPool->StatMonitorRZFaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults."); 395 396 STAM_REG(pVM, &pPool->StatMonitorR3, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access handler."); 397 STAM_REG(pVM, &pPool->StatMonitorR3FlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler."); 398 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[0], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size01", STAMUNIT_OCCURENCES, "Number of 1 byte accesses (R3)."); 399 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[1], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size02", STAMUNIT_OCCURENCES, "Number of 2 byte accesses (R3)."); 400 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[2], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size03", STAMUNIT_OCCURENCES, "Number of 3 byte accesses (R3)."); 401 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[3], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size04", STAMUNIT_OCCURENCES, "Number of 4 byte accesses (R3)."); 402 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[4], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size05", STAMUNIT_OCCURENCES, "Number of 5 byte accesses (R3)."); 403 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[5], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size06", STAMUNIT_OCCURENCES, "Number of 6 byte accesses (R3)."); 404 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[6], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size07", STAMUNIT_OCCURENCES, "Number of 7 byte accesses (R3)."); 405 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[7], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size08", STAMUNIT_OCCURENCES, "Number of 8 byte accesses (R3)."); 406 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[8], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size09", STAMUNIT_OCCURENCES, "Number of 9 byte accesses (R3)."); 407 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[9], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0a", STAMUNIT_OCCURENCES, "Number of 10 byte accesses (R3)."); 408 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[10], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0b", STAMUNIT_OCCURENCES, "Number of 11 byte accesses (R3)."); 409 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[11], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0c", STAMUNIT_OCCURENCES, "Number of 12 byte accesses (R3)."); 410 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[12], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0d", STAMUNIT_OCCURENCES, "Number of 13 byte accesses (R3)."); 411 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[13], STAMTYPE_PROFILE, 
"/PGM/Pool/Monitor/R3/Size0e", STAMUNIT_OCCURENCES, "Number of 14 byte accesses (R3)."); 412 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[14], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0f", STAMUNIT_OCCURENCES, "Number of 15 byte accesses (R3)."); 413 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[15], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size10", STAMUNIT_OCCURENCES, "Number of 16 byte accesses (R3)."); 414 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[16], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size11-2f", STAMUNIT_OCCURENCES, "Number of 17-31 byte accesses."); 415 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[17], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size20-3f", STAMUNIT_OCCURENCES, "Number of 32-63 byte accesses."); 416 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[18], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size40+", STAMUNIT_OCCURENCES, "Number of 64+ byte accesses."); 417 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[0], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned1", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 1 in R3."); 418 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[1], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned2", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 2 in R3."); 419 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[2], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned3", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 3 in R3."); 420 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[3], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned4", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 4 in R3."); 421 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[4], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned5", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 5 in R3."); 422 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[5], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned6", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 6 in R3."); 423 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[6], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned7", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 7 in R3."); 424 349 STAM_REG(pVM, &pPool->StatMonitorRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access handler."); 350 STAM_REG(pVM, &pPool->StatMonitorRZEmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction."); 351 STAM_REG(pVM, &pPool->StatMonitorRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler."); 352 STAM_REG(pVM, &pPool->StatMonitorRZFlushReinit, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/FlushReinit", STAMUNIT_OCCURENCES, "Times we've detected a page table reinit."); 353 STAM_REG(pVM, &pPool->StatMonitorRZFlushModOverflow,STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/FlushOverflow", STAMUNIT_OCCURENCES, "Counting flushes for pages that are modified too often."); 354 STAM_REG(pVM, &pPool->StatMonitorRZFork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork()."); 355 STAM_REG(pVM, &pPool->StatMonitorRZHandled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access we've handled (except REP STOSD)."); 356 STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch1, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IntrFailPatch1", STAMUNIT_OCCURENCES, "Times we've failed 
interpreting a patch code instruction."); 357 STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch2, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IntrFailPatch2", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction during flushing."); 358 STAM_REG(pVM, &pPool->StatMonitorRZRepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle."); 359 STAM_REG(pVM, &pPool->StatMonitorRZRepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled."); 360 STAM_REG(pVM, &pPool->StatMonitorRZFaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults."); 361 STAM_REG(pVM, &pPool->StatMonitorRZFaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults."); 362 STAM_REG(pVM, &pPool->StatMonitorRZFaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults."); 363 STAM_REG(pVM, &pPool->StatMonitorRZFaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults."); 364 STAM_REG(pVM, &pPool->StatMonitorR3, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access handler."); 365 STAM_REG(pVM, &pPool->StatMonitorR3EmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction."); 366 STAM_REG(pVM, &pPool->StatMonitorR3FlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler."); 367 STAM_REG(pVM, &pPool->StatMonitorR3FlushReinit, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/FlushReinit", STAMUNIT_OCCURENCES, "Times we've detected a page table reinit."); 368 STAM_REG(pVM, &pPool->StatMonitorR3FlushModOverflow,STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/FlushOverflow", STAMUNIT_OCCURENCES, "Counting flushes for pages that are modified too often."); 369 STAM_REG(pVM, &pPool->StatMonitorR3Fork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork()."); 370 STAM_REG(pVM, &pPool->StatMonitorR3Handled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access we've handled (except REP STOSD)."); 371 STAM_REG(pVM, &pPool->StatMonitorR3RepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle."); 372 STAM_REG(pVM, &pPool->StatMonitorR3RepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled."); 425 373 STAM_REG(pVM, &pPool->StatMonitorR3FaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults."); 426 374 STAM_REG(pVM, &pPool->StatMonitorR3FaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults."); 427 375 STAM_REG(pVM, &pPool->StatMonitorR3FaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults."); 428 376 STAM_REG(pVM, &pPool->StatMonitorR3FaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults."); 429 377 STAM_REG(pVM, &pPool->StatMonitorR3Async, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Async", STAMUNIT_OCCURENCES, 
"Times we're called in an async thread and need to flush."); 430 378 STAM_REG(pVM, &pPool->cModifiedPages, STAMTYPE_U16, "/PGM/Pool/Monitor/cModifiedPages", STAMUNIT_PAGES, "The current cModifiedPages value."); 431 379 STAM_REG(pVM, &pPool->cModifiedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/Monitor/cModifiedPagesHigh", STAMUNIT_PAGES, "The high watermark for cModifiedPages."); -
trunk/src/VBox/VMM/include/PGMGstDefs.h
(r65466 → r65504)

  # define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde)       ((Pde).u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A))
  # define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) \
-     ( ((Pde).u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A)) | PGM_PDFLAGS_BIG_PAGE)
+     ((Pde).u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A))
  # define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, Pde) \
      ((Pde).u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_G))
  …
  # define GST_GET_PTE_SHW_FLAGS(pVCpu, Pte)           ((Pte).u & (pVCpu)->pgm.s.fGst64ShadowedPteMask )
  # define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde)           ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedPdeMask )
- # define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde)       ( ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPdeMask ) | PGM_PDFLAGS_BIG_PAGE)
+ # define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde)       ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPdeMask )
  # define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, Pde) ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPde4PteMask )
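Both removed clauses above OR'ed PGM_PDFLAGS_BIG_PAGE, which PGMInternal.h defined on bit 9, into the shadow PDE flags. Bits 9-11 of a page-directory entry are ignored by the x86 MMU and are free for software use, which is how such tag bits can be carried in the entry itself. A minimal sketch of that technique follows; the names are invented for the example and are not VirtualBox APIs.

    /* Illustrative sketch only: using a software-available PDE bit
     * (bits 9-11 are ignored by the hardware walker) to tag an entry. */
    #include <stdbool.h>
    #include <stdint.h>

    #define MY_PDFLAG_BIG_PAGE  (UINT64_C(1) << 9)

    /* Set the tag when the shadow entry is created for a big page. */
    static uint64_t TagAsBigPage(uint64_t uShwPde)
    {
        return uShwPde | MY_PDFLAG_BIG_PAGE;
    }

    /* Test the tag later, e.g. when invalidating the entry. */
    static bool WasBigPage(uint64_t uShwPde)
    {
        return (uShwPde & MY_PDFLAG_BIG_PAGE) != 0;
    }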
trunk/src/VBox/VMM/include/PGMInternal.h
r65502 r65504 178 178 * the page directory entries. 179 179 * @{ */ 180 /** Indicates the original entry was a big page.181 * @remarks This is currently only used for statistics and can be recycled. */182 #define PGM_PDFLAGS_BIG_PAGE RT_BIT_64(9)183 180 /** Mapping (hypervisor allocated pagetable). */ 184 181 #define PGM_PDFLAGS_MAPPING RT_BIT_64(10) … … 2499 2496 /** The number of failing pgmPoolTrackPhysExtAlloc calls. */ 2500 2497 STAMCOUNTER StamTrackPhysExtAllocFailures; 2501 2502 /** Profiling the RC/R0 \#PF access handler. */ 2503 STAMPROFILE StatMonitorPfRZ; 2498 /** Profiling the RC/R0 access handler. */ 2499 STAMPROFILE StatMonitorRZ; 2500 /** Times we've failed interpreting the instruction. */ 2501 STAMCOUNTER StatMonitorRZEmulateInstr; 2502 /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */ 2503 STAMPROFILE StatMonitorRZFlushPage; 2504 /* Times we've detected a page table reinit. */ 2505 STAMCOUNTER StatMonitorRZFlushReinit; 2506 /** Counting flushes for pages that are modified too often. */ 2507 STAMCOUNTER StatMonitorRZFlushModOverflow; 2508 /** Times we've detected fork(). */ 2509 STAMCOUNTER StatMonitorRZFork; 2504 2510 /** Profiling the RC/R0 access we've handled (except REP STOSD). */ 2505 STAMPROFILE StatMonitorPfRZHandled; 2506 /** Times we've failed interpreting the instruction. */ 2507 STAMCOUNTER StatMonitorPfRZEmulateInstr; 2508 /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */ 2509 STAMPROFILE StatMonitorPfRZFlushPage; 2510 /* Times we've detected a page table reinit. */ 2511 STAMCOUNTER StatMonitorPfRZFlushReinit; 2512 /** Counting flushes for pages that are modified too often. */ 2513 STAMCOUNTER StatMonitorPfRZFlushModOverflow; 2514 /** Times we've detected fork(). */ 2515 STAMCOUNTER StatMonitorPfRZFork; 2511 STAMPROFILE StatMonitorRZHandled; 2516 2512 /** Times we've failed interpreting a patch code instruction. */ 2517 STAMCOUNTER StatMonitor PfRZIntrFailPatch1;2513 STAMCOUNTER StatMonitorRZIntrFailPatch1; 2518 2514 /** Times we've failed interpreting a patch code instruction during flushing. */ 2519 STAMCOUNTER StatMonitor PfRZIntrFailPatch2;2515 STAMCOUNTER StatMonitorRZIntrFailPatch2; 2520 2516 /** The number of times we've seen rep prefixes we can't handle. */ 2521 STAMCOUNTER StatMonitor PfRZRepPrefix;2517 STAMCOUNTER StatMonitorRZRepPrefix; 2522 2518 /** Profiling the REP STOSD cases we've handled. */ 2523 STAMPROFILE StatMonitorPfRZRepStosd; 2524 2525 /** Profiling the R0/RC regular access handler. */ 2526 STAMPROFILE StatMonitorRZ; 2527 /** Profiling the pgmPoolFlushPage calls made from the regular access handler in R0/RC. */ 2528 STAMPROFILE StatMonitorRZFlushPage; 2529 /** Per access size counts indexed by size minus 1, last for larger. */ 2530 STAMCOUNTER aStatMonitorRZSizes[16+3]; 2531 /** Missaligned access counts indexed by offset - 1. */ 2532 STAMCOUNTER aStatMonitorRZMisaligned[7]; 2533 2519 STAMPROFILE StatMonitorRZRepStosd; 2534 2520 /** Nr of handled PT faults. */ 2535 2521 STAMCOUNTER StatMonitorRZFaultPT; … … 2543 2529 /** Profiling the R3 access handler. */ 2544 2530 STAMPROFILE StatMonitorR3; 2531 /** Times we've failed interpreting the instruction. */ 2532 STAMCOUNTER StatMonitorR3EmulateInstr; 2545 2533 /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */ 2546 2534 STAMPROFILE StatMonitorR3FlushPage; 2547 /** Per access size counts indexed by size minus 1, last for larger. 
*/ 2548 STAMCOUNTER aStatMonitorR3Sizes[16+3]; 2549 /** Missaligned access counts indexed by offset - 1. */ 2550 STAMCOUNTER aStatMonitorR3Misaligned[7]; 2535 /* Times we've detected a page table reinit. */ 2536 STAMCOUNTER StatMonitorR3FlushReinit; 2537 /** Counting flushes for pages that are modified too often. */ 2538 STAMCOUNTER StatMonitorR3FlushModOverflow; 2539 /** Times we've detected fork(). */ 2540 STAMCOUNTER StatMonitorR3Fork; 2541 /** Profiling the R3 access we've handled (except REP STOSD). */ 2542 STAMPROFILE StatMonitorR3Handled; 2543 /** The number of times we've seen rep prefixes we can't handle. */ 2544 STAMCOUNTER StatMonitorR3RepPrefix; 2545 /** Profiling the REP STOSD cases we've handled. */ 2546 STAMPROFILE StatMonitorR3RepStosd; 2551 2547 /** Nr of handled PT faults. */ 2552 2548 STAMCOUNTER StatMonitorR3FaultPT; … … 2557 2553 /** Nr of handled PML4 faults. */ 2558 2554 STAMCOUNTER StatMonitorR3FaultPML4; 2559 2555 /** The number of times we're called in an async thread an need to flush. */ 2556 STAMCOUNTER StatMonitorR3Async; 2560 2557 /** Times we've called pgmPoolResetDirtyPages (and there were dirty page). */ 2561 2558 STAMCOUNTER StatResetDirtyPages; … … 2795 2792 /** The effective X86_PTE_NX flag for the address. */ 2796 2793 bool fEffectiveNX; 2797 bool afPadding1[2];2798 /** Effective flags thus far: RW, US, PWT, PCD, A, ~NX >> 63.2799 * The NX bit is inverted and shifted down 63 places to bit 0. */2800 uint32_t fEffective;2801 2794 } PGMPTWALKCORE; 2802 2803 /** @name PGMPTWALKCORE::fEffective bits.2804 * @{ */2805 /** Effective execute bit (!NX). */2806 #define PGMPTWALK_EFF_X UINT32_C(1)2807 /** Effective write access bit. */2808 #define PGMPTWALK_EFF_RW X86_PTE_RW2809 /** Effective user-mode access bit. */2810 #define PGMPTWALK_EFF_US X86_PTE_US2811 /** Effective write through cache bit. */2812 #define PGMPTWALK_EFF_PWT X86_PTE_PWT2813 /** Effective cache disabled bit. */2814 #define PGMPTWALK_EFF_PCD X86_PTE_PCD2815 /** Effective accessed bit. */2816 #define PGMPTWALK_EFF_A X86_PTE_A2817 /** The dirty bit of the final entry. */2818 #define PGMPTWALK_EFF_D X86_PTE_D2819 /** The PAT bit of the final entry. */2820 #define PGMPTWALK_EFF_PAT X86_PTE_PAT2821 /** The global bit of the final entry. */2822 #define PGMPTWALK_EFF_G X86_PTE_G2823 /** @} */2824 2795 2825 2796 … … 3795 3766 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */ 3796 3767 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */ 3797 STAMCOUNTER StatRZInvalidatePageSizeChanges ; /**< RC/R0: The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB). */3798 3768 STAMCOUNTER StatRZInvalidatePageSkipped; /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */ 3799 3769 STAMCOUNTER StatRZPageOutOfSyncUser; /**< RC/R0: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */ … … 3843 3813 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */ 3844 3814 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. 
*/ 3845 STAMCOUNTER StatR3InvalidatePageSizeChanges ; /**< R3: The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB). */3846 3815 STAMCOUNTER StatR3InvalidatePageSkipped; /**< R3: The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */ 3847 3816 STAMCOUNTER StatR3PageOutOfSyncUser; /**< R3: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */ … … 4292 4261 PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) pgmPoolAccessHandler; 4293 4262 #ifndef IN_RING3 4294 DECLEXPORT(FNPGMRZPHYSPFHANDLER) pgm RZPoolAccessPfHandler;4263 DECLEXPORT(FNPGMRZPHYSPFHANDLER) pgmPoolAccessPfHandler; 4295 4264 #endif 4296 4265 -
trunk/src/VBox/VMM/testcase/tstVMStruct.h
(r65466 → r65504)

  GEN_CHECK_OFF(PGMCPU, pfnRCGstModifyPage);
  GEN_CHECK_OFF(PGMCPU, pfnRCGstGetPDE);
- GEN_CHECK_OFF(PGMCPU, pfnR0GstGetPage);
- GEN_CHECK_OFF(PGMCPU, pfnR0GstModifyPage);
- GEN_CHECK_OFF(PGMCPU, pfnR0GstGetPDE);
  GEN_CHECK_OFF(PGMCPU, pfnR3BthRelocate);
  GEN_CHECK_OFF(PGMCPU, pfnR3BthSyncCR3);