Changeset 65531 in vbox for trunk/src/VBox/VMM
Timestamp: Jan 31, 2017 10:26:35 AM (8 years ago)
Location:  trunk/src/VBox/VMM
Files:     9 edited
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r65504 r65531

  1338  1338      PdeSrc.u = 0;
  1339  1339  # endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
        1340      const bool fWasBigPage = RT_BOOL(PdeDst.u & PGM_PDFLAGS_BIG_PAGE);
  1340  1341      const bool fIsBigPage  = PdeSrc.b.u1Size && GST_IS_PSE_ACTIVE(pVCpu);
        1342      if (fWasBigPage != fIsBigPage)
        1343          STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped));
  1341  1344
  1342  1345  # ifdef IN_RING3
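For context, the hunk above relies on PGM_PDFLAGS_BIG_PAGE (introduced in PGMInternal.h further down), a software-available PDE bit used to remember that a shadow entry was created from a guest 2/4MB mapping, so PGMInvalidatePage() can detect a 4KB <-> 2/4MB size change. A minimal standalone sketch of that test, using hypothetical names rather than the real VBox types:

    /* Sketch only: detect a page-size transition from a remembered flag bit. */
    #include <stdint.h>
    #include <stdbool.h>

    #define MY_PDFLAGS_BIG_PAGE  (UINT64_C(1) << 9)   /* hypothetical stand-in for PGM_PDFLAGS_BIG_PAGE */

    static bool pageSizeChanged(uint64_t uShadowPde, bool fGuestPdeIsBigNow)
    {
        bool fWasBig = (uShadowPde & MY_PDFLAGS_BIG_PAGE) != 0;
        return fWasBig != fGuestPdeIsBigNow;   /* true on a 4KB <-> 2/4MB change */
    }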
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r65504 r65531 74 74 * @param pWalk Where to return the walk result. This is always set. 75 75 */ 76 static intPGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)76 DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk) 77 77 { 78 78 int rc; … … 93 93 # endif 94 94 95 uint32_t register fEffective = X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | 1; 95 96 { 96 97 # if PGM_GST_TYPE == PGM_TYPE_AMD64 … … 99 100 */ 100 101 rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4); 101 if (RT_FAILURE(rc)) 102 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc); 103 104 PX86PML4 register pPml4 = pWalk->pPml4; 102 if (RT_SUCCESS(rc)) { /* probable */ } 103 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc); 104 105 PX86PML4E register pPml4e; 106 pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK]; 105 107 X86PML4E register Pml4e; 106 PX86PML4E register pPml4e;107 108 pWalk->pPml4e = pPml4e = &pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];109 108 pWalk->Pml4e.u = Pml4e.u = pPml4e->u; 110 if (!Pml4e.n.u1Present) 111 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4); 112 if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, Pml4e))) 113 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4); 109 110 if (Pml4e.n.u1Present) { /* probable */ } 111 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4); 112 113 if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ } 114 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4); 115 116 pWalk->Core.fEffective = fEffective = ((uint32_t)Pml4e.u & (X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A)) 117 | ((uint32_t)(Pml4e.u >> 63) ^ 1) /*NX */; 114 118 115 119 /* … … 117 121 */ 118 122 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pWalk->pPdpt); 119 if (RT_ FAILURE(rc))120 123 if (RT_SUCCESS(rc)) { /* probable */ } 124 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc); 121 125 122 126 # elif PGM_GST_TYPE == PGM_TYPE_PAE 123 127 rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt); 124 if (RT_ FAILURE(rc))125 128 if (RT_SUCCESS(rc)) { /* probable */ } 129 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); 126 130 # endif 127 131 } 128 132 { 129 133 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 130 PX86PDPT register pPdpt = pWalk->pPdpt;131 134 PX86PDPE register pPdpe; 135 pWalk->pPdpe = pPdpe = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK]; 132 136 X86PDPE register Pdpe; 133 134 pWalk->pPdpe = pPdpe = &pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];135 137 pWalk->Pdpe.u = Pdpe.u = pPdpe->u; 136 if (!Pdpe.n.u1Present) 137 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3); 138 if (RT_UNLIKELY(!GST_IS_PDPE_VALID(pVCpu, Pdpe))) 139 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3); 138 139 if (Pdpe.n.u1Present) { /* probable */ } 140 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3); 141 142 if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ } 143 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3); 144 145 # if PGM_GST_TYPE == PGM_TYPE_AMD64 146 pWalk->Core.fEffective = fEffective &= ((uint32_t)Pdpe.u & (X86_PDPE_RW | X86_PDPE_US | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A)) 147 | ((uint32_t)(Pdpe.u >> 63) ^ 1) /*NX */; 148 # else 149 pWalk->Core.fEffective = fEffective = X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A 150 | ((uint32_t)Pdpe.u & 
(X86_PDPE_PWT | X86_PDPE_PCD)) 151 | ((uint32_t)(Pdpe.u >> 63) ^ 1) /*NX */; 152 # endif 140 153 141 154 /* … … 143 156 */ 144 157 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pWalk->pPd); 145 if (RT_ FAILURE(rc))146 158 if (RT_SUCCESS(rc)) { /* probable */ } 159 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc); 147 160 # elif PGM_GST_TYPE == PGM_TYPE_32BIT 148 161 rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd); 149 if (RT_ FAILURE(rc))150 162 if (RT_SUCCESS(rc)) { /* probable */ } 163 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); 151 164 # endif 152 165 } 153 166 { 154 PGSTPD register pPd = pWalk->pPd;155 167 PGSTPDE register pPde; 168 pWalk->pPde = pPde = &pWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK]; 156 169 GSTPDE Pde; 157 158 pWalk->pPde = pPde = &pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];159 170 pWalk->Pde.u = Pde.u = pPde->u; 160 if ( !Pde.n.u1Present)161 171 if (Pde.n.u1Present) { /* probable */ } 172 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2); 162 173 if (Pde.n.u1Size && GST_IS_PSE_ACTIVE(pVCpu)) 163 174 { 164 if (RT_UNLIKELY(!GST_IS_BIG_PDE_VALID(pVCpu, Pde))) 165 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 175 if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ } 176 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 177 178 /* 179 * We're done. 180 */ 181 # if PGM_GST_TYPE == PGM_TYPE_32BIT 182 fEffective &= Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A); 183 # else 184 fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A)) 185 | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */; 186 # endif 187 fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G); 188 fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT; 189 pWalk->Core.fEffective = fEffective; 190 191 pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW); 192 pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US); 193 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 194 pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu); 195 # else 196 pWalk->Core.fEffectiveNX = false; 197 # endif 198 pWalk->Core.fBigPage = true; 199 pWalk->Core.fSucceeded = true; 166 200 167 201 pWalk->Core.GCPhys = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde) 168 202 | (GCPtr & GST_BIG_PAGE_OFFSET_MASK); 169 203 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys); 170 uint8_t fEffectiveXX = (uint8_t)pWalk->Pde.u171 # if PGM_GST_TYPE == PGM_TYPE_AMD64172 & (uint8_t)pWalk->Pdpe.u173 & (uint8_t)pWalk->Pml4e.u174 # endif175 ;176 pWalk->Core.fEffectiveRW = !!(fEffectiveXX & X86_PTE_RW);177 pWalk->Core.fEffectiveUS = !!(fEffectiveXX & X86_PTE_US);178 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE179 pWalk->Core.fEffectiveNX = ( pWalk->Pde.n.u1NoExecute180 # if PGM_GST_TYPE == PGM_TYPE_AMD64181 || pWalk->Pdpe.lm.u1NoExecute182 || pWalk->Pml4e.n.u1NoExecute183 # endif184 ) && GST_IS_NX_ACTIVE(pVCpu);185 # else186 pWalk->Core.fEffectiveNX = false;187 # endif188 pWalk->Core.fBigPage = true;189 pWalk->Core.fSucceeded = true;190 204 return VINF_SUCCESS; 191 205 } … … 193 207 if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde))) 194 208 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 209 # if PGM_GST_TYPE == PGM_TYPE_32BIT 210 pWalk->Core.fEffective = fEffective &= Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A); 211 # else 212 
pWalk->Core.fEffective = fEffective &= ((uint32_t)Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A)) 213 | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */; 214 # endif 195 215 196 216 /* … … 198 218 */ 199 219 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt); 200 if (RT_ FAILURE(rc))201 220 if (RT_SUCCESS(rc)) { /* probable */ } 221 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc); 202 222 } 203 223 { 204 PGSTPT register pPt = pWalk->pPt;205 224 PGSTPTE register pPte; 225 pWalk->pPte = pPte = &pWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK]; 206 226 GSTPTE register Pte; 207 208 pWalk->pPte = pPte = &pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];209 227 pWalk->Pte.u = Pte.u = pPte->u; 210 if (!Pte.n.u1Present) 211 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1); 212 if (RT_UNLIKELY(!GST_IS_PTE_VALID(pVCpu, Pte))) 213 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1); 228 229 if (Pte.n.u1Present) { /* probable */ } 230 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1); 231 232 if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ } 233 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1); 214 234 215 235 /* 216 236 * We're done. 217 237 */ 238 # if PGM_GST_TYPE == PGM_TYPE_32BIT 239 fEffective &= Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A); 240 # else 241 fEffective &= ((uint32_t)Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A)) 242 | ((uint32_t)(Pte.u >> 63) ^ 1) /*NX */; 243 # endif 244 fEffective |= (uint32_t)Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G); 245 pWalk->Core.fEffective = fEffective; 246 247 pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW); 248 pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US); 249 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 250 pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu); 251 # else 252 pWalk->Core.fEffectiveNX = false; 253 # endif 254 pWalk->Core.fSucceeded = true; 255 218 256 pWalk->Core.GCPhys = GST_GET_PDE_GCPHYS(Pte) 219 257 | (GCPtr & PAGE_OFFSET_MASK); 220 uint8_t fEffectiveXX = (uint8_t)pWalk->Pte.u221 & (uint8_t)pWalk->Pde.u222 # if PGM_GST_TYPE == PGM_TYPE_AMD64223 & (uint8_t)pWalk->Pdpe.u224 & (uint8_t)pWalk->Pml4e.u225 # endif226 ;227 pWalk->Core.fEffectiveRW = !!(fEffectiveXX & X86_PTE_RW);228 pWalk->Core.fEffectiveUS = !!(fEffectiveXX & X86_PTE_US);229 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE230 pWalk->Core.fEffectiveNX = ( pWalk->Pte.n.u1NoExecute231 || pWalk->Pde.n.u1NoExecute232 # if PGM_GST_TYPE == PGM_TYPE_AMD64233 || pWalk->Pdpe.lm.u1NoExecute234 || pWalk->Pml4e.n.u1NoExecute235 # endif236 ) && GST_IS_NX_ACTIVE(pVCpu);237 # else238 pWalk->Core.fEffectiveNX = false;239 # endif240 pWalk->Core.fSucceeded = true;241 258 return VINF_SUCCESS; 242 259 } -
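The rewritten Walk() above accumulates an "effective" attribute word while descending the paging hierarchy: RW, US, A and the cache bits are AND-combined so a restriction at any level wins, and the NX bit is inverted into bit 0 so execute permission can be AND-combined the same way. A simplified, self-contained sketch of that accumulation idea (generic bit names, not the VBox macros):

    /* Sketch only: AND-accumulate effective RW/US and an inverted-NX ("X") bit. */
    #include <stdint.h>

    #define EFF_X   UINT32_C(0x001)   /* bit 0: executable (NX inverted) */
    #define EFF_RW  UINT32_C(0x002)   /* same position as X86_PTE_RW */
    #define EFF_US  UINT32_C(0x004)   /* same position as X86_PTE_US */

    static uint32_t effFromEntry(uint64_t u)
    {
        uint32_t f = (uint32_t)u & (EFF_RW | EFF_US);   /* assumes the x86 bit layout */
        f |= (uint32_t)(u >> 63) ^ 1;                   /* bit 63 is NX; store it inverted */
        return f;
    }

    uint32_t walkEffective(uint64_t pml4e, uint64_t pdpe, uint64_t pde, uint64_t pte)
    {
        uint32_t fEff = effFromEntry(pml4e);
        fEff &= effFromEntry(pdpe);
        fEff &= effFromEntry(pde);
        fEff &= effFromEntry(pte);
        return fEff;   /* writable/user/executable only if allowed at every level */
    }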
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r65504 r65531 685 685 } 686 686 687 # 687 #ifndef IN_RING3 688 688 689 689 /** … … 697 697 * @param offFault The access offset. 698 698 */ 699 DECLINLINE(bool) pgm PoolMonitorIsForking(PPGMPOOL pPool, PDISCPUSTATE pDis, unsigned offFault)699 DECLINLINE(bool) pgmRZPoolMonitorIsForking(PPGMPOOL pPool, PDISCPUSTATE pDis, unsigned offFault) 700 700 { 701 701 /* … … 720 720 ) 721 721 { 722 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor ,Fork)); RT_NOREF_PV(pPool);722 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitorPf,Fork)); RT_NOREF_PV(pPool); 723 723 return true; 724 724 } … … 740 740 * @remark The REP prefix check is left to the caller because of STOSD/W. 741 741 */ 742 DECLINLINE(bool) pgm PoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault)743 { 744 # ifndef IN_RC742 DECLINLINE(bool) pgmRZPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault) 743 { 744 # ifndef IN_RC 745 745 /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */ 746 746 if ( HMHasPendingIrq(pVM) … … 748 748 { 749 749 /* Fault caused by stack writes while trying to inject an interrupt event. */ 750 Log(("pgm PoolMonitorIsReused: reused %RGv for interrupt stack (rsp=%RGv).\n", pvFault, pRegFrame->rsp));750 Log(("pgmRZPoolMonitorIsReused: reused %RGv for interrupt stack (rsp=%RGv).\n", pvFault, pRegFrame->rsp)); 751 751 return true; 752 752 } 753 # else753 # else 754 754 NOREF(pVM); NOREF(pvFault); 755 # endif755 # endif 756 756 757 757 LogFlow(("Reused instr %RGv %d at %RGv param1.fUse=%llx param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->uOpcode, pvFault, pDis->Param1.fUse, pDis->Param1.Base.idxGenReg)); … … 765 765 /* call implies the actual push of the return address faulted */ 766 766 case OP_CALL: 767 Log4(("pgm PoolMonitorIsReused: CALL\n"));767 Log4(("pgmRZPoolMonitorIsReused: CALL\n")); 768 768 return true; 769 769 case OP_PUSH: 770 Log4(("pgm PoolMonitorIsReused: PUSH\n"));770 Log4(("pgmRZPoolMonitorIsReused: PUSH\n")); 771 771 return true; 772 772 case OP_PUSHF: 773 Log4(("pgm PoolMonitorIsReused: PUSHF\n"));773 Log4(("pgmRZPoolMonitorIsReused: PUSHF\n")); 774 774 return true; 775 775 case OP_PUSHA: 776 Log4(("pgm PoolMonitorIsReused: PUSHA\n"));776 Log4(("pgmRZPoolMonitorIsReused: PUSHA\n")); 777 777 return true; 778 778 case OP_FXSAVE: 779 Log4(("pgm PoolMonitorIsReused: FXSAVE\n"));779 Log4(("pgmRZPoolMonitorIsReused: FXSAVE\n")); 780 780 return true; 781 781 case OP_MOVNTI: /* solaris - block_zero_no_xmm */ 782 Log4(("pgm PoolMonitorIsReused: MOVNTI\n"));782 Log4(("pgmRZPoolMonitorIsReused: MOVNTI\n")); 783 783 return true; 784 784 case OP_MOVNTDQ: /* solaris - hwblkclr & hwblkpagecopy */ 785 Log4(("pgm PoolMonitorIsReused: MOVNTDQ\n"));785 Log4(("pgmRZPoolMonitorIsReused: MOVNTDQ\n")); 786 786 return true; 787 787 case OP_MOVSWD: … … 793 793 Assert(pDis->uCpuMode == DISCPUMODE_64BIT); 794 794 795 Log(("pgm PoolMonitorIsReused: OP_STOSQ\n"));795 Log(("pgmRZPoolMonitorIsReused: OP_STOSQ\n")); 796 796 return true; 797 797 } … … 806 806 && (pDis->Param1.Base.idxGenReg == DISGREG_ESP)) 807 807 { 808 Log4(("pgm PoolMonitorIsReused: ESP\n"));808 Log4(("pgmRZPoolMonitorIsReused: ESP\n")); 809 809 return true; 810 810 } … … 819 819 if ( (((uintptr_t)pvFault + cbWrite) >> X86_PAGE_SHIFT) != ((uintptr_t)pvFault >> X86_PAGE_SHIFT) ) 820 820 { 821 Log4(("pgm PoolMonitorIsReused: cross page write\n"));821 Log4(("pgmRZPoolMonitorIsReused: cross page write\n")); 822 822 return true; 823 823 
} … … 828 828 if (cbWrite >= 8 && ((uintptr_t)pvFault & 7) != 0) 829 829 { 830 Log4(("pgm PoolMonitorIsReused: Unaligned 8+ byte write\n"));830 Log4(("pgmRZPoolMonitorIsReused: Unaligned 8+ byte write\n")); 831 831 return true; 832 832 } … … 850 850 * @todo VBOXSTRICTRC 851 851 */ 852 static int pgm PoolAccessPfHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,853 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)852 static int pgmRZPoolAccessPfHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis, 853 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault) 854 854 { 855 855 NOREF(pVM); NOREF(GCPhysFault); … … 871 871 if (rc == VINF_SUCCESS) 872 872 rc = VBOXSTRICTRC_VAL(rc2); 873 # ifndef IN_RING3873 # ifndef IN_RING3 874 874 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); 875 # endif875 # endif 876 876 } 877 877 else if (rc2 == VERR_EM_INTERPRETER) 878 878 { 879 # ifdef IN_RC879 # ifdef IN_RC 880 880 if (PATMIsPatchGCAddr(pVM, pRegFrame->eip)) 881 881 { 882 LogFlow(("pgm PoolAccessPfHandlerPTWorker: Interpretation failed for patch code %04x:%RGv, ignoring.\n",882 LogFlow(("pgmRZPoolAccessPfHandlerFlush: Interpretation failed for patch code %04x:%RGv, ignoring.\n", 883 883 pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->eip)); 884 884 rc = VINF_SUCCESS; 885 STAM_COUNTER_INC(&pPool->StatMonitor RZIntrFailPatch2);885 STAM_COUNTER_INC(&pPool->StatMonitorPfRZIntrFailPatch2); 886 886 } 887 887 else 888 # endif888 # endif 889 889 { 890 890 rc = VINF_EM_RAW_EMULATE_INSTR; 891 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor ,EmulateInstr));891 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitorPf,EmulateInstr)); 892 892 } 893 893 } … … 897 897 AssertMsgFailed(("%Rrc\n", VBOXSTRICTRC_VAL(rc2))); /* ASSUMES no complicated stuff here. */ 898 898 899 LogFlow(("pgm PoolAccessPfHandlerPT: returns %Rrc (flushed)\n", rc));899 LogFlow(("pgmRZPoolAccessPfHandlerFlush: returns %Rrc (flushed)\n", rc)); 900 900 return rc; 901 901 } … … 914 914 * @param pvFault The fault address. 
915 915 */ 916 DECLINLINE(int) pgm PoolAccessPfHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,917 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)916 DECLINLINE(int) pgmRZPoolAccessPfHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis, 917 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault) 918 918 { 919 919 unsigned uIncrement = pDis->Param1.cb; … … 923 923 Assert(pRegFrame->rcx <= 0x20); 924 924 925 # ifdef VBOX_STRICT925 # ifdef VBOX_STRICT 926 926 if (pDis->uOpMode == DISCPUMODE_32BIT) 927 927 Assert(uIncrement == 4); 928 928 else 929 929 Assert(uIncrement == 8); 930 # endif931 932 Log3(("pgm PoolAccessPfHandlerSTOSD\n"));930 # endif 931 932 Log3(("pgmRZPoolAccessPfHandlerSTOSD\n")); 933 933 934 934 /* … … 949 949 while (pRegFrame->rcx) 950 950 { 951 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)951 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 952 952 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 953 953 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement); 954 954 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 955 # else955 # else 956 956 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement); 957 # endif958 # ifdef IN_RC957 # endif 958 # ifdef IN_RC 959 959 *(uint32_t *)(uintptr_t)pu32 = pRegFrame->eax; 960 # else960 # else 961 961 PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->rax, uIncrement); 962 # endif962 # endif 963 963 pu32 += uIncrement; 964 964 GCPhysFault += uIncrement; … … 968 968 pRegFrame->rip += pDis->cbInstr; 969 969 970 LogFlow(("pgm PoolAccessPfHandlerSTOSD: returns\n"));970 LogFlow(("pgmRZPoolAccessPfHandlerSTOSD: returns\n")); 971 971 return VINF_SUCCESS; 972 972 } … … 987 987 * @param pfReused Reused state (in/out) 988 988 */ 989 DECLINLINE(int) pgm PoolAccessPfHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,990 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault, bool *pfReused)991 { 992 Log3(("pgm PoolAccessPfHandlerSimple\n"));989 DECLINLINE(int) pgmRZPoolAccessPfHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis, 990 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault, bool *pfReused) 991 { 992 Log3(("pgmRZPoolAccessPfHandlerSimple\n")); 993 993 NOREF(pVM); 994 994 NOREF(pfReused); /* initialized by caller */ … … 1004 1004 * Clear all the pages. ASSUMES that pvFault is readable. 
1005 1005 */ 1006 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)1006 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 1007 1007 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 1008 # endif1008 # endif 1009 1009 1010 1010 uint32_t cbWrite = DISGetParamSize(pDis, &pDis->Param1); … … 1023 1023 } 1024 1024 1025 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)1025 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 1026 1026 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 1027 # endif1027 # endif 1028 1028 1029 1029 /* … … 1035 1035 else if (rc == VERR_EM_INTERPRETER) 1036 1036 { 1037 LogFlow(("pgm PoolAccessPfHandlerPTWorker: Interpretation failed for %04x:%RGv - opcode=%d\n",1037 LogFlow(("pgmRZPoolAccessPfHandlerSimple: Interpretation failed for %04x:%RGv - opcode=%d\n", 1038 1038 pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->uOpcode)); 1039 1039 rc = VINF_EM_RAW_EMULATE_INSTR; 1040 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor ,EmulateInstr));1041 } 1042 1043 # if 0 /* experimental code */1040 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitorPf,EmulateInstr)); 1041 } 1042 1043 # if 0 /* experimental code */ 1044 1044 if (rc == VINF_SUCCESS) 1045 1045 { … … 1069 1069 } 1070 1070 } 1071 # endif1072 1073 LogFlow(("pgm PoolAccessPfHandlerSimple: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));1071 # endif 1072 1073 LogFlow(("pgmRZPoolAccessPfHandlerSimple: returns %Rrc\n", VBOXSTRICTRC_VAL(rc))); 1074 1074 return VBOXSTRICTRC_VAL(rc); 1075 1075 } … … 1082 1082 * @remarks The @a pvUser argument points to the PGMPOOLPAGE. 1083 1083 */ 1084 DECLEXPORT(VBOXSTRICTRC) pgm PoolAccessPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,1085 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)1086 { 1087 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)-> CTX_SUFF_Z(StatMonitor), a);1084 DECLEXPORT(VBOXSTRICTRC) pgmRZPoolAccessPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, 1085 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser) 1086 { 1087 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)->StatMonitorRZ, a); 1088 1088 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 1089 1089 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser; … … 1092 1092 NOREF(uErrorCode); 1093 1093 1094 LogFlow(("pgm PoolAccessPfHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));1094 LogFlow(("pgmRZPoolAccessPfHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault)); 1095 1095 1096 1096 pgmLock(pVM); … … 1098 1098 { 1099 1099 /* Pool page changed while we were waiting for the lock; ignore. 
*/ 1100 Log(("CPU%d: pgm PoolAccessPfHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhysFault), PHYS_PAGE_ADDRESS(pPage->GCPhys)));1101 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)-> CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);1100 Log(("CPU%d: pgmRZPoolAccessPfHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhysFault), PHYS_PAGE_ADDRESS(pPage->GCPhys))); 1101 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->StatMonitorPfRZ, &pPool->StatMonitorPfRZHandled, a); 1102 1102 pgmUnlock(pVM); 1103 1103 return VINF_SUCCESS; 1104 1104 } 1105 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT1105 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT 1106 1106 if (pPage->fDirty) 1107 1107 { … … 1110 1110 return VINF_SUCCESS; /* SMP guest case where we were blocking on the pgm lock while the same page was being marked dirty. */ 1111 1111 } 1112 # endif1113 1114 # if 0 /* test code defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) */1112 # endif 1113 1114 # if 0 /* test code defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) */ 1115 1115 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT) 1116 1116 { … … 1122 1122 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw); 1123 1123 } 1124 # endif1124 # endif 1125 1125 1126 1126 /* … … 1144 1144 Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX); 1145 1145 1146 # ifdef IN_RING01146 # ifdef IN_RING0 1147 1147 /* Maximum nr of modifications depends on the page type. */ 1148 1148 if ( pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT … … 1151 1151 else 1152 1152 cMaxModifications = 24; 1153 # else1153 # else 1154 1154 cMaxModifications = 48; 1155 # endif1155 # endif 1156 1156 1157 1157 /* … … 1172 1172 if (pPage->cModifications >= cMaxModifications) 1173 1173 { 1174 STAM_COUNTER_INC(&pPool-> CTX_MID_Z(StatMonitor,FlushReinit));1174 STAM_COUNTER_INC(&pPool->StatMonitorPfRZFlushReinit); 1175 1175 fForcedFlush = true; 1176 1176 } … … 1188 1188 || pgmPoolIsPageLocked(pPage) 1189 1189 ) 1190 && !(fReused = pgm PoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault))1191 && !pgm PoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))1190 && !(fReused = pgmRZPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)) 1191 && !pgmRZPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK)) 1192 1192 { 1193 1193 /* … … 1196 1196 if (!(pDis->fPrefix & (DISPREFIX_REP | DISPREFIX_REPNE))) 1197 1197 { 1198 rc = pgm PoolAccessPfHandlerSimple(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault, &fReused);1198 rc = pgmRZPoolAccessPfHandlerSimple(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault, &fReused); 1199 1199 if (fReused) 1200 1200 goto flushPage; … … 1226 1226 } 1227 1227 1228 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)-> CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);1228 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->StatMonitorPfRZ, &pPool->StatMonitorPfRZHandled, a); 1229 1229 pgmUnlock(pVM); 1230 1230 return rc; … … 1267 1267 if (fValidStosd) 1268 1268 { 1269 rc = pgm PoolAccessPfHandlerSTOSD(pVM, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);1270 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)-> CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,RepStosd), a);1269 rc = pgmRZPoolAccessPfHandlerSTOSD(pVM, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault); 1270 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->StatMonitorPfRZ, 
&pPool->StatMonitorPfRZRepStosd, a); 1271 1271 pgmUnlock(pVM); 1272 1272 return rc; … … 1275 1275 1276 1276 /* REP prefix, don't bother. */ 1277 STAM_COUNTER_INC(&pPool-> CTX_MID_Z(StatMonitor,RepPrefix));1278 Log4(("pgm PoolAccessPfHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n",1277 STAM_COUNTER_INC(&pPool->StatMonitorPfRZRepPrefix); 1278 Log4(("pgmRZPoolAccessPfHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n", 1279 1279 pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->uOpcode, pDis->fPrefix)); 1280 1280 fNotReusedNotForking = true; 1281 1281 } 1282 1282 1283 # if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) && defined(IN_RING0)1283 # if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) && defined(IN_RING0) 1284 1284 /* E.g. Windows 7 x64 initializes page tables and touches some pages in the table during the process. This 1285 1285 * leads to pgm pool trashing and an excessive amount of write faults due to page monitoring. … … 1289 1289 && (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT || pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_32BIT_PT) 1290 1290 && ( fNotReusedNotForking 1291 || ( !pgm PoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)1292 && !pgm PoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))1291 || ( !pgmRZPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault) 1292 && !pgmRZPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK)) 1293 1293 ) 1294 1294 ) … … 1342 1342 || rc == VERR_PAGE_NOT_PRESENT, 1343 1343 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc)); 1344 # ifdef VBOX_STRICT1344 # ifdef VBOX_STRICT 1345 1345 pPage->GCPtrDirtyFault = pvFault; 1346 # endif1347 1348 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)-> CTX_SUFF_Z(StatMonitor), a);1346 # endif 1347 1348 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->StatMonitorPfRZ, a); 1349 1349 pgmUnlock(pVM); 1350 1350 return rc; … … 1352 1352 } 1353 1353 } 1354 # endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */1355 1356 STAM_COUNTER_INC(&pPool-> CTX_MID_Z(StatMonitor,FlushModOverflow));1354 # endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */ 1355 1356 STAM_COUNTER_INC(&pPool->StatMonitorPfRZFlushModOverflow); 1357 1357 flushPage: 1358 1358 /* … … 1364 1364 * the reuse detection must be fixed. 1365 1365 */ 1366 rc = pgm PoolAccessPfHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);1366 rc = pgmRZPoolAccessPfHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault); 1367 1367 if ( rc == VINF_EM_RAW_EMULATE_INSTR 1368 1368 && fReused) … … 1372 1372 rc = VINF_SUCCESS; /* safe to restart the instruction. 
*/ 1373 1373 } 1374 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)-> CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,FlushPage), a);1374 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->StatMonitorPfRZ, &pPool->StatMonitorPfRZFlushPage, a); 1375 1375 pgmUnlock(pVM); 1376 1376 return rc; 1377 1377 } 1378 1378 1379 # 1379 #endif /* !IN_RING3 */ 1380 1380 1381 1381 /** … … 1390 1390 { 1391 1391 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 1392 STAM_PROFILE_START(&pPool-> StatMonitorR3, a);1392 STAM_PROFILE_START(&pPool->CTX_SUFF_Z(StatMonitor), a); 1393 1393 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser; 1394 1394 LogFlow(("PGM_ALL_CB_DECL: GCPhys=%RGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n", … … 1397 1397 NOREF(pvPhys); NOREF(pvBuf); NOREF(enmAccessType); 1398 1398 1399 pgmLock(pVM); 1400 1401 #ifdef VBOX_WITH_STATISTICS 1402 /* 1403 * Collect stats on the access. 1404 */ 1405 AssertCompile(RT_ELEMENTS(pPool->CTX_MID_Z(aStatMonitor,Sizes)) == 19); 1406 if (cbBuf <= 16 && cbBuf > 0) 1407 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Sizes)[cbBuf - 1]); 1408 else if (cbBuf >= 17 && cbBuf < 32) 1409 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Sizes)[16]); 1410 else if (cbBuf >= 32 && cbBuf < 64) 1411 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Sizes)[17]); 1412 else if (cbBuf >= 64) 1413 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Sizes)[18]); 1414 1415 uint8_t cbAlign; 1416 switch (pPage->enmKind) 1417 { 1418 default: 1419 cbAlign = 7; 1420 break; 1421 case PGMPOOLKIND_32BIT_PT_FOR_PHYS: 1422 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT: 1423 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB: 1424 case PGMPOOLKIND_32BIT_PD: 1425 case PGMPOOLKIND_32BIT_PD_PHYS: 1426 cbAlign = 3; 1427 break; 1428 } 1429 AssertCompile(RT_ELEMENTS(pPool->CTX_MID_Z(aStatMonitor,Misaligned)) == 7); 1430 if ((uint8_t)GCPhys & cbAlign) 1431 STAM_COUNTER_INC(&pPool->CTX_MID_Z(aStatMonitor,Misaligned)[((uint8_t)GCPhys & cbAlign) - 1]); 1432 #endif 1433 1399 1434 /* 1400 1435 * Make sure the pool page wasn't modified by a different CPU. 1401 1436 */ 1402 pgmLock(pVM);1403 1437 if (PHYS_PAGE_ADDRESS(GCPhys) == PHYS_PAGE_ADDRESS(pPage->GCPhys)) 1404 1438 { … … 1447 1481 } 1448 1482 1449 STAM_PROFILE_STOP_EX(&pPool-> StatMonitorR3, &pPool->StatMonitorR3FlushPage, a);1483 STAM_PROFILE_STOP_EX(&pPool->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,FlushPage), a); 1450 1484 } 1451 1485 else … … 4577 4611 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++) 4578 4612 { 4579 Assert(!(pShwPD->a[i].u & RT_BIT_32(9)));4580 4613 if ( pShwPD->a[i].n.u1Present 4581 4614 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING) … … 4618 4651 #endif 4619 4652 { 4620 Assert((pShwPD->a[i].u & (X86_PDE_PAE_MBZ_MASK_NX | UINT64_C(0x7ff0000000000 200))) == 0);4653 Assert((pShwPD->a[i].u & (X86_PDE_PAE_MBZ_MASK_NX | UINT64_C(0x7ff0000000000000))) == 0); 4621 4654 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PAE_PG_MASK); 4622 4655 if (pSubPage) -
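One of the additions above is per-size accounting for monitored writes: the new aStatMonitor*Sizes[] arrays have 19 buckets, one per byte count from 1 to 16, then 17-31, 32-63 and 64+. A small illustrative helper showing that bucketing rule (sketch only, not the VBox code):

    /* Sketch only: map a write size (>= 1 byte) to one of 19 statistics buckets. */
    #include <stddef.h>

    static unsigned sizeBucket(size_t cbBuf)   /* caller skips cbBuf == 0 */
    {
        if (cbBuf <= 16)
            return (unsigned)cbBuf - 1;   /* buckets 0..15 for 1..16 bytes */
        if (cbBuf < 32)
            return 16;                    /* 17-31 bytes */
        if (cbBuf < 64)
            return 17;                    /* 32-63 bytes */
        return 18;                        /* 64+ bytes */
    }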
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r65504 r65531 2050 2050 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNPs, "/PGM/CPU%u/RZ/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory."); 2051 2051 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDOutOfSync, "/PGM/CPU%u/RZ/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory."); 2052 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePageSizeChanges, "/PGM/CPU%u/RZ/InvalidatePage/SizeChanges", "The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB)."); 2052 2053 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePageSkipped, "/PGM/CPU%u/RZ/InvalidatePage/Skipped", "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3."); 2053 2054 PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncSupervisor, "/PGM/CPU%u/RZ/OutOfSync/SuperVisor", "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage."); … … 2097 2098 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNPs, "/PGM/CPU%u/R3/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory."); 2098 2099 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDOutOfSync, "/PGM/CPU%u/R3/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory."); 2100 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePageSizeChanges, "/PGM/CPU%u/R3/InvalidatePage/SizeChanges", "The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB)."); 2099 2101 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePageSkipped, "/PGM/CPU%u/R3/InvalidatePage/Skipped", "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3."); 2100 2102 PGM_REG_COUNTER(&pCpuStats->StatR3PageOutOfSyncSupervisor, "/PGM/CPU%u/R3/OutOfSync/SuperVisor", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage."); … … 3188 3190 pVCpu->pgm.s.pfnR3GstExit = pModeData->pfnR3GstExit; 3189 3191 pVCpu->pgm.s.pfnR3GstGetPage = pModeData->pfnR3GstGetPage; 3190 Assert(pVCpu->pgm.s.pfnR3GstGetPage);3191 3192 pVCpu->pgm.s.pfnR3GstModifyPage = pModeData->pfnR3GstModifyPage; 3192 3193 pVCpu->pgm.s.pfnR3GstGetPDE = pModeData->pfnR3GstGetPDE; … … 3197 3198 pVCpu->pgm.s.pfnR0GstModifyPage = pModeData->pfnR0GstModifyPage; 3198 3199 pVCpu->pgm.s.pfnR0GstGetPDE = pModeData->pfnR0GstGetPDE; 3200 Assert(pVCpu->pgm.s.pfnR3GstGetPage); 3199 3201 3200 3202 /* both */ -
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r65504 r65531 1321 1321 Pde.b.u1PAT ? "AT" : "--", 1322 1322 Pde.b.u1NoExecute ? "NX" : "--", 1323 Pde.u & RT_BIT_64(9) ? '1' : '0',1323 Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-', 1324 1324 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-', 1325 1325 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-', … … 1350 1350 Pde.n.u1CacheDisable? "CD" : "--", 1351 1351 Pde.n.u1NoExecute ? "NX" : "--", 1352 Pde.u & RT_BIT_64(9) ? '1' : '0',1352 Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-', 1353 1353 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-', 1354 1354 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-', … … 1637 1637 Pde.b.u1CacheDisable? "CD" : "--", 1638 1638 Pde.b.u1PAT ? "AT" : "--", 1639 Pde.u & RT_BIT_32(9) ? '1' : '0',1639 Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-', 1640 1640 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-', 1641 1641 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-', … … 1658 1658 Pde.n.u1WriteThru ? "WT" : "--", 1659 1659 Pde.n.u1CacheDisable? "CD" : "--", 1660 Pde.u & RT_BIT_32(9) ? '1' : '0',1660 Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-', 1661 1661 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-', 1662 1662 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-', -
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r65504 r65531 283 283 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, 284 284 pgmPoolAccessHandler, 285 NULL, "pgmPoolAccessHandler", "pgm PoolAccessPfHandler",286 NULL, "pgmPoolAccessHandler", "pgm PoolAccessPfHandler",285 NULL, "pgmPoolAccessHandler", "pgmRZPoolAccessPfHandler", 286 NULL, "pgmPoolAccessHandler", "pgmRZPoolAccessPfHandler", 287 287 "Guest Paging Access Handler", 288 288 &pPool->hAccessHandlerType); … … 347 347 STAM_REG(pVM, &pPool->StatTrackLinearRamSearches, STAMTYPE_COUNTER, "/PGM/Pool/Track/LinearRamSearches", STAMUNIT_OCCURENCES, "The number of times we had to do linear ram searches."); 348 348 STAM_REG(pVM, &pPool->StamTrackPhysExtAllocFailures,STAMTYPE_COUNTER, "/PGM/Pool/Track/PhysExtAllocFailures", STAMUNIT_OCCURENCES, "The number of failing pgmPoolTrackPhysExtAlloc calls."); 349 STAM_REG(pVM, &pPool->StatMonitorRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access handler."); 350 STAM_REG(pVM, &pPool->StatMonitorRZEmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction."); 351 STAM_REG(pVM, &pPool->StatMonitorRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler."); 352 STAM_REG(pVM, &pPool->StatMonitorRZFlushReinit, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/FlushReinit", STAMUNIT_OCCURENCES, "Times we've detected a page table reinit."); 353 STAM_REG(pVM, &pPool->StatMonitorRZFlushModOverflow,STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/FlushOverflow", STAMUNIT_OCCURENCES, "Counting flushes for pages that are modified too often."); 354 STAM_REG(pVM, &pPool->StatMonitorRZFork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork()."); 355 STAM_REG(pVM, &pPool->StatMonitorRZHandled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access we've handled (except REP STOSD)."); 356 STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch1, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IntrFailPatch1", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction."); 357 STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch2, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IntrFailPatch2", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction during flushing."); 358 STAM_REG(pVM, &pPool->StatMonitorRZRepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle."); 359 STAM_REG(pVM, &pPool->StatMonitorRZRepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled."); 360 STAM_REG(pVM, &pPool->StatMonitorRZFaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults."); 361 STAM_REG(pVM, &pPool->StatMonitorRZFaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults."); 362 STAM_REG(pVM, &pPool->StatMonitorRZFaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults."); 363 STAM_REG(pVM, &pPool->StatMonitorRZFaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults."); 364 STAM_REG(pVM, &pPool->StatMonitorR3, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3", STAMUNIT_TICKS_PER_CALL, 
"Profiling the R3 access handler."); 365 STAM_REG(pVM, &pPool->StatMonitorR3EmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction."); 366 STAM_REG(pVM, &pPool->StatMonitorR3FlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler."); 367 STAM_REG(pVM, &pPool->StatMonitorR3FlushReinit, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/FlushReinit", STAMUNIT_OCCURENCES, "Times we've detected a page table reinit."); 368 STAM_REG(pVM, &pPool->StatMonitorR3FlushModOverflow,STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/FlushOverflow", STAMUNIT_OCCURENCES, "Counting flushes for pages that are modified too often."); 369 STAM_REG(pVM, &pPool->StatMonitorR3Fork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork()."); 370 STAM_REG(pVM, &pPool->StatMonitorR3Handled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access we've handled (except REP STOSD)."); 371 STAM_REG(pVM, &pPool->StatMonitorR3RepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle."); 372 STAM_REG(pVM, &pPool->StatMonitorR3RepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled."); 349 350 STAM_REG(pVM, &pPool->StatMonitorPfRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 #PF access handler."); 351 STAM_REG(pVM, &pPool->StatMonitorPfRZEmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction."); 352 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler."); 353 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushReinit, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/FlushReinit", STAMUNIT_OCCURENCES, "Times we've detected a page table reinit."); 354 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushModOverflow,STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/FlushOverflow", STAMUNIT_OCCURENCES, "Counting flushes for pages that are modified too often."); 355 STAM_REG(pVM, &pPool->StatMonitorPfRZFork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork()."); 356 STAM_REG(pVM, &pPool->StatMonitorPfRZHandled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 #PF access we've handled (except REP STOSD)."); 357 STAM_REG(pVM, &pPool->StatMonitorPfRZIntrFailPatch1, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/IntrFailPatch1", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction."); 358 STAM_REG(pVM, &pPool->StatMonitorPfRZIntrFailPatch2, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/IntrFailPatch2", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction during flushing."); 359 STAM_REG(pVM, &pPool->StatMonitorPfRZRepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle."); 360 STAM_REG(pVM, &pPool->StatMonitorPfRZRepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've 
handled."); 361 362 STAM_REG(pVM, &pPool->StatMonitorRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM", STAMUNIT_TICKS_PER_CALL, "Profiling the regular access handler."); 363 STAM_REG(pVM, &pPool->StatMonitorRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the regular access handler."); 364 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[0], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size01", STAMUNIT_OCCURENCES, "Number of 1 byte accesses."); 365 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[1], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size02", STAMUNIT_OCCURENCES, "Number of 2 byte accesses."); 366 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[2], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size03", STAMUNIT_OCCURENCES, "Number of 3 byte accesses."); 367 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[3], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size04", STAMUNIT_OCCURENCES, "Number of 4 byte accesses."); 368 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[4], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size05", STAMUNIT_OCCURENCES, "Number of 5 byte accesses."); 369 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[5], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size06", STAMUNIT_OCCURENCES, "Number of 6 byte accesses."); 370 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[6], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size07", STAMUNIT_OCCURENCES, "Number of 7 byte accesses."); 371 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[7], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size08", STAMUNIT_OCCURENCES, "Number of 8 byte accesses."); 372 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[8], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size09", STAMUNIT_OCCURENCES, "Number of 9 byte accesses."); 373 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[9], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0a", STAMUNIT_OCCURENCES, "Number of 10 byte accesses."); 374 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[10], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0b", STAMUNIT_OCCURENCES, "Number of 11 byte accesses."); 375 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[11], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0c", STAMUNIT_OCCURENCES, "Number of 12 byte accesses."); 376 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[12], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0d", STAMUNIT_OCCURENCES, "Number of 13 byte accesses."); 377 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[13], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0e", STAMUNIT_OCCURENCES, "Number of 14 byte accesses."); 378 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[14], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size0f", STAMUNIT_OCCURENCES, "Number of 15 byte accesses."); 379 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[15], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size10", STAMUNIT_OCCURENCES, "Number of 16 byte accesses."); 380 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[16], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size11-2f", STAMUNIT_OCCURENCES, "Number of 17-31 byte accesses."); 381 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[17], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size20-3f", STAMUNIT_OCCURENCES, "Number of 32-63 byte accesses."); 382 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[18], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Size40+", STAMUNIT_OCCURENCES, "Number of 64+ byte accesses."); 383 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[0], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned1", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 1."); 384 
STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[1], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned2", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 2."); 385 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[2], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned3", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 3."); 386 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[3], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned4", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 4."); 387 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[4], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned5", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 5."); 388 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[5], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned6", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 6."); 389 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[6], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/Misaligned7", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 7."); 390 391 STAM_REG(pVM, &pPool->StatMonitorRZFaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults."); 392 STAM_REG(pVM, &pPool->StatMonitorRZFaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults."); 393 STAM_REG(pVM, &pPool->StatMonitorRZFaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults."); 394 STAM_REG(pVM, &pPool->StatMonitorRZFaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults."); 395 396 STAM_REG(pVM, &pPool->StatMonitorR3, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access handler."); 397 STAM_REG(pVM, &pPool->StatMonitorR3FlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler."); 398 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[0], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size01", STAMUNIT_OCCURENCES, "Number of 1 byte accesses (R3)."); 399 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[1], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size02", STAMUNIT_OCCURENCES, "Number of 2 byte accesses (R3)."); 400 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[2], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size03", STAMUNIT_OCCURENCES, "Number of 3 byte accesses (R3)."); 401 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[3], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size04", STAMUNIT_OCCURENCES, "Number of 4 byte accesses (R3)."); 402 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[4], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size05", STAMUNIT_OCCURENCES, "Number of 5 byte accesses (R3)."); 403 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[5], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size06", STAMUNIT_OCCURENCES, "Number of 6 byte accesses (R3)."); 404 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[6], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size07", STAMUNIT_OCCURENCES, "Number of 7 byte accesses (R3)."); 405 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[7], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size08", STAMUNIT_OCCURENCES, "Number of 8 byte accesses (R3)."); 406 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[8], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size09", STAMUNIT_OCCURENCES, "Number of 9 byte accesses (R3)."); 407 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[9], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0a", 
STAMUNIT_OCCURENCES, "Number of 10 byte accesses (R3)."); 408 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[10], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0b", STAMUNIT_OCCURENCES, "Number of 11 byte accesses (R3)."); 409 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[11], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0c", STAMUNIT_OCCURENCES, "Number of 12 byte accesses (R3)."); 410 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[12], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0d", STAMUNIT_OCCURENCES, "Number of 13 byte accesses (R3)."); 411 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[13], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0e", STAMUNIT_OCCURENCES, "Number of 14 byte accesses (R3)."); 412 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[14], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size0f", STAMUNIT_OCCURENCES, "Number of 15 byte accesses (R3)."); 413 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[15], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size10", STAMUNIT_OCCURENCES, "Number of 16 byte accesses (R3)."); 414 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[16], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size11-2f", STAMUNIT_OCCURENCES, "Number of 17-31 byte accesses."); 415 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[17], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size20-3f", STAMUNIT_OCCURENCES, "Number of 32-63 byte accesses."); 416 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[18], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Size40+", STAMUNIT_OCCURENCES, "Number of 64+ byte accesses."); 417 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[0], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned1", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 1 in R3."); 418 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[1], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned2", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 2 in R3."); 419 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[2], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned3", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 3 in R3."); 420 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[3], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned4", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 4 in R3."); 421 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[4], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned5", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 5 in R3."); 422 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[5], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned6", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 6 in R3."); 423 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[6], STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Misaligned7", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 7 in R3."); 424 373 425 STAM_REG(pVM, &pPool->StatMonitorR3FaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults."); 374 426 STAM_REG(pVM, &pPool->StatMonitorR3FaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults."); 375 427 STAM_REG(pVM, &pPool->StatMonitorR3FaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults."); 376 428 STAM_REG(pVM, &pPool->StatMonitorR3FaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults."); 377 STAM_REG(pVM, &pPool->StatMonitorR3Async, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Async", STAMUNIT_OCCURENCES, "Times we're called in an async thread and need to 
flush."); 429 378 430 STAM_REG(pVM, &pPool->cModifiedPages, STAMTYPE_U16, "/PGM/Pool/Monitor/cModifiedPages", STAMUNIT_PAGES, "The current cModifiedPages value."); 379 431 STAM_REG(pVM, &pPool->cModifiedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/Monitor/cModifiedPagesHigh", STAMUNIT_PAGES, "The high watermark for cModifiedPages."); -
trunk/src/VBox/VMM/include/PGMGstDefs.h
r65504 r65531 143 143 # define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde) ((Pde).u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A)) 144 144 # define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) \ 145 ( (Pde).u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A))145 ( ((Pde).u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A)) | PGM_PDFLAGS_BIG_PAGE ) 146 146 # define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, Pde) \ 147 147 ((Pde).u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_G)) … … 183 183 # define GST_GET_PTE_SHW_FLAGS(pVCpu, Pte) ((Pte).u & (pVCpu)->pgm.s.fGst64ShadowedPteMask ) 184 184 # define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde) ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedPdeMask ) 185 # define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) ( (Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPdeMask)185 # define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) ( ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPdeMask ) | PGM_PDFLAGS_BIG_PAGE) 186 186 # define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, Pde) ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPde4PteMask ) 187 187 -
trunk/src/VBox/VMM/include/PGMInternal.h
r65504 r65531 178 178 * the page directory entries. 179 179 * @{ */ 180 /** Indicates the original entry was a big page. 181 * @remarks This is currently only used for statistics and can be recycled. */ 182 #define PGM_PDFLAGS_BIG_PAGE RT_BIT_64(9) 180 183 /** Mapping (hypervisor allocated pagetable). */ 181 184 #define PGM_PDFLAGS_MAPPING RT_BIT_64(10) … … 2496 2499 /** The number of failing pgmPoolTrackPhysExtAlloc calls. */ 2497 2500 STAMCOUNTER StamTrackPhysExtAllocFailures; 2498 /** Profiling the RC/R0 access handler. */ 2501 2502 /** Profiling the RC/R0 \#PF access handler. */ 2503 STAMPROFILE StatMonitorPfRZ; 2504 /** Profiling the RC/R0 access we've handled (except REP STOSD). */ 2505 STAMPROFILE StatMonitorPfRZHandled; 2506 /** Times we've failed interpreting the instruction. */ 2507 STAMCOUNTER StatMonitorPfRZEmulateInstr; 2508 /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */ 2509 STAMPROFILE StatMonitorPfRZFlushPage; 2510 /* Times we've detected a page table reinit. */ 2511 STAMCOUNTER StatMonitorPfRZFlushReinit; 2512 /** Counting flushes for pages that are modified too often. */ 2513 STAMCOUNTER StatMonitorPfRZFlushModOverflow; 2514 /** Times we've detected fork(). */ 2515 STAMCOUNTER StatMonitorPfRZFork; 2516 /** Times we've failed interpreting a patch code instruction. */ 2517 STAMCOUNTER StatMonitorPfRZIntrFailPatch1; 2518 /** Times we've failed interpreting a patch code instruction during flushing. */ 2519 STAMCOUNTER StatMonitorPfRZIntrFailPatch2; 2520 /** The number of times we've seen rep prefixes we can't handle. */ 2521 STAMCOUNTER StatMonitorPfRZRepPrefix; 2522 /** Profiling the REP STOSD cases we've handled. */ 2523 STAMPROFILE StatMonitorPfRZRepStosd; 2524 2525 /** Profiling the R0/RC regular access handler. */ 2499 2526 STAMPROFILE StatMonitorRZ; 2500 /** Times we've failed interpreting the instruction. */ 2501 STAMCOUNTER StatMonitorRZEmulateInstr; 2502 /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */ 2527 /** Profiling the pgmPoolFlushPage calls made from the regular access handler in R0/RC. */ 2503 2528 STAMPROFILE StatMonitorRZFlushPage; 2504 /* Times we've detected a page table reinit. */ 2505 STAMCOUNTER StatMonitorRZFlushReinit; 2506 /** Counting flushes for pages that are modified too often. */ 2507 STAMCOUNTER StatMonitorRZFlushModOverflow; 2508 /** Times we've detected fork(). */ 2509 STAMCOUNTER StatMonitorRZFork; 2510 /** Profiling the RC/R0 access we've handled (except REP STOSD). */ 2511 STAMPROFILE StatMonitorRZHandled; 2512 /** Times we've failed interpreting a patch code instruction. */ 2513 STAMCOUNTER StatMonitorRZIntrFailPatch1; 2514 /** Times we've failed interpreting a patch code instruction during flushing. */ 2515 STAMCOUNTER StatMonitorRZIntrFailPatch2; 2516 /** The number of times we've seen rep prefixes we can't handle. */ 2517 STAMCOUNTER StatMonitorRZRepPrefix; 2518 /** Profiling the REP STOSD cases we've handled. */ 2519 STAMPROFILE StatMonitorRZRepStosd; 2529 /** Per access size counts indexed by size minus 1, last for larger. */ 2530 STAMCOUNTER aStatMonitorRZSizes[16+3]; 2531 /** Missaligned access counts indexed by offset - 1. */ 2532 STAMCOUNTER aStatMonitorRZMisaligned[7]; 2533 2520 2534 /** Nr of handled PT faults. */ 2521 2535 STAMCOUNTER StatMonitorRZFaultPT; … … 2529 2543 /** Profiling the R3 access handler. */ 2530 2544 STAMPROFILE StatMonitorR3; 2531 /** Times we've failed interpreting the instruction. 
*/2532 STAMCOUNTER StatMonitorR3EmulateInstr;2533 2545 /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */ 2534 2546 STAMPROFILE StatMonitorR3FlushPage; 2535 /* Times we've detected a page table reinit. */ 2536 STAMCOUNTER StatMonitorR3FlushReinit; 2537 /** Counting flushes for pages that are modified too often. */ 2538 STAMCOUNTER StatMonitorR3FlushModOverflow; 2539 /** Times we've detected fork(). */ 2540 STAMCOUNTER StatMonitorR3Fork; 2541 /** Profiling the R3 access we've handled (except REP STOSD). */ 2542 STAMPROFILE StatMonitorR3Handled; 2543 /** The number of times we've seen rep prefixes we can't handle. */ 2544 STAMCOUNTER StatMonitorR3RepPrefix; 2545 /** Profiling the REP STOSD cases we've handled. */ 2546 STAMPROFILE StatMonitorR3RepStosd; 2547 /** Per access size counts indexed by size minus 1, last for larger. */ 2548 STAMCOUNTER aStatMonitorR3Sizes[16+3]; 2549 /** Missaligned access counts indexed by offset - 1. */ 2550 STAMCOUNTER aStatMonitorR3Misaligned[7]; 2547 2551 /** Nr of handled PT faults. */ 2548 2552 STAMCOUNTER StatMonitorR3FaultPT; … … 2553 2557 /** Nr of handled PML4 faults. */ 2554 2558 STAMCOUNTER StatMonitorR3FaultPML4; 2555 /** The number of times we're called in an async thread an need to flush. */ 2556 STAMCOUNTER StatMonitorR3Async; 2559 2557 2560 /** Times we've called pgmPoolResetDirtyPages (and there were dirty page). */ 2558 2561 STAMCOUNTER StatResetDirtyPages; … … 2792 2795 /** The effective X86_PTE_NX flag for the address. */ 2793 2796 bool fEffectiveNX; 2797 bool afPadding1[2]; 2798 /** Effective flags thus far: RW, US, PWT, PCD, A, ~NX >> 63. 2799 * The NX bit is inverted and shifted down 63 places to bit 0. */ 2800 uint32_t fEffective; 2794 2801 } PGMPTWALKCORE; 2802 2803 /** @name PGMPTWALKCORE::fEffective bits. 2804 * @{ */ 2805 /** Effective execute bit (!NX). */ 2806 #define PGMPTWALK_EFF_X UINT32_C(1) 2807 /** Effective write access bit. */ 2808 #define PGMPTWALK_EFF_RW X86_PTE_RW 2809 /** Effective user-mode access bit. */ 2810 #define PGMPTWALK_EFF_US X86_PTE_US 2811 /** Effective write through cache bit. */ 2812 #define PGMPTWALK_EFF_PWT X86_PTE_PWT 2813 /** Effective cache disabled bit. */ 2814 #define PGMPTWALK_EFF_PCD X86_PTE_PCD 2815 /** Effective accessed bit. */ 2816 #define PGMPTWALK_EFF_A X86_PTE_A 2817 /** The dirty bit of the final entry. */ 2818 #define PGMPTWALK_EFF_D X86_PTE_D 2819 /** The PAT bit of the final entry. */ 2820 #define PGMPTWALK_EFF_PAT X86_PTE_PAT 2821 /** The global bit of the final entry. */ 2822 #define PGMPTWALK_EFF_G X86_PTE_G 2823 /** @} */ 2795 2824 2796 2825 … … 3766 3795 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */ 3767 3796 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */ 3797 STAMCOUNTER StatRZInvalidatePageSizeChanges ; /**< RC/R0: The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB). */ 3768 3798 STAMCOUNTER StatRZInvalidatePageSkipped; /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */ 3769 3799 STAMCOUNTER StatRZPageOutOfSyncUser; /**< RC/R0: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. 
*/ … … 3813 3843 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */ 3814 3844 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */ 3845 STAMCOUNTER StatR3InvalidatePageSizeChanges ; /**< R3: The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB). */ 3815 3846 STAMCOUNTER StatR3InvalidatePageSkipped; /**< R3: The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */ 3816 3847 STAMCOUNTER StatR3PageOutOfSyncUser; /**< R3: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */ … … 4261 4292 PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) pgmPoolAccessHandler; 4262 4293 #ifndef IN_RING3 4263 DECLEXPORT(FNPGMRZPHYSPFHANDLER) pgm PoolAccessPfHandler;4294 DECLEXPORT(FNPGMRZPHYSPFHANDLER) pgmRZPoolAccessPfHandler; 4264 4295 #endif 4265 4296 -
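The new PGMPTWALKCORE::fEffective field and its PGMPTWALK_EFF_* bits let callers test the combined access rights of a walk in one word; because NX is stored inverted in bit 0, "executable" becomes a direct bit test. A hedged usage sketch with stand-in constants (not the real VBox definitions):

    /* Sketch only: interpreting an effective-attribute word like fEffective. */
    #include <stdint.h>
    #include <stdbool.h>

    #define EFF_X   UINT32_C(0x001)   /* stand-in for PGMPTWALK_EFF_X  */
    #define EFF_RW  UINT32_C(0x002)   /* stand-in for PGMPTWALK_EFF_RW */
    #define EFF_US  UINT32_C(0x004)   /* stand-in for PGMPTWALK_EFF_US */

    static bool userCanWriteAndExecute(uint32_t fEffective)
    {
        uint32_t fNeeded = EFF_RW | EFF_US | EFF_X;
        return (fEffective & fNeeded) == fNeeded;
    }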
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r65504 r65531

  653  653      GEN_CHECK_OFF(PGMCPU, pfnRCGstModifyPage);
  654  654      GEN_CHECK_OFF(PGMCPU, pfnRCGstGetPDE);
       655      GEN_CHECK_OFF(PGMCPU, pfnR0GstGetPage);
       656      GEN_CHECK_OFF(PGMCPU, pfnR0GstModifyPage);
       657      GEN_CHECK_OFF(PGMCPU, pfnR0GstGetPDE);
  655  658      GEN_CHECK_OFF(PGMCPU, pfnR3BthRelocate);
  656  659      GEN_CHECK_OFF(PGMCPU, pfnR3BthSyncCR3);