Changeset 92311 in vbox for trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
- Timestamp: Nov 10, 2021 9:05:02 AM (3 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
r92257 r92311 47 47 PGSTPTWALK pWalk) 48 48 { 49 /* 50 * Init walk structure. 51 */ 49 52 int rc; 50 53 RT_ZERO(*pWalk); … … 54 57 pWalk->Core.fIsLinearAddrValid = fIsLinearAddrValid; 55 58 59 /* 60 * Figure out EPT attributes that are cumulative (logical-AND) across page walks. 61 * - R, W, X_SUPER are unconditionally cumulative. 62 * See Intel spec. Table 26-7 "Exit Qualification for EPT Violations". 63 * 64 * - X_USER is Cumulative but relevant only when mode-based execute control for EPT 65 * which we currently don't support it (asserted below). 66 * 67 * - MEMTYPE is not cumulative and only applicable to the final paging entry. 68 * 69 * - A, D EPT bits map to the regular page-table bit positions. Thus, they're not 70 * included in the mask below and handled separately. Accessed bits are 71 * cumulative but dirty bits are not cumulative as they're only applicable to 72 * the final paging entry. 73 */ 74 Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt); 75 uint64_t const fCumulativeEpt = PGM_PTATTRS_EPT_R_MASK 76 | PGM_PTATTRS_EPT_W_MASK 77 | PGM_PTATTRS_EPT_X_SUPER_MASK; 78 79 /* 80 * Do the walk. 
81 */ 56 82 uint64_t fEffective; 57 83 { … … 73 99 Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt); 74 100 uint64_t const fEptAttrs = Pml4e.u & EPT_PML4E_ATTR_MASK; 75 uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);76 uint8_t const fRead = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);77 uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);78 101 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 79 102 uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK; 80 pWalk->Core.fEffective = fEffective = RT_BF_MAKE(PGM_PTATTRS_X, fExecute) 81 | RT_BF_MAKE(PGM_PTATTRS_RW, fRead & fWrite) 82 | RT_BF_MAKE(PGM_PTATTRS_US, 1) 83 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 84 | fEffectiveEpt; 103 fEffective = RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 104 | fEffectiveEpt; 105 pWalk->Core.fEffective = fEffective; 85 106 86 107 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pWalk->pPdpt); … … 100 121 if (GST_IS_PDPE_VALID(pVCpu, Pdpte)) 101 122 { 102 uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE_ATTR_MASK; 103 uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE); 104 uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE); 105 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 123 uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE_ATTR_MASK; 124 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 106 125 uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK; 107 pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_PTATTRS_X, fExecute) 108 | RT_BF_MAKE(PGM_PTATTRS_RW, fWrite) 109 | RT_BF_MAKE(PGM_PTATTRS_US, 1) 110 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 111 | fEffectiveEpt; 126 fEffective &= RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 127 | (fEffectiveEpt & fCumulativeEpt); 128 pWalk->Core.fEffective = fEffective; 112 129 } 113 130 else if 
(GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte)) 114 131 { 115 uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE1G_ATTR_MASK; 116 uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE); 117 uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE); 118 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 119 uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY); 132 uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE1G_ATTR_MASK; 133 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 134 uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY); 135 uint8_t const fMemType = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE); 120 136 uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK; 121 pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_PTATTRS_X, fExecute) 122 | RT_BF_MAKE(PGM_PTATTRS_RW, fWrite) 123 | RT_BF_MAKE(PGM_PTATTRS_US, 1) 124 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 125 | RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 126 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, 0) 127 | fEffectiveEpt; 128 pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW); 137 fEffective &= RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 138 | (fEffectiveEpt & fCumulativeEpt); 139 fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 140 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType); 141 pWalk->Core.fEffective = fEffective; 142 143 pWalk->Core.fEffectiveRW = !!(fEffective & PGM_PTATTRS_RW_MASK); /** @todo RW isn't copied from EPT R, W. This will break callers who use RW for EPT attributes. */ 129 144 pWalk->Core.fEffectiveUS = true; 130 pWalk->Core.fEffectiveNX = ! 
fExecute;145 pWalk->Core.fEffectiveNX = !(fEffective & PGM_PTATTRS_EPT_X_SUPER_MASK); 131 146 pWalk->Core.fGigantPage = true; 132 147 pWalk->Core.fSucceeded = true; … … 150 165 else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2); 151 166 152 uint64_t const fEptAttrs = Pde.u & EPT_PDE2M_ATTR_MASK; 153 uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE); 154 uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE); 155 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 156 uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY); 167 uint64_t const fEptAttrs = Pde.u & EPT_PDE2M_ATTR_MASK; 168 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 169 uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY); 170 uint8_t const fMemType = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE); 157 171 uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK; 158 pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_PTATTRS_X, fExecute) 159 | RT_BF_MAKE(PGM_PTATTRS_RW, fWrite) 160 | RT_BF_MAKE(PGM_PTATTRS_US, 1) 161 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 162 | RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 163 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, 0) 164 | fEffectiveEpt; 165 pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW); 172 173 fEffective &= RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 174 | (fEffectiveEpt & fCumulativeEpt); 175 fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 176 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType); 177 pWalk->Core.fEffective = fEffective; 178 pWalk->Core.fEffectiveRW = !!(fEffective & PGM_PTATTRS_RW_MASK); /** @todo RW isn't copied from EPT R, W. This will break callers who use RW for EPT attributes. */ 166 179 pWalk->Core.fEffectiveUS = true; 167 pWalk->Core.fEffectiveNX = ! 
fExecute;180 pWalk->Core.fEffectiveNX = !(fEffective & PGM_PTATTRS_EPT_X_SUPER_MASK); 168 181 pWalk->Core.fBigPage = true; 169 182 pWalk->Core.fSucceeded = true; … … 177 190 return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2); 178 191 179 uint64_t const fEptAttrs = Pde.u & EPT_PDE_ATTR_MASK; 180 uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE); 181 uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE); 182 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 192 uint64_t const fEptAttrs = Pde.u & EPT_PDE_ATTR_MASK; 193 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 183 194 uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK; 184 pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_PTATTRS_X, fExecute) 185 | RT_BF_MAKE(PGM_PTATTRS_RW, fWrite) 186 | RT_BF_MAKE(PGM_PTATTRS_US, 1) 187 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 188 | fEffectiveEpt; 195 196 fEffective &= RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 197 | (fEffectiveEpt & fCumulativeEpt); 198 pWalk->Core.fEffective = fEffective; 189 199 190 200 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt); … … 204 214 else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 1); 205 215 206 uint64_t const fEptAttrs = Pte.u & EPT_PTE_ATTR_MASK; 207 uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE); 208 uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE); 209 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 210 uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY); 216 uint64_t const fEptAttrs = Pte.u & EPT_PTE_ATTR_MASK; 217 uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED); 218 uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY); 219 uint8_t const fMemType = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE); 211 220 uint64_t const fEffectiveEpt = (fEptAttrs << 
PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK; 212 pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_PTATTRS_X, fExecute) 213 | RT_BF_MAKE(PGM_PTATTRS_RW, fWrite) 214 | RT_BF_MAKE(PGM_PTATTRS_US, 1) 215 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 216 | RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 217 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, 0) 218 | fEffectiveEpt; 219 pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW); 221 fEffective &= RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 222 | (fEffectiveEpt & fCumulativeEpt); 223 fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 224 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType); 225 pWalk->Core.fEffective = fEffective; 226 227 pWalk->Core.fEffectiveRW = !!(fEffective & PGM_PTATTRS_RW_MASK); /** @todo RW isn't copied from EPT R, W. This will break callers who use RW for EPT attributes. */ 220 228 pWalk->Core.fEffectiveUS = true; 221 pWalk->Core.fEffectiveNX = ! fExecute;229 pWalk->Core.fEffectiveNX = !(fEffective & PGM_PTATTRS_EPT_X_SUPER_MASK); 222 230 pWalk->Core.fSucceeded = true; 223 231 pWalk->Core.GCPhys = GST_GET_PTE_GCPHYS(Pte)
Note: See TracChangeset for help on using the changeset viewer.