VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h@93628

Last change on this file since 93628 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page sizes no longer need to be the same. Some work is left to do in the page pool code. bugref:9898

/* $Id: PGMAllGstSlatEpt.cpp.h 93554 2022-02-02 22:57:02Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest EPT SLAT - All context code.
 */

/*
 * Copyright (C) 2021-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#if PGM_GST_TYPE == PGM_TYPE_EPT
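/**
 * Checks that the read/write/execute permission bits of an EPT paging-structure
 * entry form a valid combination: write-only entries and, when the CPU does not
 * support execute-only translations, execute-only entries are misconfigurations.
 *
 * @returns @c true if valid, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uEntry  The EPT paging-structure entry to check.
 */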
DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(PCVMCPUCC pVCpu, uint64_t uEntry)
{
    if (!(uEntry & EPT_E_READ))
    {
        if (uEntry & EPT_E_WRITE)
            return false;
        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
        if (   !RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY)
            && (uEntry & EPT_E_EXECUTE))
            return false;
    }
    return true;
}


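/**
 * Checks that the memory type of a leaf EPT paging-structure entry is not one
 * of the reserved encodings (EPT_E_MEMTYPE_INVALID_2, _3 and _7).
 *
 * @returns @c true if valid, @c false otherwise.
 * @param   uEntry  The EPT paging-structure entry to check.
 * @param   uLevel  The page walk level (1, 2 or 3); used for assertions only.
 */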
DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(uint64_t uEntry, uint8_t uLevel)
{
    Assert(uLevel <= 3 && uLevel >= 1); NOREF(uLevel);
    uint64_t const fEptMemTypeMask = uEntry & VMX_BF_EPT_PT_MEMTYPE_MASK;
    if (   fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_2
        || fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_3
        || fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_7)
        return false;
    return true;
}


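/**
 * Fills in the page walk info for a paging-structure entry that is not present,
 * marking the EPT violation as convertible to a \#VE only when the guest
 * supports EPT-violation \#VE, the entry is a terminal one (level 1 or leaf bit
 * set) and the suppress-\#VE bit is clear.
 *
 * @returns VERR_PAGE_TABLE_NOT_PRESENT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pWalk   The page walk info to update.
 * @param   uEntry  The paging-structure entry that is not present.
 * @param   uLevel  The page walk level at which the walk failed.
 */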
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PCVMCPUCC pVCpu, PPGMPTWALK pWalk, uint64_t uEntry, uint8_t uLevel)
{
    static PGMWALKFAIL const s_afEptViolations[] = { PGM_WALKFAIL_EPT_VIOLATION, PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE };
    uint8_t const fEptVeSupported  = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxEptXcptVe;
    uint8_t const fConvertible     = RT_BOOL(uLevel == 1 || (uEntry & EPT_E_BIT_LEAF));
    uint8_t const idxViolationType = fEptVeSupported & fConvertible & !RT_BF_GET(uEntry, VMX_BF_EPT_PT_SUPPRESS_VE);

    pWalk->fNotPresent = true;
    pWalk->uLevel      = uLevel;
    pWalk->fFailed     = s_afEptViolations[idxViolationType];
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


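/**
 * Fills in the page walk info when a paging-structure entry references an
 * invalid guest-physical address; reported as an EPT violation.
 *
 * @returns VERR_PAGE_TABLE_NOT_PRESENT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pWalk   The page walk info to update.
 * @param   uLevel  The page walk level at which the walk failed.
 * @param   rc      The failure status code (expected to be VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS).
 */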
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PCVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel       = uLevel;
    pWalk->fFailed      = PGM_WALKFAIL_EPT_VIOLATION;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


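/**
 * Fills in the page walk info when a paging-structure entry contains a reserved
 * or otherwise invalid field encoding; reported as an EPT misconfiguration.
 *
 * @returns VERR_PAGE_TABLE_NOT_PRESENT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pWalk   The page walk info to update.
 * @param   uLevel  The page walk level at which the walk failed.
 */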
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel     = uLevel;
    pWalk->fFailed    = PGM_WALKFAIL_EPT_MISCONFIG;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


/**
 * Performs an EPT walk (second-level address translation).
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhysNested        The nested-guest physical address to walk.
 * @param   fIsLinearAddrValid  Whether the linear address in @c GCPtrNested caused
 *                              this page walk. If this is false, @c GCPtrNested
 *                              must be 0.
 * @param   GCPtrNested         The nested-guest linear address that caused this
 *                              page walk.
 * @param   pWalk               The page walk info.
 * @param   pGstWalk            The guest mode specific page walk info.
 */
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(Walk)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
                                            PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
{
    Assert(fIsLinearAddrValid || GCPtrNested == 0);

    /*
     * Init walk structures.
     */
    RT_ZERO(*pWalk);
    RT_ZERO(*pGstWalk);

    pWalk->GCPtr              = GCPtrNested;
    pWalk->GCPhysNested       = GCPhysNested;
    pWalk->fIsLinearAddrValid = fIsLinearAddrValid;
    pWalk->fIsSlat            = true;

    /*
     * Figure out EPT attributes that are cumulative (logical-AND) across page walks.
     *   - R, W, X_SUPER are unconditionally cumulative.
     *     See Intel spec. Table 26-7 "Exit Qualification for EPT Violations".
     *
     *   - X_USER is cumulative but relevant only when mode-based execute control for EPT
     *     is used, which we currently don't support (asserted below).
     *
     *   - MEMTYPE is not cumulative and only applicable to the final paging entry.
     *
     *   - A, D EPT bits map to the regular page-table bit positions. Thus, they're not
     *     included in the mask below and are handled separately. Accessed bits are
     *     cumulative but dirty bits are not cumulative as they're only applicable to
     *     the final paging entry.
     */
    Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
    uint64_t const fCumulativeEpt = PGM_PTATTRS_EPT_R_MASK
                                  | PGM_PTATTRS_EPT_W_MASK
                                  | PGM_PTATTRS_EPT_X_SUPER_MASK;

    /*
     * Do the walk.
     */
    uint64_t fEffective;
    {
        /*
         * EPTP.
         *
         * We currently only support 4-level EPT paging.
         * EPT 5-level paging was documented at some point (bit 7 of MSR_IA32_VMX_EPT_VPID_CAP)
         * but for some reason seems to have been removed from subsequent specs.
         */
        int const rc = pgmGstGetEptPML4PtrEx(pVCpu, &pGstWalk->pPml4);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
            return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
    }
    {
        /*
         * PML4E.
         */
        PEPTPML4E pPml4e;
        pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
        EPTPML4E  Pml4e;
        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pml4e.u, 4);

        if (RT_LIKELY(   GST_IS_PML4E_VALID(pVCpu, Pml4e)
                      && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pml4e.u)))
        { /* likely */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
        uint64_t const fEptAttrs     = Pml4e.u & EPT_PML4E_ATTR_MASK;
        uint8_t const  fRead         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
        uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK;
        fEffective = RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                   | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                   | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                   | fEffectiveEpt;
        pWalk->fEffective = fEffective;

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    }
    {
        /*
         * PDPTE.
         */
        PEPTPDPTE pPdpte;
        pGstWalk->pPdpte = pPdpte = &pGstWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        EPTPDPTE  Pdpte;
        pGstWalk->Pdpte.u = Pdpte.u = pPdpte->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pdpte.u, 3);

        /* The order of the following "if" and "else if" statements matters. */
        if (   GST_IS_PDPE_VALID(pVCpu, Pdpte)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pdpte.u))
        {
            uint64_t const fEptAttrs     = Pdpte.u & EPT_PDPTE_ATTR_MASK;
            uint8_t const  fRead         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | (fEffectiveEpt & fCumulativeEpt);
            pWalk->fEffective = fEffective;
        }
        else if (   GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pdpte.u)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pdpte.u, 3))
        {
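            /* Leaf PDPTE: maps a 1 GB page; the dirty bit and memory type are taken from this final entry. */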
            uint64_t const fEptAttrs     = Pdpte.u & EPT_PDPTE1G_ATTR_MASK;
            uint8_t const  fRead         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint8_t const  fDirty        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
            uint8_t const  fMemType      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
            uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | (fEffectiveEpt & fCumulativeEpt);
            fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                        | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
            pWalk->fEffective = fEffective;

            pWalk->fGigantPage = true;
            pWalk->fSucceeded  = true;
            pWalk->GCPhys      = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
                               | (GCPhysNested & GST_GIGANT_PAGE_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 3);

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpte.u & EPT_PDPTE_PG_MASK, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    }
    {
        /*
         * PDE.
         */
        PGSTPDE pPde;
        pGstWalk->pPde = pPde = &pGstWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK];
        GSTPDE  Pde;
        pGstWalk->Pde.u = Pde.u = pPde->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pde.u, 2);

        /* The order of the following "if" and "else if" statements matters. */
        if (   GST_IS_PDE_VALID(pVCpu, Pde)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pde.u))
        {
            uint64_t const fEptAttrs     = Pde.u & EPT_PDE_ATTR_MASK;
            uint8_t const  fRead         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | (fEffectiveEpt & fCumulativeEpt);
            pWalk->fEffective = fEffective;
        }
        else if (   GST_IS_BIG_PDE_VALID(pVCpu, Pde)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pde.u)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pde.u, 2))
        {
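            /* Leaf PDE: maps a 2 MB page; the dirty bit and memory type are taken from this final entry. */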
            uint64_t const fEptAttrs     = Pde.u & EPT_PDE2M_ATTR_MASK;
            uint8_t const  fRead         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint8_t const  fDirty        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
            uint8_t const  fMemType      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
            uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | (fEffectiveEpt & fCumulativeEpt);
            fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                        | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
            pWalk->fEffective = fEffective;

            pWalk->fBigPage   = true;
            pWalk->fSucceeded = true;
            pWalk->GCPhys     = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                              | (GCPhysNested & GST_BIG_PAGE_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2);

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pGstWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        /*
         * PTE.
         */
        PGSTPTE pPte;
        pGstWalk->pPte = pPte = &pGstWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK];
        GSTPTE  Pte;
        pGstWalk->Pte.u = Pte.u = pPte->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pte.u, 1);

        if (   GST_IS_PTE_VALID(pVCpu, Pte)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pte.u)
            && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pte.u, 1))
        { /* likely */ }
        else
            return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 1);

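        /* Leaf PTE: the final translation; the dirty bit and memory type apply only to this entry. */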
        uint64_t const fEptAttrs     = Pte.u & EPT_PTE_ATTR_MASK;
        uint8_t const  fRead         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
        uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint8_t const  fDirty        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
        uint8_t const  fMemType      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
        uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK;
        fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                    | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                    | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                    | (fEffectiveEpt & fCumulativeEpt);
        fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                    | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
        pWalk->fEffective = fEffective;

        pWalk->fSucceeded = true;
        pWalk->GCPhys     = GST_GET_PTE_GCPHYS(Pte) | (GCPhysNested & GUEST_PAGE_OFFSET_MASK);
        return VINF_SUCCESS;
    }
}
#else
# error "Guest paging type must be EPT."
#endif