VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@104932

Last change on this file was 104932, checked in by vboxsync, 10 months ago:

VMM/PGM,IEM: Refactored+copied PGMGstGetPage into PGMGstQueryPage that takes care of table walking, setting A & D bits and validating the access. Use new function in IEM. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 42.0 KB
1/* $Id: PGMAllGst.h 104932 2024-06-15 00:29:39Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Guest Paging Template - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Internal Functions *
31*********************************************************************************************************************************/
32RT_C_DECLS_BEGIN
33/** @todo Do we really need any of these forward declarations? */
34#if PGM_GST_TYPE == PGM_TYPE_32BIT \
35 || PGM_GST_TYPE == PGM_TYPE_PAE \
36 || PGM_GST_TYPE == PGM_TYPE_AMD64
37DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk);
38#endif
39PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
40PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
41PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
42PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu);
43
44#ifdef IN_RING3 /* r3 only for now. */
45PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
46#endif
47RT_C_DECLS_END
48
49
50/**
51 * Enters the guest mode.
52 *
53 * @returns VBox status code.
54 * @param pVCpu The cross context virtual CPU structure.
55 * @param GCPhysCR3 The physical address from the CR3 register.
56 */
57PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
58{
59 /*
60 * Map and monitor CR3
61 */
62 uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
63 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
64 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
65 return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
66}
67
68
69/**
70 * Exits the guest mode.
71 *
72 * @returns VBox status code.
73 * @param pVCpu The cross context virtual CPU structure.
74 */
75PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu)
76{
77 uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
78 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
79 AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
80 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
81}
82
83
84#if PGM_GST_TYPE == PGM_TYPE_32BIT \
85 || PGM_GST_TYPE == PGM_TYPE_PAE \
86 || PGM_GST_TYPE == PGM_TYPE_AMD64
87
88
89DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
90{
91 NOREF(pVCpu);
92 pWalk->fNotPresent = true;
93 pWalk->uLevel = uLevel;
94 pWalk->fFailed = PGM_WALKFAIL_NOT_PRESENT
95 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
96 return VERR_PAGE_TABLE_NOT_PRESENT;
97}
98
99DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
100{
101 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
102 pWalk->fBadPhysAddr = true;
103 pWalk->uLevel = uLevel;
104 pWalk->fFailed = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS
105 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
106 return VERR_PAGE_TABLE_NOT_PRESENT;
107}
108
109DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
110{
111 NOREF(pVCpu);
112 pWalk->fRsvdError = true;
113 pWalk->uLevel = uLevel;
114 pWalk->fFailed = PGM_WALKFAIL_RESERVED_BITS
115 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
116 return VERR_PAGE_TABLE_NOT_PRESENT;
117}
118
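The three helpers above only record why and at which level a walk failed; callers are expected to inspect the PGMPTWALK fields afterwards. A minimal caller-side sketch, using only fields and macros that appear in this file (the helper name and the Log strings are illustrative, not VBox API):

DECLINLINE(void) pgmExampleLogWalkFailure(PGMPTWALK const *pWalk)
{
    /* uLevel is 1 (PT) up to 4 (PML4); 8 is used for the 32-bit/PAE 4G boundary check. */
    if (pWalk->fNotPresent)
        Log(("guest walk: entry not present at level %u\n", pWalk->uLevel));
    else if (pWalk->fBadPhysAddr)
        Log(("guest walk: table at level %u has a bad physical address\n", pWalk->uLevel));
    else if (pWalk->fRsvdError)
        Log(("guest walk: reserved bits set at level %u\n", pWalk->uLevel));
    else
        Log(("guest walk: no failure recorded\n"));
}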
119
120/**
121 * Performs a guest page table walk.
122 *
123 * @returns VBox status code.
124 * @retval VINF_SUCCESS on success.
125 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
126 *
127 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
128 * @param GCPtr The guest virtual address to walk by.
129 * @param pWalk The page walk info.
130 * @param pGstWalk The guest mode specific page walk info.
131 * @thread EMT(pVCpu)
132 */
133DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
134{
135 int rc;
136
137# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
138/** @def PGM_GST_SLAT_WALK
139 * Macro to perform guest second-level address translation (EPT or Nested).
140 *
141 * @param a_pVCpu The cross context virtual CPU structure of the calling
142 * EMT.
143 * @param a_GCPtrNested The nested-guest linear address that caused the
144 * second-level translation.
145 * @param a_GCPhysNested The nested-guest physical address to translate.
146 * @param a_GCPhysOut Where to store the guest-physical address (result).
147 */
148# define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
149 do { \
150 if ((a_pVCpu)->pgm.s.enmGuestSlatMode == PGMSLAT_EPT) \
151 { \
152 PGMPTWALK WalkSlat; \
153 PGMPTWALKGST WalkGstSlat; \
154 int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &WalkSlat, \
155 &WalkGstSlat); \
156 if (RT_SUCCESS(rcX)) \
157 (a_GCPhysOut) = WalkSlat.GCPhys; \
158 else \
159 { \
160 *(a_pWalk) = WalkSlat; \
161 return rcX; \
162 } \
163 } \
164 } while (0)
165# endif
166
167 /*
168 * Init the walking structures.
169 */
170 RT_ZERO(*pWalk);
171 RT_ZERO(*pGstWalk);
172 pWalk->GCPtr = GCPtr;
173
174# if PGM_GST_TYPE == PGM_TYPE_32BIT \
175 || PGM_GST_TYPE == PGM_TYPE_PAE
176 /*
177 * Boundary check for PAE and 32-bit (prevents trouble further down).
178 */
179 if (RT_UNLIKELY(GCPtr >= _4G))
180 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
181# endif
182
183 uint64_t fEffective;
184 {
185# if PGM_GST_TYPE == PGM_TYPE_AMD64
186 /*
187 * The PML4 table.
188 */
189 rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
190 if (RT_SUCCESS(rc)) { /* probable */ }
191 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
192
193 PX86PML4E pPml4e;
194 pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
195 X86PML4E Pml4e;
196 pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;
197
198 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
199 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);
200
201 if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
202 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);
203
204 fEffective = Pml4e.u & ( X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A
205 | X86_PML4E_NX);
206 pWalk->fEffective = fEffective;
207
208 /*
209 * The PDPT.
210 */
211 RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
212# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
213 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
214# endif
215 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
216 if (RT_SUCCESS(rc)) { /* probable */ }
217 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
218
219# elif PGM_GST_TYPE == PGM_TYPE_PAE
220 rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
221 if (RT_SUCCESS(rc)) { /* probable */ }
222 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
223# endif
224 }
225 {
226# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
227 PX86PDPE pPdpe;
228 pGstWalk->pPdpe = pPdpe = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
229 X86PDPE Pdpe;
230 pGstWalk->Pdpe.u = Pdpe.u = pPdpe->u;
231
232 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
233 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);
234
235 if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
236 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);
237
238# if PGM_GST_TYPE == PGM_TYPE_AMD64
239 fEffective &= (Pdpe.u & ( X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US
240 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A));
241 fEffective |= Pdpe.u & X86_PDPE_LM_NX;
242# else
243 /*
244 * NX in the legacy-mode PAE PDPE is reserved. The valid check above ensures the NX bit is not set.
245 * The RW, US, A bits MBZ in PAE PDPTE entries but must be 1 the way we compute cumulative (effective) access rights.
246 */
247 Assert(!(Pdpe.u & X86_PDPE_LM_NX));
248 fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
249 | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
250# endif
251 pWalk->fEffective = fEffective;
252
253 /*
254 * The PD.
255 */
256 RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
257# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
258 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
259# endif
260 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
261 if (RT_SUCCESS(rc)) { /* probable */ }
262 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
263
264# elif PGM_GST_TYPE == PGM_TYPE_32BIT
265 rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
266 if (RT_SUCCESS(rc)) { /* probable */ }
267 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
268# endif
269 }
270 {
271 PGSTPDE pPde;
272 pGstWalk->pPde = pPde = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
273 GSTPDE Pde;
274 pGstWalk->Pde.u = Pde.u = pPde->u;
275 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
276 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
277 if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
278 {
279 if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
280 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
281
282 /*
283 * We're done.
284 */
285# if PGM_GST_TYPE == PGM_TYPE_32BIT
286 fEffective = Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
287# else
288 fEffective &= Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
289 fEffective |= Pde.u & X86_PDE2M_PAE_NX;
290# endif
291 fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
292 fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
293 pWalk->fEffective = fEffective;
294 Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
295 Assert(fEffective & PGM_PTATTRS_R_MASK);
296
297 pWalk->fBigPage = true;
298 pWalk->fSucceeded = true;
299 RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
300 | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
301# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
302 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
303# endif
304 pWalk->GCPhys = GCPhysPde;
305 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys); /** @todo why do we apply it here and not below?!? */
306 return VINF_SUCCESS;
307 }
308
309 if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
310 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
311# if PGM_GST_TYPE == PGM_TYPE_32BIT
312 fEffective = Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
313# else
314 fEffective &= Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
315 fEffective |= Pde.u & X86_PDE_PAE_NX;
316# endif
317 pWalk->fEffective = fEffective;
318
319 /*
320 * The PT.
321 */
322 RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
323# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
324 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
325# endif
326 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
327 if (RT_SUCCESS(rc)) { /* probable */ }
328 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
329 }
330 {
331 PGSTPTE pPte;
332 pGstWalk->pPte = pPte = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
333 GSTPTE Pte;
334 pGstWalk->Pte.u = Pte.u = pPte->u;
335
336 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
337 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);
338
339 if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
340 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);
341
342 /*
343 * We're done.
344 */
345 fEffective &= Pte.u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
346 fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
347# if PGM_GST_TYPE != PGM_TYPE_32BIT
348 fEffective |= Pte.u & X86_PTE_PAE_NX;
349# endif
350 pWalk->fEffective = fEffective;
351 Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
352 Assert(fEffective & PGM_PTATTRS_R_MASK);
353
354 pWalk->fSucceeded = true;
355 RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
356 | (GCPtr & GUEST_PAGE_OFFSET_MASK);
357# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
358 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
359# endif
360 pWalk->GCPhys = GCPhysPte;
361 return VINF_SUCCESS;
362 }
363}
364
365#endif /* 32BIT, PAE, AMD64 */
366
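A worked example of the fEffective accumulation performed in the walk above, taking the AMD64 variant of the template and made-up entry values: the walker ANDs the P/RW/US/A bits of every level together (and ORs in NX), so a single restrictive level dominates the cumulative access rights.

uint64_t fEff = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;  /* PML4E: user read/write */
fEff &= X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;              /* PDPE:  user read/write */
fEff &= X86_PDE_P  | X86_PDE_RW  | X86_PDE_US  | X86_PDE_A;               /* PDE:   user read/write */
fEff &= X86_PTE_P  | X86_PTE_US  | X86_PTE_A;                             /* PTE:   read-only (RW clear) */
/* fEff now has the RW bit clear: the final mapping is read-only even though
   every upper level allowed writes, which is exactly what pWalk->fEffective
   reflects after the walk. */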
367/**
368 * Gets effective Guest OS page information.
369 *
370 * @returns VBox status code.
371 * @param pVCpu The cross context virtual CPU structure.
372 * @param GCPtr Guest Context virtual address of the page.
373 * @param pWalk Where to store the page walk info.
374 * @thread EMT(pVCpu)
375 */
376PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
377{
378#if PGM_GST_TYPE == PGM_TYPE_REAL \
379 || PGM_GST_TYPE == PGM_TYPE_PROT
380
381# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
382 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
383 {
384 PGMPTWALK WalkSlat;
385 PGMPTWALKGST WalkGstSlat;
386 int const rc = pgmGstSlatWalk(pVCpu, GCPtr, true /* fIsLinearAddrValid */, GCPtr, &WalkSlat, &WalkGstSlat);
387 if (RT_SUCCESS(rc))
388 {
389 RT_ZERO(*pWalk);
390 pWalk->fSucceeded = true;
391 pWalk->GCPtr = GCPtr;
392 pWalk->GCPhys = WalkSlat.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
393 pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
394 }
395 else
396 *pWalk = WalkSlat;
397 return rc;
398 }
399# endif
400
401 /*
402 * Fake it.
403 */
404 RT_ZERO(*pWalk);
405 pWalk->fSucceeded = true;
406 pWalk->GCPtr = GCPtr;
407 pWalk->GCPhys = GCPtr & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
408 pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
409 NOREF(pVCpu);
410 return VINF_SUCCESS;
411
412#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
413 || PGM_GST_TYPE == PGM_TYPE_PAE \
414 || PGM_GST_TYPE == PGM_TYPE_AMD64
415
416 GSTPTWALK GstWalk;
417 int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, pWalk, &GstWalk);
418 if (RT_FAILURE(rc))
419 return rc;
420
421 Assert(pWalk->fSucceeded);
422 Assert(pWalk->GCPtr == GCPtr);
423
424 PGMPTATTRS fFlags;
425 if (!pWalk->fBigPage)
426 fFlags = (GstWalk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
427 | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
428# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
429 | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
430# endif
431 ;
432 else
433 fFlags = (GstWalk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
434 | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK | PGM_PTATTRS_PAT_MASK))
435# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
436 | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
437# endif
438 ;
439
440 pWalk->GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
441 pWalk->fEffective = fFlags;
442 return VINF_SUCCESS;
443
444#else
445# error "shouldn't be here!"
446 /* something else... */
447 return VERR_NOT_SUPPORTED;
448#endif
449}
450
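A hypothetical caller of GetPage, shown only to illustrate how the returned PGMPTWALK is meant to be consumed (the helper name is invented; the types, masks and status codes are the ones used in this file): translate a linear address and return the byte-exact physical address only if the mapping is writable.

DECLINLINE(int) pgmExampleQueryWritablePage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    PGMPTWALK Walk;
    int rc = PGM_GST_NAME(GetPage)(pVCpu, GCPtr, &Walk);
    if (RT_SUCCESS(rc))
    {
        if (Walk.fEffective & PGM_PTATTRS_W_MASK)
        {
            /* GetPage masks off the page offset, so add it back for a byte address. */
            *pGCPhys = Walk.GCPhys | (GCPtr & GUEST_PAGE_OFFSET_MASK);
            return VINF_SUCCESS;
        }
        rc = VERR_ACCESS_DENIED;
    }
    return rc;
}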
451
454
455#if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX) || defined(DOXYGEN_RUNNING)
456/** Converts regular style walk info to fast style. */
457DECL_FORCE_INLINE(void) PGM_GST_NAME(ConvertPtWalkToFast)(PGMPTWALK const *pSrc, PPGMPTWALKFAST pDst)
458{
459 pDst->GCPtr = pSrc->GCPtr;
460 pDst->GCPhys = pSrc->GCPhys;
461 pDst->GCPhysNested = pSrc->GCPhysNested;
462 pDst->fInfo = (pSrc->fSucceeded ? PGM_WALKINFO_SUCCEEDED : 0)
463 | (pSrc->fIsSlat ? PGM_WALKINFO_IS_SLAT : 0)
464 | (pSrc->fIsLinearAddrValid ? PGM_WALKINFO_IS_LINEAR_ADDR_VALID : 0)
465 | ((uint32_t)pSrc->uLevel << PGM_WALKINFO_LEVEL_SHIFT);
466 pDst->fFailed = pSrc->fFailed;
467 pDst->fEffective = pSrc->fEffective;
468}
469#endif
470
471
472#if PGM_GST_TYPE == PGM_TYPE_32BIT \
473 || PGM_GST_TYPE == PGM_TYPE_PAE \
474 || PGM_GST_TYPE == PGM_TYPE_AMD64
475
476DECLINLINE(int) PGM_GST_NAME(WalkFastReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel)
477{
478 RT_NOREF(pVCpu);
479 pWalk->fFailed = PGM_WALKFAIL_NOT_PRESENT | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
480 return VERR_PAGE_TABLE_NOT_PRESENT;
481}
482
483DECLINLINE(int) PGM_GST_NAME(WalkFastReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel, int rc)
484{
485 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); RT_NOREF(pVCpu, rc);
486 pWalk->fFailed = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
487 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
488}
489
490DECLINLINE(int) PGM_GST_NAME(WalkFastReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel)
491{
492 RT_NOREF(pVCpu);
493 pWalk->fFailed = PGM_WALKFAIL_RESERVED_BITS | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
494 return VERR_RESERVED_PAGE_TABLE_BITS;
495}
496
497/**
498 * Performs a guest page table walk.
499 *
500 * @returns VBox status code.
501 * @retval VINF_SUCCESS on success.
502 * @retval VERR_PAGE_TABLE_NOT_PRESENT, VERR_RESERVED_PAGE_TABLE_BITS or
503 * VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS on normal failure.
504 * The failure reason is also recorded in PGMPTWALKFAST::fFailed.
505 *
506 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
507 * @param GCPtr The guest virtual address to walk by.
508 * @param fFlags PGMQPAGE_F_XXX.
509 * This is ignored when @a a_fSetFlags is @c false.
510 * @param pWalk The page walk info.
511 * @param pGstWalk The guest mode specific page walk info.
512 * @tparam a_enmGuestSlatMode The SLAT mode of the function.
513 * @tparam a_fSetFlags Whether to process @a fFlags and set accessed
514 * and dirty flags accordingly.
515 * @thread EMT(pVCpu)
516 */
517template<PGMSLAT const a_enmGuestSlatMode = PGMSLAT_DIRECT, bool const a_fSetFlags = false>
518DECLINLINE(int) PGM_GST_NAME(WalkFast)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk, PGSTPTWALK pGstWalk)
519{
520 int rc;
521
522# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX) || defined(DOXYGEN_RUNNING)
523/** @def PGM_GST_SLAT_WALK
524 * Macro to perform guest second-level address translation (EPT or Nested).
525 *
526 * @param a_pVCpu The cross context virtual CPU structure of the calling
527 * EMT.
528 * @param a_GCPtrNested The nested-guest linear address that caused the
529 * second-level translation.
530 * @param a_GCPhysNested The nested-guest physical address to translate.
531 * @param a_fFinal Set to @a true if this is the final page table entry
532 * and effective nested page table flags should be
533 * merged into PGMPTWALKFAST::fEffective. Otherwise
534 * set to @a false and nothing is done.
535 * @param a_GCPhysOut Where to store the guest-physical address (result).
536 * @param a_pWalk The @a pWalk argument to the function.
537 */
538# define PGM_GST_SLAT_WALK_FAST(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_fFinal, a_GCPhysOut, a_pWalk) \
539 do { \
540 /** @todo Optimize this. Among other things, WalkSlat can be eliminated. WalkGstSlat is completely pointless. */ \
541 /** @todo pass fFlags along as appropriate... */ \
542 if (a_enmGuestSlatMode != PGMSLAT_DIRECT) \
543 { \
544 PGMPTWALK WalkSlat; \
545 PGMPTWALKGST WalkGstSlat; \
546 int rcX; \
547 if (a_enmGuestSlatMode == PGMSLAT_EPT) \
548 rcX = PGM_GST_SLAT_NAME_EPT(Walk)(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, \
549 &WalkSlat, &WalkGstSlat.u.Ept); \
550 else AssertFailedReturn(VERR_NOT_IMPLEMENTED); \
551 if (RT_SUCCESS(rcX)) \
552 (a_GCPhysOut) = WalkSlat.GCPhys; \
553 else \
554 { \
555 PGM_NAME(ConvertPtWalkToFast)(&WalkSlat, pWalk); \
556 return rcX; \
557 } \
558 if (a_fFinal) \
559 { /* Merge in the nested paging flags for the final GCPhys. */ \
560 if (a_enmGuestSlatMode == PGMSLAT_EPT) \
561 (a_pWalk)->fEffective = ((a_pWalk)->fEffective & ~PGM_PTATTRS_EPT_MASK) \
562 | WalkSlat.fEffective & PGM_PTATTRS_EPT_MASK; \
563 else AssertFailedReturn(VERR_NOT_IMPLEMENTED); \
564 } \
565 } \
566 } while (0)
567# else
568# define PGM_GST_SLAT_WALK_FAST(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_fFinal, a_GCPhysOut, a_pWalk) do { } while (0)
569# endif
570# if PGM_GST_TYPE == PGM_TYPE_32BIT
571# define PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, a_fEffective, a_pEntryU, a_OrgEntryU, a_fFlags) do { \
572 if (!a_fSetFlags || ((a_OrgEntryU) & (a_fFlags)) == (a_fFlags)) \
573 { /* likely */ } \
574 else \
575 { \
576 ASMAtomicOrU32((a_pEntryU), (a_fFlags)); \
577 (a_fEffective) |= (a_fFlags); \
578 } \
579 } while (0)
580# else
581# define PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, a_fEffective, a_pEntryU, a_OrgEntryU, a_fFlags) do { \
582 if (!a_fSetFlags || ((a_OrgEntryU) & (a_fFlags)) == (a_fFlags)) \
583 { /* likely */ } \
584 else \
585 { \
586 ASMAtomicOrU64((a_pEntryU), (a_fFlags)); \
587 (a_fEffective) |= (a_fFlags); \
588 } \
589 } while (0)
590# endif
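    /* Roughly what PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPte->u, Pte.u,
       X86_PTE_A | X86_PTE_D) expands to for a 64-bit entry when a_fSetFlags is true
       (illustrative expansion only, not generated code):

           if ((Pte.u & (X86_PTE_A | X86_PTE_D)) == (X86_PTE_A | X86_PTE_D))
           { } // both bits already set, nothing to do
           else
           {
               ASMAtomicOrU64(&pPte->u, X86_PTE_A | X86_PTE_D); // set A+D in the guest PTE
               fEffective |= X86_PTE_A | X86_PTE_D;             // and mirror them in the result
           }

       When a_fSetFlags is false the condition is constantly true, so that instantiation
       never touches the guest tables. */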
591
592
593 /*
594 * Init the walking structures.
595 */
596 RT_ZERO(*pGstWalk);
597 pWalk->GCPtr = GCPtr;
598 pWalk->GCPhys = 0;
599 pWalk->GCPhysNested = 0;
600 pWalk->fInfo = 0;
601 pWalk->fFailed = 0;
602 pWalk->fEffective = 0;
603
604# if PGM_GST_TYPE == PGM_TYPE_32BIT \
605 || PGM_GST_TYPE == PGM_TYPE_PAE
606 /*
607 * Boundary check for PAE and 32-bit (prevents trouble further down).
608 */
609 if (RT_LIKELY(GCPtr < _4G))
610 { /* extremely likely */ }
611 else
612 return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 8);
613# endif
614
615 uint64_t fEffective;
616 {
617# if PGM_GST_TYPE == PGM_TYPE_AMD64
618 /*
619 * The PML4 table.
620 */
621 rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
622 if (RT_SUCCESS(rc)) { /* probable */ }
623 else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
624
625 PX86PML4E pPml4e;
626 pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
627 X86PML4E Pml4e;
628 pGstWalk->Pml4e.u = Pml4e.u = ASMAtomicUoReadU64(&pPml4e->u);
629
630 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
631 else return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 4);
632
633 if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
634 else return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 4);
635
636 fEffective = Pml4e.u & ( X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A
637 | X86_PML4E_NX);
638 PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPml4e->u, Pml4e.u, X86_PML4E_A);
639 pWalk->fEffective = fEffective;
640
641 /*
642 * The PDPT.
643 */
644 RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
645 PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPdpt, false /*a_fFinal*/, GCPhysPdpt, pWalk);
646 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
647 if (RT_SUCCESS(rc)) { /* probable */ }
648 else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
649
650# elif PGM_GST_TYPE == PGM_TYPE_PAE
651 rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
652 if (RT_SUCCESS(rc)) { /* probable */ }
653 else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
654# endif
655 }
656 {
657# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
658 PX86PDPE pPdpe;
659 pGstWalk->pPdpe = pPdpe = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
660 X86PDPE Pdpe;
661 pGstWalk->Pdpe.u = Pdpe.u = ASMAtomicUoReadU64(&pPdpe->u);
662
663 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
664 else return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 3);
665
666 if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
667 else return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 3);
668
669# if PGM_GST_TYPE == PGM_TYPE_AMD64
670 fEffective &= (Pdpe.u & ( X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US
671 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A));
672 fEffective |= Pdpe.u & X86_PDPE_LM_NX;
673 PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPdpe->u, Pdpe.u, X86_PDE_A);
674# else
675 /*
676 * NX in the legacy-mode PAE PDPE is reserved. The valid check above ensures the NX bit is not set.
677 * The RW, US, A bits MBZ in PAE PDPTE entries but must be 1 the way we compute cumulative (effective) access rights.
678 */
679 Assert(!(Pdpe.u & X86_PDPE_LM_NX));
680 fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
681 | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
682# endif
683 pWalk->fEffective = fEffective;
684
685 /*
686 * The PD.
687 */
688 RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
689 PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPd, false /*a_fFinal*/, GCPhysPd, pWalk);
690 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
691 if (RT_SUCCESS(rc)) { /* probable */ }
692 else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
693
694# elif PGM_GST_TYPE == PGM_TYPE_32BIT
695 rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
696 if (RT_SUCCESS(rc)) { /* probable */ }
697 else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
698# endif
699 }
700 {
701 PGSTPDE pPde;
702 pGstWalk->pPde = pPde = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
703 GSTPDE Pde;
704# if PGM_GST_TYPE != PGM_TYPE_32BIT
705 pGstWalk->Pde.u = Pde.u = ASMAtomicUoReadU64(&pPde->u);
706# else
707 pGstWalk->Pde.u = Pde.u = ASMAtomicUoReadU32(&pPde->u);
708# endif
709 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
710 else return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 2);
711 if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
712 {
713 if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
714 else return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 2);
715
716 /*
717 * We're done.
718 */
719 pWalk->fInfo = PGM_WALKINFO_SUCCEEDED | PGM_WALKINFO_BIG_PAGE;
720
721# if PGM_GST_TYPE == PGM_TYPE_32BIT
722 fEffective = Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
723# else
724 fEffective &= Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
725 fEffective |= Pde.u & X86_PDE2M_PAE_NX;
726# endif
727 fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
728 fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
729
730 rc = VINF_SUCCESS;
731 if (a_fSetFlags)
732 {
733 /* We have to validate the access before setting any flags. */
734 uint32_t fFailed = 0;
735 if ((fFlags & PGMQPAGE_F_USER_MODE) && !(fEffective & X86_PDE4M_US))
736 fFailed |= PGM_WALKFAIL_NOT_ACCESSIBLE_BY_MODE;
737 if (fFlags & PGMQPAGE_F_WRITE)
738 {
739 if ( (fEffective & X86_PDE4M_RW)
740 || (fFlags & (PGMQPAGE_F_USER_MODE | PGMQPAGE_F_CR0_WP0)) == PGMQPAGE_F_CR0_WP0)
741 { /* likely*/ }
742 else fFailed |= PGM_WALKFAIL_NOT_WRITABLE;
743 }
744# if PGM_GST_TYPE != PGM_TYPE_32BIT
745 else if (fFlags & PGMQPAGE_F_EXECUTE)
746 {
747 if (!(fEffective & X86_PDE2M_PAE_NX) || !pVCpu->pgm.s.fNoExecuteEnabled) { /* likely */ }
748 else fFailed |= PGM_WALKFAIL_NOT_EXECUTABLE;
749 }
750# endif
751 if (fFailed == 0)
752 {
753 if (!(fFlags & PGMQPAGE_F_WRITE))
754 PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPde->u, Pde.u, X86_PDE4M_A);
755 else
756 PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPde->u, Pde.u, X86_PDE4M_A | X86_PDE4M_D);
757 }
758 else
759 {
760 pWalk->fFailed = fFailed | (2U << PGM_WALKFAIL_LEVEL_SHIFT);
761 pWalk->fInfo = PGM_WALKINFO_BIG_PAGE;
762 rc = VERR_ACCESS_DENIED;
763 }
764 }
765
766 pWalk->fEffective = fEffective;
767 Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
768 Assert(fEffective & PGM_PTATTRS_R_MASK);
769
770 RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
771 | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
772 PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPde, true /*a_fFinal*/, GCPhysPde, pWalk);
773 pWalk->GCPhys = GCPhysPde;
774 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys); /** @todo why do we apply it here and not below?!? */
775 return rc;
776 }
777
778 if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
779 return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 2);
780# if PGM_GST_TYPE == PGM_TYPE_32BIT
781 fEffective = Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
782# else
783 fEffective &= Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
784 fEffective |= Pde.u & X86_PDE_PAE_NX;
785# endif
786 PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPde->u, Pde.u, X86_PDE_A);
787 pWalk->fEffective = fEffective;
788
789 /*
790 * The PT.
791 */
792 RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
793 PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPt, false /*a_fFinal*/, GCPhysPt, pWalk);
794 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
795 if (RT_SUCCESS(rc)) { /* probable */ }
796 else return PGM_GST_NAME(WalkFastReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
797 }
798 {
799 PGSTPTE pPte;
800 pGstWalk->pPte = pPte = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
801 GSTPTE Pte;
802# if PGM_GST_TYPE != PGM_TYPE_32BIT
803 pGstWalk->Pte.u = Pte.u = ASMAtomicUoReadU64(&pPte->u);
804# else
805 pGstWalk->Pte.u = Pte.u = ASMAtomicUoReadU32(&pPte->u);
806# endif
807
808 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
809 else return PGM_GST_NAME(WalkFastReturnNotPresent)(pVCpu, pWalk, 1);
810
811 if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
812 else return PGM_GST_NAME(WalkFastReturnRsvdError)(pVCpu, pWalk, 1);
813
814 /*
815 * We're done.
816 */
817 pWalk->fInfo = PGM_WALKINFO_SUCCEEDED;
818
819 fEffective &= Pte.u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
820# if PGM_GST_TYPE != PGM_TYPE_32BIT
821 fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G | X86_PTE_PAE_NX);
822# else
823 fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
824# endif
825
826 rc = VINF_SUCCESS;
827 if (a_fSetFlags)
828 {
829 /* We have to validate the access before setting any flags. */
830 uint32_t fFailed = 0;
831 if ((fFlags & PGMQPAGE_F_USER_MODE) && !(fEffective & X86_PTE_US))
832 fFailed |= PGM_WALKFAIL_NOT_ACCESSIBLE_BY_MODE;
833 if (fFlags & PGMQPAGE_F_WRITE)
834 {
835 if ((fEffective & X86_PTE_RW) || (fFlags & (PGMQPAGE_F_USER_MODE | PGMQPAGE_F_CR0_WP0)) == PGMQPAGE_F_CR0_WP0)
836 { /* likely*/ }
837 else fFailed |= PGM_WALKFAIL_NOT_WRITABLE;
838 }
839# if PGM_GST_TYPE != PGM_TYPE_32BIT
840 else if (fFlags & PGMQPAGE_F_EXECUTE)
841 {
842 if (!(fEffective & X86_PTE_PAE_NX) || !pVCpu->pgm.s.fNoExecuteEnabled) { /* likely */ }
843 else fFailed |= PGM_WALKFAIL_NOT_EXECUTABLE;
844 }
845# endif
846 if (fFailed == 0)
847 {
848 if (!(fFlags & PGMQPAGE_F_WRITE))
849 PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPte->u, Pte.u, X86_PTE_A);
850 else
851 PGM_GST_ENSURE_ENTRY_FLAGS_SET(a_pVCpu, fEffective, &pPte->u, Pte.u, X86_PTE_A | X86_PTE_D);
852 }
853 else
854 {
855 pWalk->fFailed = fFailed | (1U << PGM_WALKFAIL_LEVEL_SHIFT);
856 pWalk->fInfo = 0;
857 rc = VERR_ACCESS_DENIED;
858 }
859 }
860
861 pWalk->fEffective = fEffective;
862 Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
863 Assert(fEffective & PGM_PTATTRS_R_MASK);
864
865 RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
866 | (GCPtr & GUEST_PAGE_OFFSET_MASK);
867 PGM_GST_SLAT_WALK_FAST(pVCpu, GCPtr, GCPhysPte, true /*a_fFinal*/, GCPhysPte, pWalk);
868 pWalk->GCPhys = GCPhysPte;
869 return rc;
870 }
871# undef PGM_GST_SLAT_WALK_FAST
872# undef PGM_GST_ENSURE_ENTRY_FLAGS_SET
873}
874
875#endif /* 32BIT, PAE, AMD64 */
876
877/**
878 * Guest virtual to guest physical + info translation, the faster and better
879 * version.
880 *
881 * @returns VBox status code.
882 * @param pVCpu The cross context virtual CPU structure.
883 * @param GCPtr Guest Context virtual address of the page.
884 * @param fFlags PGMQPAGE_F_XXX
885 * @param pWalk Where to store the page walk info.
886 * @thread EMT(pVCpu)
887 */
890PGM_GST_DECL(int, QueryPageFast)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk)
891{
892#if PGM_GST_TYPE == PGM_TYPE_REAL \
893 || PGM_GST_TYPE == PGM_TYPE_PROT
894
895# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
896 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
897 {
898 /** @todo optimize this case as well. */
899 /** @todo pass fFlags along. */
900 PGMPTWALK WalkSlat;
901 PGMPTWALKGST WalkGstSlat;
902 int const rc = pgmGstSlatWalk(pVCpu, GCPtr, true /* fIsLinearAddrValid */, GCPtr, &WalkSlat, &WalkGstSlat);
903 if (RT_SUCCESS(rc))
904 {
905 PGMPTWALKFAST_ZERO(pWalk);
906 pWalk->GCPtr = GCPtr;
907 pWalk->GCPhys = WalkSlat.GCPhys;
908 pWalk->GCPhysNested = 0;
909 pWalk->u64Union = 0;
910 pWalk->fInfo = PGM_WALKINFO_SUCCEEDED;
911 pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D;
912 }
913 else
914 PGM_NAME(ConvertPtWalkToFast)(&WalkSlat, pWalk);
915 return rc;
916 }
917# endif
918
919 /*
920 * Fake it.
921 */
922 pWalk->GCPtr = GCPtr;
923 pWalk->GCPhys = GCPtr;
924 pWalk->GCPhysNested = 0;
925 pWalk->fInfo = PGM_WALKINFO_SUCCEEDED;
926 pWalk->fFailed = PGM_WALKFAIL_SUCCESS;
927 pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D;
928 RT_NOREF(pVCpu, fFlags);
929 return VINF_SUCCESS;
930
931#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
932 || PGM_GST_TYPE == PGM_TYPE_PAE \
933 || PGM_GST_TYPE == PGM_TYPE_AMD64
934
935 GSTPTWALK GstWalk;
936 int rc;
937# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX)
938 switch (pVCpu->pgm.s.enmGuestSlatMode)
939 {
940 case PGMSLAT_DIRECT:
941# endif
942 if (fFlags)
943 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_DIRECT, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
944 else
945 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_DIRECT, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
946# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX)
947 break;
948# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
949 case PGMSLAT_EPT:
950 if (fFlags)
951 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_EPT, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
952 else
953 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_EPT, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
954 break;
955# endif
956# ifdef VBOX_WITH_NESTED_HWVIRT_SVM_XXX
957 case PGMSLAT_32BIT:
958 if (fFlags)
959 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_32BIT, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
960 else
961 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_32BIT, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
962 break;
963 case PGMSLAT_PAE:
964 if (fFlags)
965 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_PAE, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
966 else
967 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_PAE, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
968 break;
969 case PGMSLAT_AMD64:
970 if (fFlags)
971 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_AMD64, true>(pVCpu, GCPtr, fFlags, pWalk, &GstWalk);
972 else
973 rc = PGM_GST_NAME(WalkFast)<PGMSLAT_AMD64, false>(pVCpu, GCPtr, 0, pWalk, &GstWalk);
974 break;
975# endif
976 default:
977 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
978 }
979# endif
980 if (RT_SUCCESS(rc))
981 {
982 Assert(pWalk->fInfo & PGM_WALKINFO_SUCCEEDED);
983 Assert(pWalk->GCPtr == GCPtr);
984 Assert((pWalk->GCPhys & GUEST_PAGE_OFFSET_MASK) == (GCPtr & GUEST_PAGE_OFFSET_MASK));
985 return VINF_SUCCESS;
986 }
987 return rc;
988
989#else
990# error "shouldn't be here!"
991 /* something else... */
992 return VERR_NOT_SUPPORTED;
993#endif
994}
995
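A hedged caller sketch for the fast path above, in the spirit of the IEM usage mentioned in the commit message (the helper name is invented; the flags, types and macros are the ones used in this file). Passing PGMQPAGE_F_WRITE makes WalkFast validate writability and set the accessed/dirty bits on success:

DECLINLINE(int) pgmExampleProbeGuestWrite(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fUserMode, PRTGCPHYS pGCPhys)
{
    PGMPTWALKFAST WalkFast;
    PGMPTWALKFAST_ZERO(&WalkFast);
    uint32_t fQPage = PGMQPAGE_F_WRITE;
    if (fUserMode)
        fQPage |= PGMQPAGE_F_USER_MODE;
    int rc = PGM_GST_NAME(QueryPageFast)(pVCpu, GCPtr, fQPage, &WalkFast);
    if (RT_SUCCESS(rc))
    {
        *pGCPhys = WalkFast.GCPhys; /* already includes the page offset, see the assertion above */
        return VINF_SUCCESS;
    }
    /* On VERR_ACCESS_DENIED, VERR_PAGE_TABLE_NOT_PRESENT and friends the caller would
       typically raise a #PF; WalkFast.fFailed records the reason and the failing level. */
    return rc;
}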
998
999/**
1000 * Modify page flags for a range of pages in the guest's tables
1001 *
1002 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1003 *
1004 * @returns VBox status code.
1005 * @param pVCpu The cross context virtual CPU structure.
1006 * @param GCPtr Virtual address of the first page in the range. Page aligned!
1007 * @param cb Size (in bytes) of the page range to apply the modification to. Page aligned!
1008 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1009 * @param fMask The AND mask - page flags X86_PTE_*.
1010 */
1011PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1012{
1013 Assert((cb & GUEST_PAGE_OFFSET_MASK) == 0); RT_NOREF_PV(cb);
1014
1015#if PGM_GST_TYPE == PGM_TYPE_32BIT \
1016 || PGM_GST_TYPE == PGM_TYPE_PAE \
1017 || PGM_GST_TYPE == PGM_TYPE_AMD64
1018 for (;;)
1019 {
1020 PGMPTWALK Walk;
1021 GSTPTWALK GstWalk;
1022 int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
1023 if (RT_FAILURE(rc))
1024 return rc;
1025
1026 if (!Walk.fBigPage)
1027 {
1028 /*
1029 * 4KB Page table, process
1030 *
1031 * Walk pages till we're done.
1032 */
1033 unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
1034 while (iPTE < RT_ELEMENTS(GstWalk.pPt->a))
1035 {
1036 GSTPTE Pte = GstWalk.pPt->a[iPTE];
1037 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
1038 | (fFlags & ~GST_PTE_PG_MASK);
1039 GstWalk.pPt->a[iPTE] = Pte;
1040
1041 /* next page */
1042 cb -= GUEST_PAGE_SIZE;
1043 if (!cb)
1044 return VINF_SUCCESS;
1045 GCPtr += GUEST_PAGE_SIZE;
1046 iPTE++;
1047 }
1048 }
1049 else
1050 {
1051 /*
1052 * 2/4MB Page table
1053 */
1054 GSTPDE PdeNew;
1055# if PGM_GST_TYPE == PGM_TYPE_32BIT
1056 PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
1057# else
1058 PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
1059# endif
1060 | (fFlags & ~GST_PTE_PG_MASK)
1061 | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
1062 *GstWalk.pPde = PdeNew;
1063
1064 /* advance */
1065 const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
1066 if (cbDone >= cb)
1067 return VINF_SUCCESS;
1068 cb -= cbDone;
1069 GCPtr += cbDone;
1070 }
1071 }
1072
1073#else
1074 /* real / protected mode: ignore. */
1075 NOREF(pVCpu); NOREF(GCPtr); NOREF(fFlags); NOREF(fMask);
1076 return VINF_SUCCESS;
1077#endif
1078}
1079
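Because the OR/AND convention above is easy to get backwards, here is a small illustrative call (GCPtrPage is a hypothetical page-aligned guest address): write-protecting a single page means OR-ing in nothing and AND-ing with a mask that clears only the RW bit.

int rc = PGM_GST_NAME(ModifyPage)(pVCpu, GCPtrPage, GUEST_PAGE_SIZE,
                                  0 /* fFlags: no bits to set */,
                                  ~(uint64_t)X86_PTE_RW /* fMask: keep everything but RW */);
AssertRC(rc);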
1080
1081#ifdef IN_RING3
1082/**
1083 * Relocate any GC pointers related to guest mode paging.
1084 *
1085 * @returns VBox status code.
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param offDelta The relocation offset.
1088 */
1089PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
1090{
1091 RT_NOREF(pVCpu, offDelta);
1092 return VINF_SUCCESS;
1093}
1094#endif