VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@73244

Last change on this file since 73244 was 73199, checked in by vboxsync, 7 years ago

PGM: Working on eliminating PGMMODEDATA and the corresponding PGMCPU section so we can do mode switching in ring-0. This first part deals with guest mode specific pointers. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 26.6 KB
/* $Id: PGMAllGst.h 73199 2018-07-18 12:13:55Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64
static int PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
#endif
PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE);
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);

#ifdef IN_RING3 /* r3 only for now. */
PGM_GST_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
PGM_GST_DECL(int, Exit)(PVMCPU pVCpu);
#endif
RT_C_DECLS_END


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64


DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPU pVCpu, PGSTPTWALK pWalk, int iLevel)
{
    NOREF(iLevel); NOREF(pVCpu);
    pWalk->Core.fNotPresent = true;
    pWalk->Core.uLevel      = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPU pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->Core.fBadPhysAddr = true;
    pWalk->Core.uLevel       = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPU pVCpu, PGSTPTWALK pWalk, int iLevel)
{
    NOREF(pVCpu);
    pWalk->Core.fRsvdError = true;
    pWalk->Core.uLevel     = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


/**
 * Performs a guest page table walk.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       Where to return the walk result. This is always set.
 */
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
{
    int rc;

    /*
     * Init the walking structure.
     */
    RT_ZERO(*pWalk);
    pWalk->Core.GCPtr = GCPtr;

# if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Boundary check for PAE and 32-bit (prevents trouble further down).
     */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
# endif

    uint32_t register fEffective = X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | 1;
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64
        /*
         * The PML4E.
         */
        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

        PX86PML4E register pPml4e;
        pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
        X86PML4E register Pml4e;
        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (Pml4e.n.u1Present) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);

        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        pWalk->Core.fEffective = fEffective = ((uint32_t)Pml4e.u & (X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A))
                                            | ((uint32_t)(Pml4e.u >> 63) ^ 1) /*NX */;

        /*
         * The PDPE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        PX86PDPE register pPdpe;
        pWalk->pPdpe = pPdpe = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        X86PDPE register Pdpe;
        pWalk->Pdpe.u = Pdpe.u = pPdpe->u;

        if (Pdpe.n.u1Present) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);

        if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);

# if PGM_GST_TYPE == PGM_TYPE_AMD64
        pWalk->Core.fEffective = fEffective &= ((uint32_t)Pdpe.u & (X86_PDPE_RW | X86_PDPE_US | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A))
                                             | ((uint32_t)(Pdpe.u >> 63) ^ 1) /*NX */;
# else
        pWalk->Core.fEffective = fEffective = X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
                                            | ((uint32_t)Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD))
                                            | ((uint32_t)(Pdpe.u >> 63) ^ 1) /*NX */;
# endif

        /*
         * The PDE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
# elif PGM_GST_TYPE == PGM_TYPE_32BIT
        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
        PGSTPDE register pPde;
        pWalk->pPde = pPde = &pWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
        GSTPDE Pde;
        pWalk->Pde.u = Pde.u = pPde->u;
        if (Pde.n.u1Present) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
        if (Pde.n.u1Size && GST_IS_PSE_ACTIVE(pVCpu))
        {
            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
            else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);

            /*
             * We're done.
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            fEffective &= Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
# else
            fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A))
                        | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;
# endif
            fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
            fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
            pWalk->Core.fEffective = fEffective;

            pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
            pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
            pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
# else
            pWalk->Core.fEffectiveNX = false;
# endif
            pWalk->Core.fBigPage   = true;
            pWalk->Core.fSucceeded = true;

            pWalk->Core.GCPhys = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                               | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
            return VINF_SUCCESS;
        }

        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        pWalk->Core.fEffective = fEffective &= Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
# else
        pWalk->Core.fEffective = fEffective &= ((uint32_t)Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))
                                             | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;
# endif

        /*
         * The PTE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        PGSTPTE register pPte;
        pWalk->pPte = pPte = &pWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        GSTPTE register Pte;
        pWalk->Pte.u = Pte.u = pPte->u;

        if (Pte.n.u1Present) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);

        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        /*
         * We're done.
         */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        fEffective &= Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
# else
        fEffective &= ((uint32_t)Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A))
                    | ((uint32_t)(Pte.u >> 63) ^ 1) /*NX */;
# endif
        fEffective |= (uint32_t)Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
        pWalk->Core.fEffective = fEffective;

        pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
        pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
# else
        pWalk->Core.fEffectiveNX = false;
# endif
        pWalk->Core.fSucceeded = true;

        pWalk->Core.GCPhys = GST_GET_PDE_GCPHYS(Pte)
                           | (GCPtr & PAGE_OFFSET_MASK);
        return VINF_SUCCESS;
    }
}
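
/* Editor's illustration, not part of the original file: a minimal sketch of how a
   mode-specific caller might consume the walk result above. The helper name
   ExampleTranslate and its wiring are assumptions for illustration only; GetPage
   below is the real in-tree consumer. Kept under #if 0 so it never compiles. */
#if 0
DECLINLINE(int) PGM_GST_NAME(ExampleTranslate)(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys, bool *pfWritable)
{
    GSTPTWALK Walk;
    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
    if (RT_FAILURE(rc))
        return rc;                              /* VERR_PAGE_TABLE_NOT_PRESENT; Walk.Core.uLevel and the
                                                   fNotPresent/fBadPhysAddr/fRsvdError flags say why. */
    Assert(Walk.Core.fSucceeded);
    *pGCPhys    = Walk.Core.GCPhys;             /* guest-physical address, big pages already folded in */
    *pfWritable = Walk.Core.fEffectiveRW;       /* permission accumulated across all paging levels */
    return VINF_SUCCESS;
}
#endif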

#endif /* 32BIT, PAE, AMD64 */

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal pages becomes
 * necessary at a later point, a PGMGstGetPageEx() will be created for that
 * purpose.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned!
 */
PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
    || PGM_GST_TYPE == PGM_TYPE_PROT
    /*
     * Fake it.
     */
    if (pfFlags)
        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    if (pGCPhys)
        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
    NOREF(pVCpu);
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64

    GSTPTWALK Walk;
    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
    if (RT_FAILURE(rc))
        return rc;

    if (pGCPhys)
        *pGCPhys = Walk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;

    if (pfFlags)
    {
        if (!Walk.Core.fBigPage)
            *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
# endif
                     ;
        else
        {
            *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
                     | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT)
                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
# endif
                     ;
        }
    }

    return VINF_SUCCESS;

#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
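
/* Editor's illustration, not part of the original file: a sketch of checking page
   flags through a guest-mode dispatched call. PGMGstGetPage is assumed here to be
   the generic wrapper that ends up in this template; the helper name is made up.
   Kept under #if 0 so it never compiles. */
#if 0
static bool ExampleIsWritableGuestPage(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    uint64_t fFlags = 0;
    RTGCPHYS GCPhys = NIL_RTGCPHYS;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
    /* Both X86_PTE_P and X86_PTE_RW are reported even when GCPtr lies inside a
       big page, as documented in the function comment above. */
    return RT_SUCCESS(rc)
        && (fFlags & X86_PTE_P)
        && (fFlags & X86_PTE_RW);
}
#endif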


/**
 * Modify page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    Assert((cb & PAGE_OFFSET_MASK) == 0); RT_NOREF_PV(cb);

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64
    for (;;)
    {
        GSTPTWALK Walk;
        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
        if (RT_FAILURE(rc))
            return rc;

        if (!Walk.Core.fBigPage)
        {
            /*
             * 4KB Page table, process
             *
             * Walk pages till we're done.
             */
            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(Walk.pPt->a))
            {
                GSTPTE Pte = Walk.pPt->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                Walk.pPt->a[iPTE] = Pte;

                /* next page */
                cb -= PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 2/4MB Page table
             */
            GSTPDE PdeNew;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                     | (fFlags & ~GST_PTE_PG_MASK)
                     | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *Walk.pPde = PdeNew;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    NOREF(pVCpu); NOREF(GCPtr); NOREF(fFlags); NOREF(fMask);
    return VINF_SUCCESS;
#endif
}
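
/* Editor's illustration, not part of the original file: what the fFlags/fMask
   contract above means in practice. To write-protect a page you keep every flag
   except RW in the AND mask and OR in nothing. The helper name is made up and the
   snippet is a sketch against this template's contract only. Kept under #if 0. */
#if 0
static int ExampleWriteProtectPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    uint64_t const fFlags = 0;                      /* nothing to set */
    uint64_t const fMask  = ~(uint64_t)X86_PTE_RW;  /* clear RW, keep everything else */
    return PGM_GST_NAME(ModifyPage)(pVCpu, GCPtrPage, PAGE_SIZE, fFlags, fMask);
}
#endif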


/**
 * Retrieve guest PDE information.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest context pointer.
 * @param   pPDE        Pointer to guest PDE structure.
 */
PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64

# if PGM_GST_TYPE != PGM_TYPE_AMD64
    /* Boundary check. */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return VERR_PAGE_TABLE_NOT_PRESENT;
# endif

# if PGM_GST_TYPE == PGM_TYPE_32BIT
    unsigned iPd = (GCPtr >> GST_PD_SHIFT) & GST_PD_MASK;
    PX86PD   pPd = pgmGstGet32bitPDPtr(pVCpu);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
    unsigned   iPd = 0; /* shut up gcc */
    PCX86PDPAE pPd = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPd, NULL);

# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    PX86PML4E  pPml4eIgn;
    X86PDPE    PdpeIgn;
    unsigned   iPd = 0; /* shut up gcc */
    PCX86PDPAE pPd = pgmGstGetLongModePDPtr(pVCpu, GCPtr, &pPml4eIgn, &PdpeIgn, &iPd);
    /* Note! We do not return an effective PDE here like we do for the PTE in GetPage method. */
# endif

    if (RT_LIKELY(pPd))
        pPDE->u = (X86PGPAEUINT)pPd->a[iPd].u;
    else
        pPDE->u = 0;
    return VINF_SUCCESS;

#else
    NOREF(pVCpu); NOREF(GCPtr); NOREF(pPDE);
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}
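
/* Editor's illustration, not part of the original file: a sketch of how the
   unified PX86PDEPAE result can be inspected regardless of guest mode. The helper
   name is hypothetical. Kept under #if 0 so it never compiles. */
#if 0
static bool ExampleIsBigPageMapping(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    X86PDEPAE Pde;
    int rc = PGM_GST_NAME(GetPDE)(pVCpu, GCPtr, &Pde);
    /* Note: a zero PDE is returned when the page directory could not be mapped. */
    return RT_SUCCESS(rc) && Pde.n.u1Present && Pde.b.u1Size;
}
#endif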


#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
     || PGM_GST_TYPE == PGM_TYPE_PAE \
     || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    && defined(VBOX_WITH_RAW_MODE)
/**
 * Updates one virtual handler range.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to a PGMVHUARGS structure (see PGM.cpp).
 */
static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMHVUSTATE           pState   = (PPGMHVUSTATE)pvUser;
    PVM                    pVM      = pState->pVM;
    PVMCPU                 pVCpu    = pState->pVCpu;
    PPGMVIRTHANDLER        pCur     = (PPGMVIRTHANDLER)pNode;
    PPGMVIRTHANDLERTYPEINT pCurType = PGMVIRTANDLER_GET_TYPE(pVM, pCur);

    Assert(pCurType->enmKind != PGMVIRTHANDLERKIND_HYPERVISOR); NOREF(pCurType);

# if PGM_GST_TYPE == PGM_TYPE_32BIT
    PX86PD pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
# endif

    RTGCPTR GCPtr = pCur->Core.Key;
# if PGM_GST_TYPE != PGM_TYPE_AMD64
    /* skip all stuff above 4GB if not AMD64 mode. */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return 0;
# endif

    unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
    unsigned iPage   = 0;
    while (iPage < pCur->cPages)
    {
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        X86PDE    Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
# elif PGM_GST_TYPE == PGM_TYPE_PAE
        X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
        X86PDEPAE Pde = pgmGstGetLongModePDE(pVCpu, GCPtr);
# endif
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        bool const fBigPage = Pde.b.u1Size && (pState->cr4 & X86_CR4_PSE);
# else
        bool const fBigPage = Pde.b.u1Size;
# endif
        if (   Pde.n.u1Present
            && (  !fBigPage
                ? GST_IS_PDE_VALID(pVCpu, Pde)
                : GST_IS_BIG_PDE_VALID(pVCpu, Pde)) )
        {
            if (!fBigPage)
            {
                /*
                 * Normal page table.
                 */
                PGSTPT pPT;
                int rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GST_GET_PDE_GCPHYS(Pde), &pPT);
                if (RT_SUCCESS(rc))
                {
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                    {
                        GSTPTE   Pte = pPT->a[iPTE];
                        RTGCPHYS GCPhysNew;
                        if (Pte.n.u1Present)
                            GCPhysNew = PGM_A20_APPLY(pVCpu, (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage);
                        else
                            GCPhysNew = NIL_RTGCPHYS;
                        if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                        {
                            if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                                pgmHandlerVirtualClearPage(pVM, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
                else
                {
                    /* not-present. */
                    offPage = 0;
                    AssertRC(rc);
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                        {
                            pgmHandlerVirtualClearPage(pVM, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
            }
            else
            {
                /*
                 * 2/4MB page.
                 */
                RTGCPHYS GCPhys = (RTGCPHYS)GST_GET_PDE_GCPHYS(Pde);
                for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                     i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
                     i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                {
                    RTGCPHYS GCPhysNew = PGM_A20_APPLY(pVCpu, GCPhys + (i4KB << PAGE_SHIFT) + offPage);
                    if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                            pgmHandlerVirtualClearPage(pVM, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                        AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                         ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
                                          pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                          pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                        pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                        pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                    }
                }
            } /* pde type */
        }
        else
        {
            /* not-present / invalid. */
            Log(("VirtHandler: Not present / invalid Pde=%RX64\n", (uint64_t)Pde.u));
            for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
                 cPages && iPage < pCur->cPages;
                 iPage++, GCPtr += PAGE_SIZE)
            {
                if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                {
                    pgmHandlerVirtualClearPage(pVM, pCur, iPage);
                    pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                    pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                }
            }
            offPage = 0;
        }
    } /* for pages in virtual mapping. */

    return 0;
}
#endif /* 32BIT, PAE and AMD64 + VBOX_WITH_RAW_MODE */


/**
 * Updates the virtual page access handlers.
 *
 * @returns true if bits were flushed.
 * @returns false if bits weren't flushed.
 * @param   pVM     The cross context VM structure.
 * @param   cr4     The cr4 register value.
 */
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
{
#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
     || PGM_GST_TYPE == PGM_TYPE_PAE \
     || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    && defined(VBOX_WITH_RAW_MODE)

    /** @todo
     * In theory this is not sufficient: the guest can change a single page in a range with invlpg
     */

    /*
     * Resolve any virtual address based access handlers to GC physical addresses.
     * This should be fairly quick.
     */
    RTUINT fTodo = 0;

    pgmLock(pVM);
    STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PGMHVUSTATE State;
        PVMCPU      pVCpu = &pVM->aCpus[i];

        State.pVM   = pVM;
        State.pVCpu = pVCpu;
        State.fTodo = pVCpu->pgm.s.fSyncFlags;
        State.cr4   = cr4;
        RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);

        fTodo |= State.fTodo;
    }
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);


    /*
     * Set / reset bits?
     */
    if (fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
    {
        STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
        Log(("HandlerVirtualUpdate: resets bits\n"));
        RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);

        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            PVMCPU pVCpu = &pVM->aCpus[i];
            pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
        }

        STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
    }
    pgmUnlock(pVM);

    return !!(fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);

#else /* real / protected */
    NOREF(pVM); NOREF(cr4);
    return false;
#endif
}
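
/* Editor's illustration, not part of the original file: how a CR3-sync path might
   consume the return value above. The surrounding function is hypothetical; in the
   real tree the caller sits in the SyncCR3 code path. Kept under #if 0. */
#if 0
static void ExampleAfterCr3Change(PVM pVM, uint32_t cr4)
{
    /* A true return means virtual-handler dirty bits were reset, so the shadow
       page tables must have their handler protection re-established. */
    bool const fFlushed = PGM_GST_NAME(HandlerVirtualUpdate)(pVM, cr4);
    NOREF(fFlushed); /* a real caller would trigger the re-sync here */
}
#endif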