1/* $Id: PGMAllGst.h 13937 2008-11-06 20:52:05Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Guest Paging Template - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Defined Constants And Macros *
25*******************************************************************************/
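/*
 * Editorial note (not part of the original file): this is a template header that the
 * PGM code includes once per guest paging mode, with PGM_GST_TYPE set to REAL, PROT,
 * 32BIT, PAE or AMD64, so every mode-dependent macro is #undef'ed first and then
 * redefined for the current instantiation.
 */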
26#undef GSTPT
27#undef PGSTPT
28#undef GSTPTE
29#undef PGSTPTE
30#undef GSTPD
31#undef PGSTPD
32#undef GSTPDE
33#undef PGSTPDE
34#undef GST_BIG_PAGE_SIZE
35#undef GST_BIG_PAGE_OFFSET_MASK
36#undef GST_PDE_PG_MASK
37#undef GST_PDE_BIG_PG_MASK
38#undef GST_PD_SHIFT
39#undef GST_PD_MASK
40#undef GST_PTE_PG_MASK
41#undef GST_PT_SHIFT
42#undef GST_PT_MASK
43#undef GST_TOTAL_PD_ENTRIES
44#undef GST_CR3_PAGE_MASK
45#undef GST_PDPE_ENTRIES
46#undef GST_PDPT_SHIFT
47#undef GST_PDPT_MASK
48#undef GST_PDPE_PG_MASK
49#undef GST_GET_PDE_BIG_PG_GCPHYS
50
51#if PGM_GST_TYPE == PGM_TYPE_REAL \
52 || PGM_GST_TYPE == PGM_TYPE_PROT
53# define GSTPT SHWPT
54# define PGSTPT PSHWPT
55# define GSTPTE SHWPTE
56# define PGSTPTE PSHWPTE
57# define GSTPD SHWPD
58# define PGSTPD PSHWPD
59# define GSTPDE SHWPDE
60# define PGSTPDE PSHWPDE
61# define GST_PTE_PG_MASK SHW_PTE_PG_MASK
62
63#elif PGM_GST_TYPE == PGM_TYPE_32BIT
64# define GSTPT X86PT
65# define PGSTPT PX86PT
66# define GSTPTE X86PTE
67# define PGSTPTE PX86PTE
68# define GSTPD X86PD
69# define PGSTPD PX86PD
70# define GSTPDE X86PDE
71# define PGSTPDE PX86PDE
72# define GST_BIG_PAGE_SIZE X86_PAGE_4M_SIZE
73# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_4M_OFFSET_MASK
74# define GST_PDE_PG_MASK X86_PDE_PG_MASK
75# define GST_PDE_BIG_PG_MASK X86_PDE4M_PG_MASK
76# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst) pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeGst)
77# define GST_PD_SHIFT X86_PD_SHIFT
78# define GST_PD_MASK X86_PD_MASK
79# define GST_TOTAL_PD_ENTRIES X86_PG_ENTRIES
80# define GST_PTE_PG_MASK X86_PTE_PG_MASK
81# define GST_PT_SHIFT X86_PT_SHIFT
82# define GST_PT_MASK X86_PT_MASK
83# define GST_CR3_PAGE_MASK X86_CR3_PAGE_MASK
84
85#elif PGM_GST_TYPE == PGM_TYPE_PAE \
86 || PGM_GST_TYPE == PGM_TYPE_AMD64
87# define GSTPT X86PTPAE
88# define PGSTPT PX86PTPAE
89# define GSTPTE X86PTEPAE
90# define PGSTPTE PX86PTEPAE
91# define GSTPD X86PDPAE
92# define PGSTPD PX86PDPAE
93# define GSTPDE X86PDEPAE
94# define PGSTPDE PX86PDEPAE
95# define GST_BIG_PAGE_SIZE X86_PAGE_2M_SIZE
96# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_2M_OFFSET_MASK
97# define GST_PDE_PG_MASK X86_PDE_PAE_PG_MASK_FULL
98# define GST_PDE_BIG_PG_MASK X86_PDE2M_PAE_PG_MASK
99# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst) (PdeGst.u & GST_PDE_BIG_PG_MASK)
100# define GST_PD_SHIFT X86_PD_PAE_SHIFT
101# define GST_PD_MASK X86_PD_PAE_MASK
102# if PGM_GST_TYPE == PGM_TYPE_PAE
103# define GST_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
104# define GST_PDPE_ENTRIES X86_PG_PAE_PDPE_ENTRIES
105# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK_FULL
106# define GST_PDPT_SHIFT X86_PDPT_SHIFT
107# define GST_PDPT_MASK X86_PDPT_MASK_PAE
108# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
109# define GST_CR3_PAGE_MASK X86_CR3_PAE_PAGE_MASK
110# else
111# define GST_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
112# define GST_PDPE_ENTRIES X86_PG_AMD64_PDPE_ENTRIES
113# define GST_PDPT_SHIFT X86_PDPT_SHIFT
114# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK_FULL
115# define GST_PDPT_MASK X86_PDPT_MASK_AMD64
116# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK_FULL
117# define GST_CR3_PAGE_MASK X86_CR3_AMD64_PAGE_MASK
118# endif
119# define GST_PT_SHIFT X86_PT_PAE_SHIFT
120# define GST_PT_MASK X86_PT_PAE_MASK
121#endif
122
123
124/*******************************************************************************
125* Internal Functions *
126*******************************************************************************/
127__BEGIN_DECLS
128PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
129PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
130PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE);
131PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
132PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
133PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
134PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
135PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
136#ifndef IN_RING3
137PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
138# if PGM_GST_TYPE == PGM_TYPE_PAE \
139 || PGM_GST_TYPE == PGM_TYPE_AMD64
140PGM_GST_DECL(int, PAEWriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
141# endif
142#endif
143__END_DECLS
144
145
146
147/**
148 * Gets effective Guest OS page information.
149 *
150 * When GCPtr is in a big page, the function will return as if it were a normal
151 * 4KB page. If the need to distinguish between big and normal pages arises at
152 * a later point, a PGMGstGetPageEx() will be created for that
153 * purpose.
154 *
155 * @returns VBox status.
156 * @param pVM VM Handle.
157 * @param GCPtr Guest Context virtual address of the page. Page aligned!
158 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
159 * @param pGCPhys Where to store the GC physical address of the page.
160 * This is page aligned.
161 */
162PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
163{
164#if PGM_GST_TYPE == PGM_TYPE_REAL \
165 || PGM_GST_TYPE == PGM_TYPE_PROT
166 /*
167 * Fake it.
168 */
169 if (pfFlags)
170 *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
171 if (pGCPhys)
172 *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
173 return VINF_SUCCESS;
174
175#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
176
177 /*
178 * Get the PDE.
179 */
180# if PGM_GST_TYPE == PGM_TYPE_32BIT
181 X86PDE Pde;
182 Pde.u = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
183
184#elif PGM_GST_TYPE == PGM_TYPE_PAE
185 X86PDEPAE Pde;
186 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
187
188 /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
189 * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx).
190 */
191 Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
192
193#elif PGM_GST_TYPE == PGM_TYPE_AMD64
194 PX86PML4E pPml4e;
195 X86PDPE Pdpe;
196 X86PDEPAE Pde;
197 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
198
199 Pde.u = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr, &pPml4e, &Pdpe);
200 Assert(pPml4e);
201 if (!(pPml4e->n.u1Present && Pdpe.n.u1Present))
202 return VERR_PAGE_TABLE_NOT_PRESENT;
203
204 /* Merge accessed, write, user and no-execute bits into the PDE. */
205 Pde.n.u1Accessed &= pPml4e->n.u1Accessed & Pdpe.lm.u1Accessed;
206 Pde.n.u1Write &= pPml4e->n.u1Write & Pdpe.lm.u1Write;
207 Pde.n.u1User &= pPml4e->n.u1User & Pdpe.lm.u1User;
208 Pde.n.u1NoExecute &= pPml4e->n.u1NoExecute & Pdpe.lm.u1NoExecute;
209# endif
210
211 /*
212 * Lookup the page.
213 */
214 if (!Pde.n.u1Present)
215 return VERR_PAGE_TABLE_NOT_PRESENT;
216
217 if ( !Pde.b.u1Size
218# if PGM_GST_TYPE != PGM_TYPE_AMD64
219 || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
220# endif
221 )
222 {
223 PGSTPT pPT;
224 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
225 if (RT_FAILURE(rc))
226 return rc;
227
228 /*
229 * Get PT entry and check presence.
230 */
231 const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
232 if (!Pte.n.u1Present)
233 return VERR_PAGE_NOT_PRESENT;
234
235 /*
236 * Store the result.
237 * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
238 * where the PDPE is simplified.
239 */
240 if (pfFlags)
241 {
242 *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
243 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
244# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
245 /* The NX bit is determined by a bitwise OR between the PT and PD */
246 if (fNoExecuteBitValid)
247 *pfFlags |= (Pte.u & Pde.u & X86_PTE_PAE_NX);
248# endif
249 }
250 if (pGCPhys)
251 *pGCPhys = Pte.u & GST_PTE_PG_MASK;
252 }
253 else
254 {
255 /*
256 * Map big to 4k PTE and store the result
257 */
258 if (pfFlags)
259 {
260 *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
261 | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
262# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
263 /* The NX bit is determined by a bitwise OR between the PT and PD */
264 if (fNoExecuteBitValid)
265 *pfFlags |= (Pde.u & X86_PTE_PAE_NX);
266# endif
267 }
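 /* Editorial note (not part of the original file): ~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK
    keeps exactly the bits between the 4KB page size and the big page size, i.e. it selects
    the 4KB page within the 2/4MB page that GCPtr falls into (0x003ff000 for 32-bit guests);
    the low 12 bits stay clear, so the stored address remains page aligned. */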
268 if (pGCPhys)
269 *pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));
270 }
271 return VINF_SUCCESS;
272#else
273# error "shouldn't be here!"
274 /* something else... */
275 return VERR_NOT_SUPPORTED;
276#endif
277}
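/*
 * Illustrative sketch (editorial, not part of the original file): callers normally reach
 * the mode-specific GetPage through a PGM wrapper; assuming the usual PGMGstGetPage()
 * signature, a query would look roughly like this:
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVM, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         ; // GCPhys is the page-aligned guest physical address and the page is writable.
 *
 * The wrapper name and dispatch path are assumptions based on PGM naming conventions.
 */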
278
279
280/**
281 * Modifies page flags for a range of pages in the guest's tables.
282 *
283 * The existing flags are ANDed with the fMask and ORed with the fFlags.
284 *
285 * @returns VBox status code.
286 * @param pVM VM handle.
287 * @param GCPtr Virtual address of the first page in the range. Page aligned!
288 * @param cb Size (in bytes) of the page range to apply the modification to. Page aligned!
289 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
290 * @param fMask The AND mask - page flags X86_PTE_*.
291 */
292PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
293{
294#if PGM_GST_TYPE == PGM_TYPE_32BIT \
295 || PGM_GST_TYPE == PGM_TYPE_PAE \
296 || PGM_GST_TYPE == PGM_TYPE_AMD64
297
298 for (;;)
299 {
300 /*
301 * Get the PD entry.
302 */
303# if PGM_GST_TYPE == PGM_TYPE_32BIT
304 PX86PDE pPde = pgmGstGet32bitPDEPtr(&pVM->pgm.s, GCPtr);
305
306# elif PGM_GST_TYPE == PGM_TYPE_PAE
307 /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present.
308 * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx).
309 */
310 PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
311 Assert(pPde);
312 if (!pPde)
313 return VERR_PAGE_TABLE_NOT_PRESENT;
314# elif PGM_GST_TYPE == PGM_TYPE_AMD64
315 /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
316 PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVM->pgm.s, GCPtr);
317 Assert(pPde);
318 if (!pPde)
319 return VERR_PAGE_TABLE_NOT_PRESENT;
320# endif
321 GSTPDE Pde = *pPde;
322 Assert(Pde.n.u1Present);
323 if (!Pde.n.u1Present)
324 return VERR_PAGE_TABLE_NOT_PRESENT;
325
326 if ( !Pde.b.u1Size
327# if PGM_GST_TYPE != PGM_TYPE_AMD64
328 || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
329# endif
330 )
331 {
332 /*
333 * 4KB Page table
334 *
335 * Walk page tables and pages till we're done.
336 */
337 PGSTPT pPT;
338 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
339 if (RT_FAILURE(rc))
340 return rc;
341
342 unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
343 while (iPTE < RT_ELEMENTS(pPT->a))
344 {
345 GSTPTE Pte = pPT->a[iPTE];
346 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
347 | (fFlags & ~GST_PTE_PG_MASK);
348 pPT->a[iPTE] = Pte;
349
350 /* next page */
351 cb -= PAGE_SIZE;
352 if (!cb)
353 return VINF_SUCCESS;
354 GCPtr += PAGE_SIZE;
355 iPTE++;
356 }
357 }
358 else
359 {
360 /*
361 * 4MB Page table
362 */
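 /* Editorial note (not part of the original file): in a 4KB PTE the PAT flag sits at bit 7,
    while in a 2/4MB PDE bit 7 is PS and PAT moves to bit 12; the X86_PDE4M_PAT_SHIFT shifts
    below translate the caller-supplied X86_PTE_PAT bit between those two layouts. */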
363# if PGM_GST_TYPE == PGM_TYPE_32BIT
364 Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
365# else
366 Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
367# endif
368 | (fFlags & ~GST_PTE_PG_MASK)
369 | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
370 *pPde = Pde;
371
372 /* advance */
373 const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
374 if (cbDone >= cb)
375 return VINF_SUCCESS;
376 cb -= cbDone;
377 GCPtr += cbDone;
378 }
379 }
380
381#else
382 /* real / protected mode: ignore. */
383 return VINF_SUCCESS;
384#endif
385}
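/*
 * Illustrative sketch (editorial, not part of the original file): with this AND/OR scheme,
 * write-protecting a page-aligned range means passing fFlags = 0 and fMask = ~X86_PTE_RW,
 * e.g. through the assumed public wrapper:
 *
 *     int rc = PGMGstModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *
 * Each existing PTE/PDE in the range is then ANDed with fMask (dropping R/W) and ORed with
 * fFlags (adding nothing), exactly as the loops above do.
 */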
386
387
388/**
389 * Retrieves guest PDE information.
390 *
391 * @returns VBox status code.
392 * @param pVM The virtual machine.
393 * @param GCPtr Guest context pointer
394 * @param pPDE Pointer to guest PDE structure
395 */
396PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE)
397{
398#if PGM_GST_TYPE == PGM_TYPE_32BIT \
399 || PGM_GST_TYPE == PGM_TYPE_PAE \
400 || PGM_GST_TYPE == PGM_TYPE_AMD64
401
402# if PGM_GST_TYPE == PGM_TYPE_32BIT
403 X86PDE Pde;
404 Pde.u = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
405# elif PGM_GST_TYPE == PGM_TYPE_PAE
406 X86PDEPAE Pde;
407 Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
408# elif PGM_GST_TYPE == PGM_TYPE_AMD64
409 X86PDEPAE Pde;
410 Pde.u = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr);
411# endif
412
413 pPDE->u = (X86PGPAEUINT)Pde.u;
414 return VINF_SUCCESS;
415#else
416 AssertFailed();
417 return VERR_NOT_IMPLEMENTED;
418#endif
419}
420
421
422
423/**
424 * Maps the CR3 into HMA in GC and locates it in HC.
425 *
426 * Note that a MapCR3 call is usually not followed by an UnmapCR3 call; whenever
427 * CR3 is updated we simply call MapCR3 again.
428 *
429 * @returns VBox status, no specials.
430 * @param pVM VM handle.
431 * @param GCPhysCR3 The physical address in the CR3 register.
432 */
433PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
434{
435#if PGM_GST_TYPE == PGM_TYPE_32BIT \
436 || PGM_GST_TYPE == PGM_TYPE_PAE \
437 || PGM_GST_TYPE == PGM_TYPE_AMD64
438
439 LogFlow(("MapCR3: %RGp\n", GCPhysCR3));
440
441 /*
442 * Map the page CR3 points at.
443 */
444 RTHCPHYS HCPhysGuestCR3;
445 RTHCPTR HCPtrGuestCR3;
446 int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
447 if (RT_SUCCESS(rc))
448 {
449 rc = PGMMap(pVM, (RTGCPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
450 if (RT_SUCCESS(rc))
451 {
452 PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
453# if PGM_GST_TYPE == PGM_TYPE_32BIT
454 pVM->pgm.s.pGuestPDR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
455# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
456 pVM->pgm.s.pGuestPDR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
457# endif
458 pVM->pgm.s.pGuestPDRC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;
459
460# elif PGM_GST_TYPE == PGM_TYPE_PAE
461 unsigned off = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
462 pVM->pgm.s.pGstPaePDPTR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
463# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
464 pVM->pgm.s.pGstPaePDPTR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
465# endif
466 pVM->pgm.s.pGstPaePDPTRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
467 Log(("Cached mapping %RGv\n", pVM->pgm.s.pGstPaePDPTRC));
468
469 /*
470 * Map the 4 PDs too.
471 */
472 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
473 RTGCPTR GCPtr = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
474 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
475 {
476 if (pGuestPDPT->a[i].n.u1Present)
477 {
478 RTHCPTR HCPtr;
479 RTHCPHYS HCPhys;
480 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
481 int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
482 if (RT_SUCCESS(rc2))
483 {
484 rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
485 AssertRCReturn(rc, rc);
486
487 pVM->pgm.s.apGstPaePDsR3[i] = (R3PTRTYPE(PX86PDPAE))HCPtr;
488# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
489 pVM->pgm.s.apGstPaePDsR0[i] = (R0PTRTYPE(PX86PDPAE))HCPtr;
490# endif
491 pVM->pgm.s.apGstPaePDsRC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
492 pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
493 PGM_INVL_PG(GCPtr); /** @todo This ends up calling HWACCMInvalidatePage, is that correct? */
494 continue;
495 }
496 AssertMsgFailed(("MapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
497 }
498
499 pVM->pgm.s.apGstPaePDsR3[i] = 0;
500# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
501 pVM->pgm.s.apGstPaePDsR0[i] = 0;
502# endif
503 pVM->pgm.s.apGstPaePDsRC[i] = 0;
504 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
505 PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */
506 }
507
508# elif PGM_GST_TYPE == PGM_TYPE_AMD64
509 pVM->pgm.s.pGstAmd64PML4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
510# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
511 pVM->pgm.s.pGstAmd64PML4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
512# endif
513 if (!HWACCMIsNestedPagingActive(pVM))
514 {
515 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
516 if (pVM->pgm.s.pHCShwAmd64CR3)
517 {
518 /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
519 if (pVM->pgm.s.pHCShwAmd64CR3->enmKind != PGMPOOLKIND_FREE)
520 pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
521 pVM->pgm.s.pHCShwAmd64CR3 = 0;
522 pVM->pgm.s.pHCPaePML4 = 0;
523 pVM->pgm.s.HCPhysPaePML4 = 0;
524 }
525
526 Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
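 /* Editorial note (not part of the original file): pgmPoolAlloc below fails with
    VERR_PGM_POOL_FLUSHED when the shadow page pool has no room; the pool is then synced
    (flushed) and the allocation retried via the label, so the retry terminates once a
    pool page is available. */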
527l_try_again:
528 rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.pHCShwAmd64CR3);
529 if (rc == VERR_PGM_POOL_FLUSHED)
530 {
531 Log(("MapCR3: Flush pool and try again\n"));
532 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
533 rc = pgmPoolSyncCR3(pVM);
534 AssertRC(rc);
535 goto l_try_again;
536 }
537 pVM->pgm.s.pHCPaePML4 = (PX86PML4)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3);
538 pVM->pgm.s.HCPhysPaePML4 = pVM->pgm.s.pHCShwAmd64CR3->Core.Key;
539 }
540# endif
541 }
542 else
543 AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
544 }
545 else
546 AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
547
548#else /* prot/real stub */
549 int rc = VINF_SUCCESS;
550#endif
551 return rc;
552}
553
554
555/**
556 * Unmaps the CR3.
557 *
558 * @returns VBox status, no specials.
559 * @param pVM VM handle.
560 */
561PGM_GST_DECL(int, UnmapCR3)(PVM pVM)
562{
563 LogFlow(("UnmapCR3\n"));
564
565 int rc = VINF_SUCCESS;
566
567#if PGM_GST_TYPE == PGM_TYPE_32BIT
568 pVM->pgm.s.pGuestPDR3 = 0;
569#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
570 pVM->pgm.s.pGuestPDR0 = 0;
571#endif
572 pVM->pgm.s.pGuestPDRC = 0;
573
574#elif PGM_GST_TYPE == PGM_TYPE_PAE
575 pVM->pgm.s.pGstPaePDPTR3 = 0;
576# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
577 pVM->pgm.s.pGstPaePDPTR0 = 0;
578# endif
579 pVM->pgm.s.pGstPaePDPTRC = 0;
580 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
581 {
582 pVM->pgm.s.apGstPaePDsR3[i] = 0;
583# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
584 pVM->pgm.s.apGstPaePDsR0[i] = 0;
585# endif
586 pVM->pgm.s.apGstPaePDsRC[i] = 0;
587 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
588 }
589
590#elif PGM_GST_TYPE == PGM_TYPE_AMD64
591 pVM->pgm.s.pGstAmd64PML4R3 = 0;
592# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
593 pVM->pgm.s.pGstAmd64PML4R0 = 0;
594# endif
595 if (!HWACCMIsNestedPagingActive(pVM))
596 {
597 pVM->pgm.s.pHCPaePML4 = 0;
598 pVM->pgm.s.HCPhysPaePML4 = 0;
599 if (pVM->pgm.s.pHCShwAmd64CR3)
600 {
601 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
602 pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
603 pVM->pgm.s.pHCShwAmd64CR3 = 0;
604 }
605 }
606
607#else /* prot/real mode stub */
608 /* nothing to do */
609#endif
610 return rc;
611}
612
613
614#undef LOG_GROUP
615#define LOG_GROUP LOG_GROUP_PGM_POOL
616
617/**
618 * Registers physical page monitors for the necessary paging
619 * structures to detect conflicts with our guest mappings.
620 *
621 * This is always called after mapping CR3.
622 * This is never called with fixed mappings.
623 *
624 * @returns VBox status, no specials.
625 * @param pVM VM handle.
626 * @param GCPhysCR3 The physical address in the CR3 register.
627 */
628PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
629{
630 Assert(!pVM->pgm.s.fMappingsFixed);
631 int rc = VINF_SUCCESS;
632
633 /*
634 * Register/Modify write phys handler for guest's CR3 if it changed.
635 */
636#if PGM_GST_TYPE == PGM_TYPE_32BIT
637
638 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
639 {
640# ifndef PGMPOOL_WITH_MIXED_PT_CR3
641 const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
642 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
643 rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
644 else
645 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
646 pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
647 pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
648 pVM->pgm.s.pfnRCGstWriteHandlerCR3, 0,
649 pVM->pgm.s.pszR3GstWriteHandlerCR3);
650# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
651 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
652 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
653 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
654 ? PGMPOOL_IDX_PAE_PD
655 : PGMPOOL_IDX_PD,
656 GCPhysCR3);
657# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
658 if (RT_FAILURE(rc))
659 {
660 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
661 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
662 return rc;
663 }
664 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
665 }
666
667#elif PGM_GST_TYPE == PGM_TYPE_PAE
668 /* Monitor the PDPT page */
669 /*
670 * Register/Modify write phys handler for guest's CR3 if it changed.
671 */
672# ifndef PGMPOOL_WITH_MIXED_PT_CR3
673 AssertFailed();
674# endif
675 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
676 {
677 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
678 if (RT_FAILURE(rc))
679 {
680 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
681 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
682 return rc;
683 }
684 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
685 }
686
687 /*
688 * Do the 4 PDs.
689 */
690 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
691 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
692 {
693 if (pGuestPDPT->a[i].n.u1Present)
694 {
695 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
696 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
697 {
698 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
699
700 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
701 }
702
703 if (RT_FAILURE(rc))
704 {
705 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
706 rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
707 return rc;
708 }
709 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
710 }
711 else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
712 {
713 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
714 AssertRC(rc);
715 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
716 }
717 }
718
719#else
720 /* prot/real/amd64 mode stub */
721
722#endif
723 return rc;
724}
725
726/**
727 * Deregisters any physical page monitors installed by MonitorCR3.
728 *
729 * @returns VBox status code, no specials.
730 * @param pVM The VM handle.
731 */
732PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
733{
734 int rc = VINF_SUCCESS;
735
736 /*
737 * Deregister the access handlers.
738 *
739 * PGMSyncCR3 will reinstall them if required, and PGMSyncCR3 will be executed
740 * before we enter GC again.
741 */
742#if PGM_GST_TYPE == PGM_TYPE_32BIT
743 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
744 {
745# ifndef PGMPOOL_WITH_MIXED_PT_CR3
746 rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
747 AssertRCReturn(rc, rc);
748# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
749 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
750 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
751 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
752 ? PGMPOOL_IDX_PAE_PD
753 : PGMPOOL_IDX_PD);
754 AssertRCReturn(rc, rc);
755# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
756 pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
757 }
758
759#elif PGM_GST_TYPE == PGM_TYPE_PAE
760 /* The PDPT page */
761# ifndef PGMPOOL_WITH_MIXED_PT_CR3
762 AssertFailed();
763# endif
764
765 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
766 {
767 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);
768 AssertRC(rc);
769 }
770
771 /* The 4 PDs. */
772 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
773 {
774 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
775 {
776 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
777 int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
778 AssertRC(rc2);
779 if (RT_FAILURE(rc2))
780 rc = rc2;
781 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
782 }
783 }
784#else
785 /* prot/real/amd64 mode stub */
786#endif
787 return rc;
788
789}
790
791#undef LOG_GROUP
792#define LOG_GROUP LOG_GROUP_PGM
793
794
795#if PGM_GST_TYPE == PGM_TYPE_32BIT \
796 || PGM_GST_TYPE == PGM_TYPE_PAE \
797 || PGM_GST_TYPE == PGM_TYPE_AMD64
798/**
799 * Updates one virtual handler range.
800 *
801 * @returns 0
802 * @param pNode Pointer to a PGMVIRTHANDLER.
803 * @param pvUser Pointer to a PGMVHUARGS structure (see PGM.cpp).
804 */
805static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
806{
807 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
808 PPGMHVUSTATE pState = (PPGMHVUSTATE)pvUser;
809 Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);
810
811#if PGM_GST_TYPE == PGM_TYPE_32BIT
812 PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pState->pVM->pgm.s);
813#endif
814
815 RTGCPTR GCPtr = pCur->Core.Key;
816#if PGM_GST_TYPE != PGM_TYPE_AMD64
817 /* skip all stuff above 4GB if not AMD64 mode. */
818 if (GCPtr >= _4GB)
819 return 0;
820#endif
821
822 unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
823 unsigned iPage = 0;
824 while (iPage < pCur->cPages)
825 {
826#if PGM_GST_TYPE == PGM_TYPE_32BIT
827 X86PDE Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
828#elif PGM_GST_TYPE == PGM_TYPE_PAE
829 X86PDEPAE Pde;
830 Pde.u = pgmGstGetPaePDE(&pState->pVM->pgm.s, GCPtr);
831#elif PGM_GST_TYPE == PGM_TYPE_AMD64
832 X86PDEPAE Pde;
833 Pde.u = pgmGstGetLongModePDE(&pState->pVM->pgm.s, GCPtr);
834#endif
835 if (Pde.n.u1Present)
836 {
837 if ( !Pde.b.u1Size
838# if PGM_GST_TYPE != PGM_TYPE_AMD64
839 || !(pState->cr4 & X86_CR4_PSE)
840# endif
841 )
842 {
843 /*
844 * Normal page table.
845 */
846 PGSTPT pPT;
847 int rc = PGM_GCPHYS_2_PTR(pState->pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
848 if (RT_SUCCESS(rc))
849 {
850 for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
851 iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
852 iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
853 {
854 GSTPTE Pte = pPT->a[iPTE];
855 RTGCPHYS GCPhysNew;
856 if (Pte.n.u1Present)
857 GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
858 else
859 GCPhysNew = NIL_RTGCPHYS;
860 if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
861 {
862 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
863 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
864#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
865 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
866 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
867 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
868 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
869#endif
870 pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
871 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
872 }
873 }
874 }
875 else
876 {
877 /* not-present. */
878 offPage = 0;
879 AssertRC(rc);
880 for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
881 iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
882 iPTE++, iPage++, GCPtr += PAGE_SIZE)
883 {
884 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
885 {
886 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
887#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
888 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
889 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
890 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
891 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
892#endif
893 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
894 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
895 }
896 }
897 }
898 }
899 else
900 {
901 /*
902 * 2/4MB page.
903 */
904 RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & GST_PDE_PG_MASK);
905 for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
906 i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
907 i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
908 {
909 RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
910 if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
911 {
912 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
913 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
914#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
915 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
916 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
917 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
918 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
919#endif
920 pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
921 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
922 }
923 }
924 } /* pde type */
925 }
926 else
927 {
928 /* not-present. */
929 for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
930 cPages && iPage < pCur->cPages;
931 iPage++, GCPtr += PAGE_SIZE)
932 {
933 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
934 {
935 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
936 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
937 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
938 }
939 }
940 offPage = 0;
941 }
942 } /* for pages in virtual mapping. */
943
944 return 0;
945}
946#endif /* 32BIT, PAE and AMD64 */
947
948
949/**
950 * Updates the virtual page access handlers.
951 *
952 * @returns true if bits were flushed.
953 * @returns false if bits weren't flushed.
954 * @param pVM VM handle.
956 * @param cr4 The cr4 register value.
957 */
958PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
959{
960#if PGM_GST_TYPE == PGM_TYPE_32BIT \
961 || PGM_GST_TYPE == PGM_TYPE_PAE \
962 || PGM_GST_TYPE == PGM_TYPE_AMD64
963
964 /** @todo
965 * In theory this is not sufficient: the guest can change a single page in a range with invlpg
966 */
967
968 /*
969 * Resolve any virtual address based access handlers to GC physical addresses.
970 * This should be fairly quick.
971 */
972 PGMHVUSTATE State;
973
974 pgmLock(pVM);
975 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
976 State.pVM = pVM;
977 State.fTodo = pVM->pgm.s.fSyncFlags;
978 State.cr4 = cr4;
979 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
980 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
981
982
983 /*
984 * Set / reset bits?
985 */
986 if (State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
987 {
988 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
989 Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
990 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
991 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
992 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
993 }
994 pgmUnlock(pVM);
995
996 return !!(State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);
997
998#else /* real / protected */
999 return false;
1000#endif
1001}
1002
1003
1004#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)
1005
1006/**
1007 * Write access handler for the Guest CR3 page in 32-bit mode.
1008 *
1009 * This will try to interpret the instruction and, on failure, fall back to the recompiler.
1010 * Check whether the changed PDEs are marked present and conflict with our
1011 * mappings. On a conflict, we'll switch to the host context and resolve it there.
1012 *
1013 * @returns VBox status code (appropriate for trap handling and GC return).
1014 * @param pVM VM Handle.
1015 * @param uErrorCode CPU Error code.
1016 * @param pRegFrame Trap register frame.
1017 * @param pvFault The fault address (cr2).
1018 * @param GCPhysFault The GC physical address corresponding to pvFault.
1019 * @param pvUser User argument.
1020 */
1021PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1022{
1023 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
1024
1025 /*
1026 * Try interpret the instruction.
1027 */
1028 uint32_t cb;
1029 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
1030 if (RT_SUCCESS(rc) && cb)
1031 {
1032 /*
1033 * Check if the modified PDEs are present and conflict with our mappings.
1034 */
1035 const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
1036 const unsigned iPD1 = offPD / sizeof(X86PDE);
1037 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDE);
1038
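 /* Editorial note (not part of the original file): a single emulated write is at most
    8 bytes here, so it can span at most two consecutive 32-bit PDEs; iPD1 and iPD2
    bracket the affected entries and both are checked against our mappings below. */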
1039 Assert(cb > 0 && cb <= 8);
1040 Assert(iPD1 < X86_PG_ENTRIES);
1041 Assert(iPD2 < X86_PG_ENTRIES);
1042
1043#ifdef DEBUG
1044 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD1, iPD1 << X86_PD_SHIFT));
1045 if (iPD1 != iPD2)
1046 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD2, iPD2 << X86_PD_SHIFT));
1047#endif
1048
1049 if (!pVM->pgm.s.fMappingsFixed)
1050 {
1051 PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
1052 if ( ( pPDSrc->a[iPD1].n.u1Present
1053 && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
1054 || ( iPD1 != iPD2
1055 && pPDSrc->a[iPD2].n.u1Present
1056 && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
1057 )
1058 {
1059 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
1060 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1061 if (rc == VINF_SUCCESS)
1062 rc = VINF_PGM_SYNC_CR3;
1063 Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
1064 return rc;
1065 }
1066 }
1067
1068 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
1069 }
1070 else
1071 {
1072 Assert(RT_FAILURE(rc));
1073 if (rc == VERR_EM_INTERPRETER)
1074 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
1075 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
1076 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
1077 }
1078 return rc;
1079}
1080
1081#endif /* PGM_TYPE_32BIT && !IN_RING3 */
1082#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)
1083
1084/**
1085 * Write access handler for the Guest CR3 page in PAE mode.
1086 *
1087 * This will try to interpret the instruction and, on failure, fall back to the recompiler.
1088 * Check whether the changed PDEs are marked present and conflict with our
1089 * mappings. On a conflict, we'll switch to the host context and resolve it there.
1090 *
1091 * @returns VBox status code (appropriate for trap handling and GC return).
1092 * @param pVM VM Handle.
1093 * @param uErrorCode CPU Error code.
1094 * @param pRegFrame Trap register frame.
1095 * @param pvFault The fault address (cr2).
1096 * @param GCPhysFault The GC physical address corresponding to pvFault.
1097 * @param pvUser User argument.
1098 */
1099PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1100{
1101 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
1102
1103 /*
1104 * Try interpret the instruction.
1105 */
1106 uint32_t cb;
1107 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
1108 if (RT_SUCCESS(rc) && cb)
1109 {
1110 /*
1111 * Check if any of the PDs have changed.
1112 * We'll simply check all of them instead of figuring out which one/two to check.
1113 */
1114 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
1115 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
1116 {
1117 if ( pGuestPDPT->a[i].n.u1Present
1118 && (pGuestPDPT->a[i].u & X86_PDPE_PG_MASK)
1119 != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
1120 {
1121 /*
1122 * The PDPE has changed.
1123 * We will schedule a monitoring update for the next TLB Flush,
1124 * InvalidatePage or SyncCR3.
1125 *
1126 * This isn't perfect, because a lazy page sync might be dealing with a
1127 * half-updated PDPE. However, we assume that the guest OS is disabling interrupts
1128 * and being extremely careful (cmpxchg8b) when updating a PDPE where it's
1129 * executing.
1130 */
1131 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1132 Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
1133 i, pGuestPDPT->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
1134 }
1135 }
1136
1137 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
1138 }
1139 else
1140 {
1141 Assert(RT_FAILURE(rc));
1142 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
1143 if (rc == VERR_EM_INTERPRETER)
1144 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
1145 }
1146 Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
1147 return rc;
1148}
1149
1150
1151/**
1152 * Write access handler for the Guest PDs in PAE mode.
1153 *
1154 * This will try to interpret the instruction and, on failure, fall back to the recompiler.
1155 * Check whether the changed PDEs are marked present and conflict with our
1156 * mappings. On a conflict, we'll switch to the host context and resolve it there.
1157 *
1158 * @returns VBox status code (appropriate for trap handling and GC return).
1159 * @param pVM VM Handle.
1160 * @param uErrorCode CPU Error code.
1161 * @param pRegFrame Trap register frame.
1162 * @param pvFault The fault address (cr2).
1163 * @param GCPhysFault The GC physical address corresponding to pvFault.
1164 * @param pvUser User argument.
1165 */
1166PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1167{
1168 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
1169
1170 /*
1171 * Try interpret the instruction.
1172 */
1173 uint32_t cb;
1174 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
1175 if (RT_SUCCESS(rc) && cb)
1176 {
1177 /*
1178 * Figure out which of the 4 PDs this is.
1179 */
1180 RTGCPTR i;
1181 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
1182 for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
1183 if (pGuestPDPT->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
1184 {
1185 PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
1186 const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
1187 const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
1188 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);
1189
1190 Assert(cb > 0 && cb <= 8);
1191 Assert(iPD1 < X86_PG_PAE_ENTRIES);
1192 Assert(iPD2 < X86_PG_PAE_ENTRIES);
1193
1194# ifdef LOG_ENABLED
1195 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",
1196 i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
1197 if (iPD1 != iPD2)
1198 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",
1199 i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
1200# endif
1201
1202 if (!pVM->pgm.s.fMappingsFixed)
1203 {
1204 if ( ( pPDSrc->a[iPD1].n.u1Present
1205 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
1206 || ( iPD1 != iPD2
1207 && pPDSrc->a[iPD2].n.u1Present
1208 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
1209 )
1210 {
1211 Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
1212 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
1213 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1214 return VINF_PGM_SYNC_CR3;
1215 }
1216 }
1217 break; /* ASSUMES no duplicate entries... */
1218 }
1219 Assert(i < 4);
1220
1221 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
1222 }
1223 else
1224 {
1225 Assert(RT_FAILURE(rc));
1226 if (rc == VERR_EM_INTERPRETER)
1227 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
1228 else
1229 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
1230 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
1231 }
1232 return rc;
1233}
1234
1235#endif /* PGM_TYPE_PAE && !IN_RING3 */
1236