VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@7676

Last change on this file since 7676 was 7676, checked in by vboxsync on 2008-04-01:

Cleaned up.
AMD64 shadow paging is only valid with AMD64 guest paging. Other combinations removed.
Simplified paging #ifdefs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.1 KB
1/* $Id: PGMAllGst.h 7676 2008-04-01 09:18:10Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Guest Paging Template - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#undef GSTPT
23#undef PGSTPT
24#undef GSTPTE
25#undef PGSTPTE
26#undef GSTPD
27#undef PGSTPD
28#undef GSTPDE
29#undef PGSTPDE
30#undef GST_BIG_PAGE_SIZE
31#undef GST_BIG_PAGE_OFFSET_MASK
32#undef GST_PDE_PG_MASK
33#undef GST_PDE_BIG_PG_MASK
34#undef GST_PD_SHIFT
35#undef GST_PD_MASK
36#undef GST_PTE_PG_MASK
37#undef GST_PT_SHIFT
38#undef GST_PT_MASK
39#undef GST_TOTAL_PD_ENTRIES
40#undef GST_CR3_PAGE_MASK
41
42#if PGM_GST_TYPE == PGM_TYPE_32BIT \
43 || PGM_GST_TYPE == PGM_TYPE_REAL \
44 || PGM_GST_TYPE == PGM_TYPE_PROT
45# define GSTPT X86PT
46# define PGSTPT PX86PT
47# define GSTPTE X86PTE
48# define PGSTPTE PX86PTE
49# define GSTPD X86PD
50# define PGSTPD PX86PD
51# define GSTPDE X86PDE
52# define PGSTPDE PX86PDE
53# define GST_BIG_PAGE_SIZE X86_PAGE_4M_SIZE
54# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_4M_OFFSET_MASK
55# define GST_PDE_PG_MASK X86_PDE_PG_MASK
56# define GST_PDE_BIG_PG_MASK X86_PDE4M_PG_MASK
57# define GST_PD_SHIFT X86_PD_SHIFT
58# define GST_PD_MASK X86_PD_MASK
59# define GST_TOTAL_PD_ENTRIES X86_PG_ENTRIES
60# define GST_PTE_PG_MASK X86_PTE_PG_MASK
61# define GST_PT_SHIFT X86_PT_SHIFT
62# define GST_PT_MASK X86_PT_MASK
63# define GST_CR3_PAGE_MASK X86_CR3_PAGE_MASK
64#elif PGM_GST_TYPE == PGM_TYPE_PAE \
65 || PGM_GST_TYPE == PGM_TYPE_AMD64
66# define GSTPT X86PTPAE
67# define PGSTPT PX86PTPAE
68# define GSTPTE X86PTEPAE
69# define PGSTPTE PX86PTEPAE
70# define GSTPD X86PDPAE
71# define PGSTPD PX86PDPAE
72# define GSTPDE X86PDEPAE
73# define PGSTPDE PX86PDEPAE
74# define GST_BIG_PAGE_SIZE X86_PAGE_2M_SIZE
75# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_2M_OFFSET_MASK
76# define GST_PDE_PG_MASK X86_PDE_PAE_PG_MASK
77# define GST_PDE_BIG_PG_MASK X86_PDE2M_PAE_PG_MASK
78# define GST_PD_SHIFT X86_PD_PAE_SHIFT
79# define GST_PD_MASK X86_PD_PAE_MASK
80# if PGM_GST_TYPE == PGM_TYPE_PAE
81# define GST_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPTE_ENTRIES)
82# else
83# define GST_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPTE_ENTRIES)
84# endif
85# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
86# define GST_PT_SHIFT X86_PT_PAE_SHIFT
87# define GST_PT_MASK X86_PT_PAE_MASK
88# define GST_CR3_PAGE_MASK X86_CR3_PAE_PAGE_MASK
89#endif
90
91
92/*******************************************************************************
93* Internal Functions *
94*******************************************************************************/
95__BEGIN_DECLS
96PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
97PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
98PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE);
99PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
100PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
101PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
102PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
103PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
104#ifndef IN_RING3
105PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
106# if PGM_GST_TYPE == PGM_TYPE_PAE \
107 || PGM_GST_TYPE == PGM_TYPE_AMD64
108PGM_GST_DECL(int, PAEWriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
109# endif
110#endif
111__END_DECLS
112
113
114
115/**
116 * Gets effective Guest OS page information.
117 *
118 * When GCPtr is in a big page, the function will return as if it was a normal
119 * 4KB page. If distinguishing between big and normal pages becomes
120 * necessary at a later point, a PGMGstGetPageEx() can be created for
121 * that purpose.
122 *
123 * @returns VBox status.
124 * @param pVM VM Handle.
125 * @param GCPtr Guest Context virtual address of the page. Page aligned!
126 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
127 * @param pGCPhys Where to store the GC physical address of the page.
128 * This is page aligned.
129 */
130PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
131{
132#if PGM_GST_TYPE == PGM_TYPE_REAL \
133 || PGM_GST_TYPE == PGM_TYPE_PROT
134 /*
135 * Fake it.
136 */
137 if (pfFlags)
138 *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
139 if (pGCPhys)
140 *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
141 return VINF_SUCCESS;
142
143#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
144 || PGM_GST_TYPE == PGM_TYPE_PAE \
145 || PGM_GST_TYPE == PGM_TYPE_AMD64
146
147#if PGM_GST_TYPE == PGM_TYPE_AMD64
148 /* later */
149 AssertFailed();
150 return VERR_NOT_IMPLEMENTED;
151#endif
152
153
154 /*
155 * Get the PDE.
156 */
157#if PGM_GST_TYPE == PGM_TYPE_32BIT
158 const X86PDE Pde = CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> X86_PD_SHIFT];
159#else /* PAE */
160 X86PDEPAE Pde;
161 Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
162#endif
163
164 /*
165 * Lookup the page.
166 */
167 if (!Pde.n.u1Present)
168 return VERR_PAGE_TABLE_NOT_PRESENT;
169
170 if ( !Pde.b.u1Size
171 || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE))
172 {
173 PGSTPT pPT;
174 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
175 if (VBOX_FAILURE(rc))
176 return rc;
177
178 /*
179 * Get the PT entry and check whether it is present.
180 */
181 const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
182 if (!Pte.n.u1Present)
183 return VERR_PAGE_NOT_PRESENT;
184
185 /*
186 * Store the result.
187 * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
188 * where the PDPE is simplified.
189 */
190 if (pfFlags)
191 {
192 *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
193 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
194# if PGM_WITH_NX(PGM_GST_TYPE)
195 /* The NX bit is determined by a bitwise OR between the PT and PD */
196 if (Pde.u & X86_PTE_PAE_NX)
197 *pfFlags |= X86_PTE_PAE_NX;
198# endif
199 }
200 if (pGCPhys)
201 *pGCPhys = Pte.u & GST_PTE_PG_MASK;
202 }
203 else
204 {
205 /*
206 * Map the big page to a 4KB PTE and store the result.
207 */
208 if (pfFlags)
209 *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
210 | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
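 /* The result is the big-page base plus the 4KB-page-index bits of GCPtr;
  * (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK) selects the bits between the
  * big-page offset and the 4KB page offset. */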
211 if (pGCPhys)
212 *pGCPhys = (Pde.u & GST_PDE_BIG_PG_MASK) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK)); /** @todo pse36 */
213 }
214 return VINF_SUCCESS;
215#else
216 /* something else... */
217 return VERR_NOT_SUPPORTED;
218#endif
219}
220
221
222/**
223 * Modifies page flags for a range of pages in the guest's tables.
224 *
225 * The existing flags are ANDed with the fMask and ORed with the fFlags.
226 *
227 * @returns VBox status code.
228 * @param pVM VM handle.
229 * @param GCPtr Virtual address of the first page in the range. Page aligned!
230 * @param cb Size (in bytes) of the page range to apply the modification to. Page aligned!
231 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
232 * @param fMask The AND mask - page flags X86_PTE_*.
233 */
234PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
235{
236#if PGM_GST_TYPE == PGM_TYPE_32BIT \
237 || PGM_GST_TYPE == PGM_TYPE_PAE \
238 || PGM_GST_TYPE == PGM_TYPE_AMD64
239
240#if PGM_GST_TYPE == PGM_TYPE_AMD64
241 /* later */
242 AssertFailed();
243 return VERR_NOT_IMPLEMENTED;
244#endif
245
246 for (;;)
247 {
248 /*
249 * Get the PD entry.
250 */
251#if PGM_GST_TYPE == PGM_TYPE_32BIT
252 PX86PDE pPde = &CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> X86_PD_SHIFT];
253#else /* PAE */
254 PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
255 Assert(pPde);
256 if (!pPde)
257 return VERR_PAGE_TABLE_NOT_PRESENT;
258#endif
259 GSTPDE Pde = *pPde;
260 Assert(Pde.n.u1Present);
261 if (!Pde.n.u1Present)
262 return VERR_PAGE_TABLE_NOT_PRESENT;
263
264 if ( !Pde.b.u1Size
265 || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE))
266 {
267 /*
268 * 4KB Page table
269 *
270 * Walk page tables and pages till we're done.
271 */
272 PGSTPT pPT;
273 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
274 if (VBOX_FAILURE(rc))
275 return rc;
276
277 unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
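 /* Walk the remainder of this page table; the outer loop refetches the
  * PDE when we cross into the next one. */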
278 while (iPTE < RT_ELEMENTS(pPT->a))
279 {
280 GSTPTE Pte = pPT->a[iPTE];
281 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
282 | (fFlags & ~GST_PTE_PG_MASK);
283 pPT->a[iPTE] = Pte;
284
285 /* next page */
286 cb -= PAGE_SIZE;
287 if (!cb)
288 return VINF_SUCCESS;
289 GCPtr += PAGE_SIZE;
290 iPTE++;
291 }
292 }
293 else
294 {
295 /*
296 * 4MB Page table
297 */
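 /* Note: the PAT bit sits at bit 12 (X86_PDE4M_PAT) in a 2/4MB PDE but at
  * bit 7 (X86_PTE_PAT) in a PTE, hence the X86_PDE4M_PAT_SHIFT adjustments. */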
298 Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS)) /** @todo pse36 */
299 | (fFlags & ~GST_PTE_PG_MASK)
300 | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
301 *pPde = Pde;
302
303 /* advance */
304 const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
305 if (cbDone >= cb)
306 return VINF_SUCCESS;
307 cb -= cbDone;
308 GCPtr += cbDone;
309 }
310 }
311
312#else
313 /* real / protected mode: ignore. */
314 return VINF_SUCCESS;
315#endif
316}
317
318
319/**
320 * Retrieve guest PDE information
321 *
322 * @returns VBox status code.
323 * @param pVM The virtual machine.
324 * @param GCPtr Guest context pointer
325 * @param pPDE Pointer to guest PDE structure
326 */
327PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE)
328{
329#if PGM_GST_TYPE == PGM_TYPE_32BIT \
330 || PGM_GST_TYPE == PGM_TYPE_PAE \
331 || PGM_GST_TYPE == PGM_TYPE_AMD64
332
333#if PGM_GST_TYPE == PGM_TYPE_AMD64
334 /* later */
335 AssertFailed();
336 return VERR_NOT_IMPLEMENTED;
337#endif
338
339# if PGM_GST_TYPE == PGM_TYPE_32BIT
340 X86PDE Pde;
341 Pde = CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> GST_PD_SHIFT];
342# else
343 X86PDEPAE Pde;
344 Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
345# endif
346
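 /* Widen the PDE to the 64-bit PAE layout so callers get a uniform view
  * regardless of the guest paging mode. */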
347 pPDE->u = (X86PGPAEUINT)Pde.u;
348 return VINF_SUCCESS;
349#else
350 AssertFailed();
351 return VERR_NOT_IMPLEMENTED;
352#endif
353}
354
355
356
357/**
358 * Maps the CR3 into the HMA in GC and locates it in HC.
359 *
360 * @returns VBox status, no specials.
361 * @param pVM VM handle.
362 * @param GCPhysCR3 The physical address in the CR3 register.
363 */
364PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
365{
366#if PGM_GST_TYPE == PGM_TYPE_32BIT \
367 || PGM_GST_TYPE == PGM_TYPE_PAE \
368 || PGM_GST_TYPE == PGM_TYPE_AMD64
369 /*
370 * Map the page CR3 points at.
371 */
372 RTHCPHYS HCPhysGuestCR3;
373 RTHCPTR HCPtrGuestCR3;
374 int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3, &HCPtrGuestCR3, &HCPhysGuestCR3);
375 if (VBOX_SUCCESS(rc))
376 {
377 rc = PGMMap(pVM, (RTGCUINTPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3 & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
378 if (VBOX_SUCCESS(rc))
379 {
380 PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
381#if PGM_GST_TYPE == PGM_TYPE_32BIT
382 pVM->pgm.s.pGuestPDHC = (R3R0PTRTYPE(PX86PD))HCPtrGuestCR3;
383 pVM->pgm.s.pGuestPDGC = (GCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;
384
385#elif PGM_GST_TYPE == PGM_TYPE_PAE
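 /* In PAE mode CR3 is only 32-byte aligned, so preserve the sub-page offset
  * of the PDPT when establishing the mapping. */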
386 const unsigned off = GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
387 pVM->pgm.s.pGstPaePDPTRHC = (R3R0PTRTYPE(PX86PDPTR))((RTHCUINTPTR)HCPtrGuestCR3 | off);
388 pVM->pgm.s.pGstPaePDPTRGC = (GCPTRTYPE(PX86PDPTR))((RTGCUINTPTR)pVM->pgm.s.GCPtrCR3Mapping | off);
389
390 /*
391 * Map the 4 PDs too.
392 */
393 RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
394 for (unsigned i = 0; i < 4; i++, GCPtr += PAGE_SIZE)
395 {
396 if (pVM->pgm.s.CTXSUFF(pGstPaePDPTR)->a[i].n.u1Present)
397 {
398 RTHCPTR HCPtr;
399 RTHCPHYS HCPhys;
400 RTGCPHYS GCPhys = pVM->pgm.s.CTXSUFF(pGstPaePDPTR)->a[i].u & X86_PDPE_PG_MASK;
401 int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
402 if (VBOX_SUCCESS(rc2))
403 {
404 rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
405 AssertRCReturn(rc, rc);
406 pVM->pgm.s.apGstPaePDsHC[i] = (R3R0PTRTYPE(PX86PDPAE))HCPtr;
407 pVM->pgm.s.apGstPaePDsGC[i] = (GCPTRTYPE(PX86PDPAE))GCPtr;
408 pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
409 PGM_INVL_PG(GCPtr);
410 continue;
411 }
412 AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
413 }
414
415 pVM->pgm.s.apGstPaePDsHC[i] = 0;
416 pVM->pgm.s.apGstPaePDsGC[i] = 0;
417 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
418 PGM_INVL_PG(GCPtr);
419 }
420
421#else /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
422 rc = VERR_NOT_IMPLEMENTED;
423#endif
424 }
425 }
426 else
427 AssertMsgFailed(("rc=%Vrc GCPhysGuestPD=%VGp\n", rc, GCPhysCR3));
428
429#else /* prot/real mode stub */
430 int rc = VINF_SUCCESS;
431#endif
432 return rc;
433}
434
435
436/**
437 * Unmaps the CR3.
438 *
439 * @returns VBox status, no specials.
440 * @param pVM VM handle.
442 */
443PGM_GST_DECL(int, UnmapCR3)(PVM pVM)
444{
445 int rc = VINF_SUCCESS;
446#if PGM_GST_TYPE == PGM_TYPE_32BIT
447 pVM->pgm.s.pGuestPDHC = 0;
448 pVM->pgm.s.pGuestPDGC = 0;
449
450#elif PGM_GST_TYPE == PGM_TYPE_PAE
451 pVM->pgm.s.pGstPaePDPTRHC = 0;
452 pVM->pgm.s.pGstPaePDPTRGC = 0;
453 /** PAE todo: pVM->pgm.s.apGstPaePDsHC? -> unmap?? */
454 AssertFailed();
455
456#elif PGM_GST_TYPE == PGM_TYPE_AMD64
457//#error not implemented
458 rc = VERR_NOT_IMPLEMENTED;
459
460#else /* prot/real mode stub */
461 /* nothing to do */
462#endif
463 return rc;
464}
465
466
467#undef LOG_GROUP
468#define LOG_GROUP LOG_GROUP_PGM_POOL
469
470/**
471 * Registers physical page monitors for the necessary paging
472 * structures to detect conflicts with our guest mappings.
473 *
474 * This is always called after mapping CR3.
475 * This is never called with fixed mappings.
476 *
477 * @returns VBox status, no specials.
478 * @param pVM VM handle.
479 * @param GCPhysCR3 The physical address in the CR3 register.
480 */
481PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
482{
483 Assert(!pVM->pgm.s.fMappingsFixed);
484 int rc = VINF_SUCCESS;
485
486#if PGM_GST_TYPE == PGM_TYPE_32BIT \
487 || PGM_GST_TYPE == PGM_TYPE_PAE \
488 || PGM_GST_TYPE == PGM_TYPE_AMD64
489
490 /*
491 * Register/Modify write phys handler for guest's CR3 if it changed.
492 */
493 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
494 {
495# ifndef PGMPOOL_WITH_MIXED_PT_CR3
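 /* In PAE mode only the 32-byte PDPT (4 entries of 8 bytes each) needs write
  * monitoring; otherwise monitor the whole page directory page. */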
496 const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
497 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
498 rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
499 else
500 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
501 pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
502 pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
503 pVM->pgm.s.pfnGCGstWriteHandlerCR3, 0,
504 pVM->pgm.s.pszR3GstWriteHandlerCR3);
505# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
506 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
507 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
508 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
509 ? PGMPOOL_IDX_PAE_PD
510 : PGMPOOL_IDX_PD,
511 GCPhysCR3);
512# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
513 if (VBOX_FAILURE(rc))
514 {
515 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
516 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
517 return rc;
518 }
519 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
520 }
521
522#if PGM_GST_TYPE == PGM_TYPE_PAE
523 /*
524 * Do the 4 PDs.
525 */
526 for (unsigned i = 0; i < 4; i++)
527 {
528 if (CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].n.u1Present)
529 {
530 RTGCPHYS GCPhys = CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].u & X86_PDPE_PG_MASK;
531# ifndef PGMPOOL_WITH_MIXED_PT_CR3
532 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
533 {
534 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
535 rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys, GCPhys + PAGE_SIZE - 1);
536 else
537 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhys + PAGE_SIZE - 1,
538 pVM->pgm.s.pfnR3GstPAEWriteHandlerCR3, 0,
539 pVM->pgm.s.pfnR0GstPAEWriteHandlerCR3, 0,
540 pVM->pgm.s.pfnGCGstPAEWriteHandlerCR3, 0,
541 pVM->pgm.s.pszR3GstPAEWriteHandlerCR3);
542 if (VBOX_SUCCESS(rc))
543 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
544 }
545# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
546 /** PAE todo */
547 AssertFailed();
548 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
549 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
550 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
551 ? PGMPOOL_IDX_PAE_PD
552 : PGMPOOL_IDX_PD,
553 GCPhys);
554# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
555 if (VBOX_FAILURE(rc))
556 {
557 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
558 rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
559 return rc;
560 }
561 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
562 }
563 else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
564 {
565 rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]);
566 AssertRC(rc);
567 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
568 }
569 }
570#endif /* PGM_GST_TYPE == PGM_TYPE_PAE */
571
572#else
573 /* prot/real mode stub */
574
575#endif
576 return rc;
577}
578
579/**
580 * Deregisters any physical page monitors installed by MonitorCR3.
581 *
582 * @returns VBox status code, no specials.
583 * @param pVM The VM handle.
584 */
585PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
586{
587 int rc = VINF_SUCCESS;
588
589#if PGM_GST_TYPE == PGM_TYPE_32BIT \
590 || PGM_GST_TYPE == PGM_TYPE_PAE \
591 || PGM_GST_TYPE == PGM_TYPE_AMD64
592
593 /*
594 * Deregister the access handlers.
595 *
596 * PGMSyncCR3 will reinstall them if required, and it will be executed
597 * before we enter GC again.
598 */
599 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
600 {
601# ifndef PGMPOOL_WITH_MIXED_PT_CR3
602 rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
603 AssertRCReturn(rc, rc);
604# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
605 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
606 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
607 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
608 ? PGMPOOL_IDX_PAE_PD
609 : PGMPOOL_IDX_PD);
610 AssertRCReturn(rc, rc);
611# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
612 pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
613 }
614
615# if PGM_GST_TYPE == PGM_TYPE_PAE
616 /* The 4 PDs. */
617 for (unsigned i = 0; i < 4; i++)
618 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
619 {
620# ifndef PGMPOOL_WITH_MIXED_PT_CR3
621 int rc2 = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]);
622# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
623 /** PAE todo */
624 AssertFailed();
625 int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
626 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
627 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
628 ? PGMPOOL_IDX_PAE_PD
629 : PGMPOOL_IDX_PD);
630# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
631 AssertRC(rc2);
632 if (VBOX_FAILURE(rc2))
633 rc = rc2;
634 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
635 }
636# endif
637
638#else
639 /* prot/real mode stub */
640#endif
641 return rc;
642
643}
644
645#undef LOG_GROUP
646#define LOG_GROUP LOG_GROUP_PGM
647
648
649#if PGM_GST_TYPE == PGM_TYPE_32BIT \
650 || PGM_GST_TYPE == PGM_TYPE_PAE \
651 || PGM_GST_TYPE == PGM_TYPE_AMD64
652/**
653 * Updates one virtual handler range.
654 *
655 * @returns 0
656 * @param pNode Pointer to a PGMVIRTHANDLER.
657 * @param pvUser Pointer to a PGMVHUARGS structure (see PGM.cpp).
658 */
659static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
660{
661 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
662 PPGMHVUSTATE pState = (PPGMHVUSTATE)pvUser;
663 Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);
664
665#if PGM_GST_TYPE == PGM_TYPE_32BIT
666 PX86PD pPDSrc = pState->pVM->pgm.s.CTXSUFF(pGuestPD);
667#endif
668
669 RTGCUINTPTR GCPtr = (RTUINTPTR)pCur->GCPtr;
670#if PGM_GST_TYPE != PGM_TYPE_AMD64
671 /* skip all stuff above 4GB if not AMD64 mode. */
672 if (GCPtr >= _4GB)
673 return 0;
674#endif
675
676 unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
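 /* offPage only applies to the first page of the handler range; the loops
  * below reset it to zero after the first iteration. */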
677 unsigned iPage = 0;
678 while (iPage < pCur->cPages)
679 {
680#if PGM_GST_TYPE == PGM_TYPE_32BIT
681 X86PDE Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
682#else
683 X86PDEPAE Pde;
684 Pde.u = pgmGstGetPaePDE(&pState->pVM->pgm.s, GCPtr);
685#endif
686 if (Pde.n.u1Present)
687 {
688 if (!Pde.b.u1Size || !(pState->cr4 & X86_CR4_PSE))
689 {
690 /*
691 * Normal page table.
692 */
693 PGSTPT pPT;
694 int rc = PGM_GCPHYS_2_PTR(pState->pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
695 if (VBOX_SUCCESS(rc))
696 {
697 for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
698 iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
699 iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
700 {
701 GSTPTE Pte = pPT->a[iPTE];
702 RTGCPHYS GCPhysNew;
703 if (Pte.n.u1Present)
704 GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
705 else
706 GCPhysNew = NIL_RTGCPHYS;
707 if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
708 {
709 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
710 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
711#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
712 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
713 ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%VGp\n",
714 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
715 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
716#endif
717 pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
718 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
719 }
720 }
721 }
722 else
723 {
724 /* not-present. */
725 offPage = 0;
726 AssertRC(rc);
727 for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
728 iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
729 iPTE++, iPage++, GCPtr += PAGE_SIZE)
730 {
731 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
732 {
733 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
734#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
735 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
736 ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
737 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
738 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
739#endif
740 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
741 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
742 }
743 }
744 }
745 }
746 else
747 {
748 /*
749 * 2/4MB page.
750 */
751 RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & GST_PDE_PG_MASK);
752 for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
753 i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
754 i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
755 {
756 RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
757 if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
758 {
759 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
760 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
761#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
762 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
763 ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%VGp\n",
764 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
765 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
766#endif
767 pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
768 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
769 }
770 }
771 } /* pde type */
772 }
773 else
774 {
775 /* not-present. */
776 for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
777 cPages && iPage < pCur->cPages;
778 cPages--, iPage++, GCPtr += PAGE_SIZE)
779 {
780 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
781 {
782 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
783 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
784 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
785 }
786 }
787 offPage = 0;
788 }
789 } /* for pages in virtual mapping. */
790
791 return 0;
792}
793#endif /* 32BIT, PAE and AMD64 */
794
795
796/**
797 * Updates the virtual page access handlers.
798 *
799 * @returns true if bits were flushed.
800 * @returns false if bits weren't flushed.
801 * @param pVM VM handle.
802 * @param pPDSrc The page directory.
803 * @param cr4 The cr4 register value.
804 */
805PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
806{
807#if PGM_GST_TYPE == PGM_TYPE_32BIT \
808 || PGM_GST_TYPE == PGM_TYPE_PAE \
809 || PGM_GST_TYPE == PGM_TYPE_AMD64
810
811#if PGM_GST_TYPE == PGM_TYPE_AMD64
812 AssertFailed();
813#endif
814
815 /** @todo
816 * In theory this is not sufficient: the guest can change a single page in a range with invlpg
817 */
818
819 /*
820 * Resolve any virtual address based access handlers to GC physical addresses.
821 * This should be fairly quick.
822 */
823 PGMHVUSTATE State;
824
825 pgmLock(pVM);
826 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualUpdate), a);
827 State.pVM = pVM;
828 State.fTodo = pVM->pgm.s.fSyncFlags;
829 State.cr4 = cr4;
830 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
831 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualUpdate), a);
832
833
834 /*
835 * Set / reset bits?
836 */
837 if (State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
838 {
839 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualReset), b);
840 Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
841 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
842 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
843 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualReset), b);
844 }
845 pgmUnlock(pVM);
846
847 return !!(State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);
848
849#else /* real / protected */
850 return false;
851#endif
852}
853
854
855#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)
856
857/**
858 * Write access handler for the Guest CR3 page in 32-bit mode.
859 *
860 * This will try to interpret the instruction; on failure it falls back to the recompiler.
861 * It checks whether the changed PDEs are marked present and conflict with our
862 * mappings. On a conflict we'll switch to the host context and resolve it there.
863 *
864 * @returns VBox status code (appropriate for trap handling and GC return).
865 * @param pVM VM Handle.
866 * @param uErrorCode CPU Error code.
867 * @param pRegFrame Trap register frame.
868 * @param pvFault The fault address (cr2).
869 * @param GCPhysFault The GC physical address corresponding to pvFault.
870 * @param pvUser User argument.
871 */
872PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
873{
874 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
875
876 /*
877 * Try interpret the instruction.
878 */
879 uint32_t cb;
880 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
881 if (VBOX_SUCCESS(rc) && cb)
882 {
883 /*
884 * Check if the modified PDEs are present and conflict with our mappings.
885 */
886 const RTGCUINTPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
887 const unsigned iPD1 = offPD / sizeof(X86PDE);
888 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDE);
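 /* A single emulated write (at most 8 bytes) can straddle two PDEs, so both
  * iPD1 and iPD2 are checked below. */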
889
890 Assert(cb > 0 && cb <= 8);
891 Assert(iPD1 < RT_ELEMENTS(pVM->pgm.s.CTXSUFF(pGuestPD)->a)); /// @todo R3/R0 separation.
892 Assert(iPD2 < RT_ELEMENTS(pVM->pgm.s.CTXSUFF(pGuestPD)->a));
893
894#ifdef DEBUG
895 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD1, iPD1 << X86_PD_SHIFT));
896 if (iPD1 != iPD2)
897 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD2, iPD2 << X86_PD_SHIFT));
898#endif
899
900 if (!pVM->pgm.s.fMappingsFixed)
901 {
902 PX86PD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
903 if ( ( pPDSrc->a[iPD1].n.u1Present
904 && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
905 || ( iPD1 != iPD2
906 && pPDSrc->a[iPD2].n.u1Present
907 && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
908 )
909 {
910 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteConflict);
911 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
912 if (rc == VINF_SUCCESS)
913 rc = VINF_PGM_SYNC_CR3;
914 Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
915 return rc;
916 }
917 }
918
919 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
920 }
921 else
922 {
923 Assert(VBOX_FAILURE(rc));
924 if (rc == VERR_EM_INTERPRETER)
925 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
926 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
927 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
928 }
929 return rc;
930}
931
932#endif /* PGM_TYPE_32BIT && !IN_RING3 */
933
934
935#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)
936
937/**
938 * Write access handler for the Guest CR3 page in PAE mode.
939 *
940 * This will try to interpret the instruction; on failure it falls back to the recompiler.
941 * It checks whether the changed PDEs are marked present and conflict with our
942 * mappings. On a conflict we'll switch to the host context and resolve it there.
943 *
944 * @returns VBox status code (appropriate for trap handling and GC return).
945 * @param pVM VM Handle.
946 * @param uErrorCode CPU Error code.
947 * @param pRegFrame Trap register frame.
948 * @param pvFault The fault address (cr2).
949 * @param GCPhysFault The GC physical address corresponding to pvFault.
950 * @param pvUser User argument.
951 */
952PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
953{
954 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
955
956 /*
957 * Try interpret the instruction.
958 */
959 uint32_t cb;
960 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
961 if (VBOX_SUCCESS(rc) && cb)
962 {
963 /*
964 * Check if any of the PDs have changed.
965 * We'll simply check all of them instead of figuring out which one/two to check.
966 */
967 for (unsigned i = 0; i < 4; i++)
968 {
969 if ( CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].n.u1Present
970 && ( CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].u & X86_PDPE_PG_MASK)
971 != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
972 {
973 /*
974 * The PDPE has changed.
975 * We will schedule a monitoring update for the next TLB Flush,
976 * InvalidatePage or SyncCR3.
977 *
978 * This isn't perfect, because a lazy page sync might be dealing with a
979 * half-updated PDPE. However, we assume that the guest OS is disabling
980 * interrupts and being extremely careful (cmpxchg8b) when updating a PDPE
981 * it is currently executing from.
982 */
983 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
984 Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%VGp\n",
985 i, CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
986 }
987 }
988
989 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
990 }
991 else
992 {
993 Assert(VBOX_FAILURE(rc));
994 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
995 if (rc == VERR_EM_INTERPRETER)
996 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
997 }
998 Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
999 return rc;
1000}
1001
1002
1003/**
1004 * Write access handler for the Guest PDs in PAE mode.
1005 *
1006 * This will try to interpret the instruction; on failure it falls back to the recompiler.
1007 * It checks whether the changed PDEs are marked present and conflict with our
1008 * mappings. On a conflict we'll switch to the host context and resolve it there.
1009 *
1010 * @returns VBox status code (appropriate for trap handling and GC return).
1011 * @param pVM VM Handle.
1012 * @param uErrorCode CPU Error code.
1013 * @param pRegFrame Trap register frame.
1014 * @param pvFault The fault address (cr2).
1015 * @param GCPhysFault The GC physical address corresponding to pvFault.
1016 * @param pvUser User argument.
1017 */
1018PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1019{
1020 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
1021
1022 /*
1023 * Try interpret the instruction.
1024 */
1025 uint32_t cb;
1026 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
1027 if (VBOX_SUCCESS(rc) && cb)
1028 {
1029 /*
1030 * Figure out which of the 4 PDs this is.
1031 */
1032 RTGCUINTPTR i;
1033 for (i = 0; i < 4; i++)
1034 if (CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
1035 {
1036 PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPTR_SHIFT);
1037 const RTGCUINTPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
1038 const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
1039 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);
1040
1041 Assert(cb > 0 && cb <= 8);
1042 Assert(iPD1 < X86_PG_PAE_ENTRIES);
1043 Assert(iPD2 < X86_PG_PAE_ENTRIES);
1044
1045#ifdef DEBUG
1046 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%VGv)\n",
1047 i, iPD1, (i << X86_PDPTR_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
1048 if (iPD1 != iPD2)
1049 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%VGv)\n",
1050 i, iPD2, (i << X86_PDPTR_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
1051#endif
1052
1053 if (!pVM->pgm.s.fMappingsFixed)
1054 {
1055 if ( ( pPDSrc->a[iPD1].n.u1Present
1056 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPTR_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
1057 || ( iPD1 != iPD2
1058 && pPDSrc->a[iPD2].n.u1Present
1059 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPTR_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
1060 )
1061 {
1062 Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
1063 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteConflict);
1064 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1065 return VINF_PGM_SYNC_CR3;
1066 }
1067 }
1068 break; /* ASSUMES no duplicate entries... */
1069 }
1070 Assert(i < 4);
1071
1072 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
1073 }
1074 else
1075 {
1076 Assert(VBOX_FAILURE(rc));
1077 if (rc == VERR_EM_INTERPRETER)
1078 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
1079 else
1080 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
1081 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
1082 }
1083 return rc;
1084}
1085
1086#endif /* PGM_TYPE_PAE && !IN_RING3 */
1087