VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@11531

Last change on this file was 11531, checked in by vboxsync, 17 years ago:

Missing changes for PSE-36 support.
/* $Id: PGMAllGst.h 11531 2008-08-21 10:10:09Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#undef GSTPT
#undef PGSTPT
#undef GSTPTE
#undef PGSTPTE
#undef GSTPD
#undef PGSTPD
#undef GSTPDE
#undef PGSTPDE
#undef GST_BIG_PAGE_SIZE
#undef GST_BIG_PAGE_OFFSET_MASK
#undef GST_PDE_PG_MASK
#undef GST_PDE_BIG_PG_MASK
#undef GST_PD_SHIFT
#undef GST_PD_MASK
#undef GST_PTE_PG_MASK
#undef GST_PT_SHIFT
#undef GST_PT_MASK
#undef GST_TOTAL_PD_ENTRIES
#undef GST_CR3_PAGE_MASK
#undef GST_PDPE_ENTRIES
#undef GST_PDPT_SHIFT
#undef GST_PDPT_MASK
#undef GST_PDPE_PG_MASK
#undef GST_GET_PDE_BIG_PG_GCPHYS

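/* The GST_* names are a level of indirection: each instantiation of this
   template (real/protected, 32-bit, PAE and AMD64 guest modes) maps them
   onto the matching guest paging structure types and masks, so the code
   below is written once against the GST_* vocabulary. */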
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
# define GSTPT                      SHWPT
# define PGSTPT                     PSHWPT
# define GSTPTE                     SHWPTE
# define PGSTPTE                    PSHWPTE
# define GSTPD                      SHWPD
# define PGSTPD                     PSHWPD
# define GSTPDE                     SHWPDE
# define PGSTPDE                    PSHWPDE
# define GST_PTE_PG_MASK            SHW_PTE_PG_MASK
#elif PGM_GST_TYPE == PGM_TYPE_32BIT
# define GSTPT                      X86PT
# define PGSTPT                     PX86PT
# define GSTPTE                     X86PTE
# define PGSTPTE                    PX86PTE
# define GSTPD                      X86PD
# define PGSTPD                     PX86PD
# define GSTPDE                     X86PDE
# define PGSTPDE                    PX86PDE
# define GST_BIG_PAGE_SIZE          X86_PAGE_4M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_4M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PG_MASK
# define GST_PDE_BIG_PG_MASK        X86_PDE4M_PG_MASK
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeGst)
# define GST_PD_SHIFT               X86_PD_SHIFT
# define GST_PD_MASK                X86_PD_MASK
# define GST_TOTAL_PD_ENTRIES       X86_PG_ENTRIES
# define GST_PTE_PG_MASK            X86_PTE_PG_MASK
# define GST_PT_SHIFT               X86_PT_SHIFT
# define GST_PT_MASK                X86_PT_MASK
# define GST_CR3_PAGE_MASK          X86_CR3_PAGE_MASK
#elif PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_AMD64
# define GSTPT                      X86PTPAE
# define PGSTPT                     PX86PTPAE
# define GSTPTE                     X86PTEPAE
# define PGSTPTE                    PX86PTEPAE
# define GSTPD                      X86PDPAE
# define PGSTPD                     PX86PDPAE
# define GSTPDE                     X86PDEPAE
# define PGSTPDE                    PX86PDEPAE
# define GST_BIG_PAGE_SIZE          X86_PAGE_2M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_2M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PAE_PG_MASK_FULL
# define GST_PDE_BIG_PG_MASK        X86_PDE2M_PAE_PG_MASK
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  (PdeGst.u & GST_PDE_BIG_PG_MASK)
# define GST_PD_SHIFT               X86_PD_PAE_SHIFT
# define GST_PD_MASK                X86_PD_PAE_MASK
# if PGM_GST_TYPE == PGM_TYPE_PAE
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_PAE_PDPE_ENTRIES
#  define GST_PDPE_PG_MASK          X86_PDPE_PG_MASK_FULL
#  define GST_PDPT_SHIFT            X86_PDPT_SHIFT
#  define GST_PDPT_MASK             X86_PDPT_MASK_PAE
#  define GST_PTE_PG_MASK           X86_PTE_PAE_PG_MASK
#  define GST_CR3_PAGE_MASK         X86_CR3_PAE_PAGE_MASK
# else
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_AMD64_PDPE_ENTRIES
#  define GST_PDPT_SHIFT            X86_PDPT_SHIFT
#  define GST_PDPE_PG_MASK          X86_PDPE_PG_MASK_FULL
#  define GST_PDPT_MASK             X86_PDPT_MASK_AMD64
#  define GST_PTE_PG_MASK           X86_PTE_PAE_PG_MASK_FULL
#  define GST_CR3_PAGE_MASK         X86_CR3_AMD64_PAGE_MASK
# endif
# define GST_PT_SHIFT               X86_PT_PAE_SHIFT
# define GST_PT_MASK                X86_PT_PAE_MASK
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE);
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
#ifndef IN_RING3
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
# if PGM_GST_TYPE == PGM_TYPE_PAE \
  || PGM_GST_TYPE == PGM_TYPE_AMD64
PGM_GST_DECL(int, PAEWriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
# endif
#endif
__END_DECLS


/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If distinguishing between big and normal pages becomes necessary
 * at a later point, a PGMGstGetPageEx() can be created for that purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page. Page aligned!
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
    /*
     * Fake it.
     */
    if (pfFlags)
        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    if (pGCPhys)
        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64

    /*
     * Get the PDE.
     */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    const X86PDE    Pde = CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> X86_PD_SHIFT];
# elif PGM_GST_TYPE == PGM_TYPE_PAE
    X86PDEPAE       Pde;
    bool            fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);

    /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
     * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */
    Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    PX86PML4E       pPml4e;
    X86PDPE         Pdpe;
    X86PDEPAE       Pde;
    bool            fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);

    Pde.u = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr, &pPml4e, &Pdpe);
    Assert(pPml4e);
    if (!(pPml4e->n.u1Present & Pdpe.n.u1Present))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Merge accessed, write, user and no-execute bits into the PDE: a page is
       only writable/user-accessible if every level allows it, while NX forbids
       execution if set at any level. */
    Pde.n.u1Accessed  &= pPml4e->n.u1Accessed & Pdpe.lm.u1Accessed;
    Pde.n.u1Write     &= pPml4e->n.u1Write & Pdpe.lm.u1Write;
    Pde.n.u1User      &= pPml4e->n.u1User & Pdpe.lm.u1User;
    Pde.n.u1NoExecute |= pPml4e->n.u1NoExecute | Pdpe.lm.u1NoExecute;
# endif

    /*
     * Lookup the page.
     */
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
        ||  !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
       )
    {
        PGSTPT pPT;
        int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
        if (VBOX_FAILURE(rc))
            return rc;

        /*
         * Get PT entry and check presence.
         */
        const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        if (!Pte.n.u1Present)
            return VERR_PAGE_NOT_PRESENT;

        /*
         * Store the result.
         * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
         * where the PDPE is simplified.
         */
        if (pfFlags)
        {
            *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
                     & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
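            /* This keeps every PTE flag as-is except RW and US, which get
               ANDed with the PDE's RW/US bits: e.g. a read-only PDE over a
               writable PTE yields an effective read-only page. */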
# if PGM_WITH_NX(PGM_GST_TYPE)
            /* The NX bit is determined by a bitwise OR between the PT and PD. */
            if (fNoExecuteBitValid)
                *pfFlags |= ((Pte.u | Pde.u) & X86_PTE_PAE_NX);
# endif
        }
        if (pGCPhys)
            *pGCPhys = Pte.u & GST_PTE_PG_MASK;
    }
    else
    {
        /*
         * Map big to 4k PTE and store the result.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
                     | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
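            /* Big-page PDEs keep the PAT bit at position 12 (X86_PDE4M_PAT)
               while 4KB PTEs keep it at position 7; the shift moves it into
               the PTE position so the returned flags are uniform. */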
# if PGM_WITH_NX(PGM_GST_TYPE)
            /* The NX bit is determined by a bitwise OR between the PT and PD. */
            if (fNoExecuteBitValid)
                *pfFlags |= (Pde.u & X86_PTE_PAE_NX);
# endif
        }
        if (pGCPhys)
            *pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));
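            /* (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK) selects exactly the
               address bits between the 4KB and big-page boundaries (bits
               12..20 for 2MB pages, 12..21 for 4MB pages); ORing them onto
               the big-page base gives the 4KB-granular physical address. */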
    }
    return VINF_SUCCESS;
#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}


/**
 * Modify page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
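 *
 * @remarks Usage sketch: passing fFlags == 0 with fMask == ~(uint64_t)X86_PTE_RW
 *          clears the RW bit across the range, write-protecting it while
 *          leaving all other flags intact.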
 */
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    for (;;)
    {
        /*
         * Get the PD entry.
         */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        PX86PDE pPde = &CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> X86_PD_SHIFT];
# elif PGM_GST_TYPE == PGM_TYPE_PAE
        /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present.
         * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */
        PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
        Assert(pPde);
        if (!pPde)
            return VERR_PAGE_TABLE_NOT_PRESENT;
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
        /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
        PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVM->pgm.s, GCPtr);
        Assert(pPde);
        if (!pPde)
            return VERR_PAGE_TABLE_NOT_PRESENT;
# endif
        GSTPDE Pde = *pPde;
        Assert(Pde.n.u1Present);
        if (!Pde.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
            ||  !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
           )
        {
            /*
             * 4KB Page table
             *
             * Walk page tables and pages till we're done.
             */
            PGSTPT pPT;
            int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
            if (VBOX_FAILURE(rc))
                return rc;

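            /* Walk the PTEs from the starting entry; if the range extends past
               this page table, the outer for (;;) loop refetches the next PDE. */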
            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(pPT->a))
            {
                GSTPTE Pte = pPT->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                pPT->a[iPTE] = Pte;

                /* next page */
                cb -= PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 4MB Page table
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS)) /** @todo pse36 */
# endif
                  | (fFlags & ~GST_PTE_PG_MASK)
                  | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *pPde = Pde;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    return VINF_SUCCESS;
#endif
}


/**
 * Retrieves guest PDE information.
 *
 * @returns VBox status code.
 * @param   pVM     The virtual machine.
 * @param   GCPtr   Guest context pointer.
 * @param   pPDE    Pointer to guest PDE structure.
 */
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

# if PGM_GST_TYPE == PGM_TYPE_32BIT
    X86PDE    Pde;
    Pde   = CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> GST_PD_SHIFT];
# elif PGM_GST_TYPE == PGM_TYPE_PAE
    X86PDEPAE Pde;
    Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    X86PDEPAE Pde;
    Pde.u = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr);
# endif

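    /* Normalize to the 64-bit PAE layout: a 32-bit PDE is zero-extended, so
       callers see one PDE format regardless of the guest paging mode. */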
    pPDE->u = (X86PGPAEUINT)Pde.u;
    return VINF_SUCCESS;
#else
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}



/**
 * Maps the page CR3 points at into the HMA in GC and locates it in HC.
 *
 * Note that a MapCR3 call is usually not followed by an UnmapCR3 call; whenever
 * CR3 is updated we simply call MapCR3 again.
 *
 * @returns VBox status, no specials.
 * @param   pVM         VM handle.
 * @param   GCPhysCR3   The physical address in the CR3 register.
 */
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    LogFlow(("MapCR3: %VGp\n", GCPhysCR3));

    /*
     * Map the page CR3 points at.
     */
    RTHCPHYS HCPhysGuestCR3;
    RTHCPTR  HCPtrGuestCR3;
    int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
    if (VBOX_SUCCESS(rc))
    {
        rc = PGMMap(pVM, (RTGCUINTPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
        if (VBOX_SUCCESS(rc))
        {
            PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            pVM->pgm.s.pGuestPDHC = (R3R0PTRTYPE(PX86PD))HCPtrGuestCR3;
            pVM->pgm.s.pGuestPDGC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;

# elif PGM_GST_TYPE == PGM_TYPE_PAE
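            /* In PAE mode CR3 is only 32-byte aligned, so the guest PDPT can
               sit anywhere inside the page; keep the in-page offset around. */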
            unsigned offset = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
            pVM->pgm.s.pGstPaePDPTHC = (R3R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
            pVM->pgm.s.pGstPaePDPTGC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + offset);
            Log(("Cached mapping %VGv\n", pVM->pgm.s.pGstPaePDPTGC));

            /*
             * Map the 4 PDs too.
             */
            RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
            for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
            {
                if (pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[i].n.u1Present)
                {
                    RTHCPTR  HCPtr;
                    RTHCPHYS HCPhys;
                    RTGCPHYS GCPhys = pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK;
                    int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
                    if (VBOX_SUCCESS(rc2))
                    {
                        rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
                        AssertRCReturn(rc, rc);
                        pVM->pgm.s.apGstPaePDsHC[i]    = (R3R0PTRTYPE(PX86PDPAE))HCPtr;
                        pVM->pgm.s.apGstPaePDsGC[i]    = (RCPTRTYPE(PX86PDPAE))GCPtr;
                        pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
                        PGM_INVL_PG(GCPtr);
                        continue;
                    }
                    AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
                }

                pVM->pgm.s.apGstPaePDsHC[i]    = 0;
                pVM->pgm.s.apGstPaePDsGC[i]    = 0;
                pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
                PGM_INVL_PG(GCPtr);
            }
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
            PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);

            pVM->pgm.s.pGstPaePML4HC = (R3R0PTRTYPE(PX86PML4))HCPtrGuestCR3;

            if (!HWACCMIsNestedPagingActive(pVM))
            {
                if (pVM->pgm.s.pHCShwAmd64CR3)
                {
                    pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
                    pVM->pgm.s.pHCShwAmd64CR3 = 0;
                    pVM->pgm.s.pHCPaePML4     = 0;
                    pVM->pgm.s.HCPhysPaePML4  = 0;
                }

                Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
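                /* Allocate a shadow PML4 page for the new CR3; if the pool had
                   to be flushed to make room, sync it and retry the allocation. */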
try_again:
                rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.pHCShwAmd64CR3);
                if (rc == VERR_PGM_POOL_FLUSHED)
                {
                    Log(("MapCR3: Flush pool and try again\n"));
                    Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
                    rc = pgmPoolSyncCR3(pVM);
                    AssertRC(rc);
                    goto try_again;
                }
                pVM->pgm.s.pHCPaePML4    = (PX86PML4)PGMPOOL_PAGE_2_PTR(pPool->CTXSUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3);
                pVM->pgm.s.HCPhysPaePML4 = pVM->pgm.s.pHCShwAmd64CR3->Core.Key;
            }
# endif
        }
        else
            AssertMsgFailed(("rc=%Vrc GCPhysGuestPD=%VGp\n", rc, GCPhysCR3));
    }
    else
        AssertMsgFailed(("rc=%Vrc GCPhysGuestPD=%VGp\n", rc, GCPhysCR3));

#else /* prot/real stub */
    int rc = VINF_SUCCESS;
#endif
    return rc;
}


/**
 * Unmaps the CR3.
 *
 * @returns VBox status, no specials.
 * @param   pVM     VM handle.
 */
PGM_GST_DECL(int, UnmapCR3)(PVM pVM)
{
    LogFlow(("UnmapCR3\n"));

    int rc = VINF_SUCCESS;

#if PGM_GST_TYPE == PGM_TYPE_32BIT
    pVM->pgm.s.pGuestPDHC = 0;
    pVM->pgm.s.pGuestPDGC = 0;

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    pVM->pgm.s.pGstPaePDPTHC = 0;
    pVM->pgm.s.pGstPaePDPTGC = 0;
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        pVM->pgm.s.apGstPaePDsHC[i]    = 0;
        pVM->pgm.s.apGstPaePDsGC[i]    = 0;
        pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
    }

#elif PGM_GST_TYPE == PGM_TYPE_AMD64
    pVM->pgm.s.pGstPaePML4HC = 0;
    if (!HWACCMIsNestedPagingActive(pVM))
    {
        pVM->pgm.s.pHCPaePML4 = 0;
        if (pVM->pgm.s.pHCShwAmd64CR3)
        {
            PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
            pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
            pVM->pgm.s.pHCShwAmd64CR3 = NULL;
        }
    }

#else /* prot/real mode stub */
    /* nothing to do */
#endif
    return rc;
}


#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM_POOL

/**
 * Registers physical page monitors for the necessary paging
 * structures to detect conflicts with our guest mappings.
 *
 * This is always called after mapping CR3.
 * This is never called with fixed mappings.
 *
 * @returns VBox status, no specials.
 * @param   pVM         VM handle.
 * @param   GCPhysCR3   The physical address in the CR3 register.
 */
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
    Assert(!pVM->pgm.s.fMappingsFixed);
    int rc = VINF_SUCCESS;

    /*
     * Register/Modify write phys handler for guest's CR3 if it changed.
     */
#if PGM_GST_TYPE == PGM_TYPE_32BIT

    if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    {
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
        const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
        if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
            rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
        else
            rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
                                              pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pfnGCGstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pszR3GstWriteHandlerCR3);
# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
        rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
                                      pVM->pgm.s.enmShadowMode == PGMMODE_PAE
                                      || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
                                      ? PGMPOOL_IDX_PAE_PD
                                      : PGMPOOL_IDX_PD,
                                      GCPhysCR3);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
        if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                             rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
            return rc;
        }
        pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
    }

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Monitor the PDPT page: register/modify the write phys handler for the
     * guest's CR3 if it changed.
     */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
    AssertFailed();
# endif
    if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    {
        rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
        if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                             rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
            return rc;
        }
        pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
    }

    /*
     * Do the 4 PDs.
     */
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        if (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].n.u1Present)
        {
            RTGCPHYS GCPhys = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK;
            if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
            {
                Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);

                rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
            }

            if (VBOX_FAILURE(rc))
            {
                AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                                 rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
                return rc;
            }
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
        }
        else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
        {
            rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
            AssertRC(rc);
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
        }
    }

#else
    /* prot/real/amd64 mode stub */

#endif
    return rc;
}

/**
 * Deregisters any physical page monitors installed by MonitorCR3.
 *
 * @returns VBox status code, no specials.
 * @param   pVM     The VM handle.
 */
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
{
    int rc = VINF_SUCCESS;

    /*
     * Deregister the access handlers.
     *
     * PGMSyncCR3 will reinstall them if required, and it will be executed
     * before we enter GC again.
     */
#if PGM_GST_TYPE == PGM_TYPE_32BIT
    if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    {
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
        rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
        AssertRCReturn(rc, rc);
# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
        rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
                                        pVM->pgm.s.enmShadowMode == PGMMODE_PAE
                                        || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
                                        ? PGMPOOL_IDX_PAE_PD
                                        : PGMPOOL_IDX_PD);
        AssertRCReturn(rc, rc);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
        pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
    }

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* The PDPT page */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
    AssertFailed();
# endif

    if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    {
        rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PDPT);
        AssertRC(rc);
    }

    /* The 4 PDs. */
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
        {
            Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
            int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
            AssertRC(rc2);
            if (VBOX_FAILURE(rc2))
                rc = rc2;
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
        }
    }
#else
    /* prot/real/amd64 mode stub */
#endif
    return rc;
}

#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
/**
 * Updates one virtual handler range.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to a PGMVHUARGS structure (see PGM.cpp).
 */
static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur   = (PPGMVIRTHANDLER)pNode;
    PPGMHVUSTATE    pState = (PPGMHVUSTATE)pvUser;
    Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);

#if PGM_GST_TYPE == PGM_TYPE_32BIT
    PX86PD pPDSrc = pState->pVM->pgm.s.CTXSUFF(pGuestPD);
#endif

    RTGCUINTPTR GCPtr = (RTGCUINTPTR)pCur->GCPtr;
#if PGM_GST_TYPE != PGM_TYPE_AMD64
    /* skip all stuff above 4GB if not AMD64 mode. */
    if (GCPtr >= _4GB)
        return 0;
#endif

    unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
    unsigned iPage = 0;
    while (iPage < pCur->cPages)
    {
#if PGM_GST_TYPE == PGM_TYPE_32BIT
        X86PDE      Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
#elif PGM_GST_TYPE == PGM_TYPE_PAE
        X86PDEPAE   Pde;
        Pde.u = pgmGstGetPaePDE(&pState->pVM->pgm.s, GCPtr);
#elif PGM_GST_TYPE == PGM_TYPE_AMD64
        X86PDEPAE   Pde;
        Pde.u = pgmGstGetLongModePDE(&pState->pVM->pgm.s, GCPtr);
#endif
        if (Pde.n.u1Present)
        {
            if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
                ||  !(pState->cr4 & X86_CR4_PSE)
# endif
               )
            {
                /*
                 * Normal page table.
                 */
                PGSTPT pPT;
                int rc = PGM_GCPHYS_2_PTR(pState->pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
                if (VBOX_SUCCESS(rc))
                {
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                    {
                        GSTPTE      Pte = pPT->a[iPTE];
                        RTGCPHYS    GCPhysNew;
                        if (Pte.n.u1Present)
                            GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
                        else
                            GCPhysNew = NIL_RTGCPHYS;
                        if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                        {
                            if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                                pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%VGp\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
                else
                {
                    /* not-present. */
                    offPage = 0;
                    AssertRC(rc);
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                        {
                            pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
            }
            else
            {
                /*
                 * 2/4MB page.
                 */
                RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & GST_PDE_BIG_PG_MASK);
                for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                     i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
                     i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                {
                    RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
                    if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                            pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                        AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                         ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%VGp\n",
                                          pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                          pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                        pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                        pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                    }
                }
            } /* pde type */
        }
        else
        {
            /* not-present: clear the range and stop at the page table boundary. */
            for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
                 cPages && iPage < pCur->cPages;
                 cPages--, iPage++, GCPtr += PAGE_SIZE)
            {
                if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                {
                    pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
                    pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                    pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                }
            }
            offPage = 0;
        }
    } /* for pages in virtual mapping. */

    return 0;
}
#endif /* 32BIT, PAE and AMD64 */


/**
 * Updates the virtual page access handlers.
 *
 * @returns true if bits were flushed.
 * @returns false if bits weren't flushed.
 * @param   pVM     VM handle.
 * @param   cr4     The cr4 register value.
 */
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    /** @todo
     * In theory this is not sufficient: the guest can change a single page in a range with invlpg
     */

    /*
     * Resolve any virtual address based access handlers to GC physical addresses.
     * This should be fairly quick.
     */
    PGMHVUSTATE State;

    pgmLock(pVM);
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualUpdate), a);
    State.pVM   = pVM;
    State.fTodo = pVM->pgm.s.fSyncFlags;
    State.cr4   = cr4;
    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualUpdate), a);

    /*
     * Set / reset bits?
     */
    if (State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
    {
        STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualReset), b);
        Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
        RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
        STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualReset), b);
    }
    pgmUnlock(pVM);

    return !!(State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);

#else /* real / protected */
    return false;
#endif
}


#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)

/**
 * Write access handler for the Guest CR3 page in 32-bit mode.
 *
 * This will try to interpret the instruction; if that fails, it falls back to
 * the recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Check if the modified PDEs are present and conflict with our mappings.
         */
        const RTGCUINTPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
        const unsigned iPD1 = offPD / sizeof(X86PDE);
        const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDE);
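        /* iPD1/iPD2 are the first and last directory entries touched by the
           interpreted write (cb is at most 8 bytes); only those get checked. */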

        Assert(cb > 0 && cb <= 8);
        Assert(iPD1 < RT_ELEMENTS(pVM->pgm.s.CTXSUFF(pGuestPD)->a));    /// @todo R3/R0 separation.
        Assert(iPD2 < RT_ELEMENTS(pVM->pgm.s.CTXSUFF(pGuestPD)->a));

#ifdef DEBUG
        Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD1, iPD1 << X86_PD_SHIFT));
        if (iPD1 != iPD2)
            Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD2, iPD2 << X86_PD_SHIFT));
#endif

        if (!pVM->pgm.s.fMappingsFixed)
        {
            PX86PD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
            if (    (   pPDSrc->a[iPD1].n.u1Present
                     && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
                ||  (   iPD1 != iPD2
                     && pPDSrc->a[iPD2].n.u1Present
                     && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
               )
            {
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteConflict);
                VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                if (rc == VINF_SUCCESS)
                    rc = VINF_PGM_SYNC_CR3;
                Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
                return rc;
            }
        }

        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
    }
    else
    {
        Assert(VBOX_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
        Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
    }
    return rc;
}

#endif /* PGM_TYPE_32BIT && !IN_RING3 */


#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)

/**
 * Write access handler for the Guest CR3 page in PAE mode.
 *
 * This will try to interpret the instruction; if that fails, it falls back to
 * the recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Check if any of the PDs have changed.
         * We'll simply check all of them instead of figuring out which one/two to check.
         */
        for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
        {
            if (    CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].n.u1Present
                &&  (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK)
                 != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
            {
                /*
                 * The PDPE has changed.
                 * We will schedule a monitoring update for the next TLB Flush,
                 * InvalidatePage or SyncCR3.
                 *
                 * This isn't perfect, because a lazy page sync might be dealing with a
                 * half-updated PDPE. However, we assume that the guest OS is disabling
                 * interrupts and being extremely careful (cmpxchg8b) when updating a
                 * PDPE where it's executing.
                 */
                pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
                Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%VGp\n",
                     i, CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
            }
        }

        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
    }
    else
    {
        Assert(VBOX_FAILURE(rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
    }
    Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
    return rc;
}


/**
 * Write access handler for the Guest PDs in PAE mode.
 *
 * This will try to interpret the instruction; if that fails, it falls back to
 * the recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Figure out which of the 4 PDs this is.
         */
        RTGCUINTPTR i;
        for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
            if (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
            {
                PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
                const RTGCUINTPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
                const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
                const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);

                Assert(cb > 0 && cb <= 8);
                Assert(iPD1 < X86_PG_PAE_ENTRIES);
                Assert(iPD2 < X86_PG_PAE_ENTRIES);

#ifdef DEBUG
                Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%VGv)\n",
                     i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
                if (iPD1 != iPD2)
                    Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%VGv)\n",
                         i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
#endif

                if (!pVM->pgm.s.fMappingsFixed)
                {
                    if (    (   pPDSrc->a[iPD1].n.u1Present
                             && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
                        ||  (   iPD1 != iPD2
                             && pPDSrc->a[iPD2].n.u1Present
                             && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
                       )
                    {
                        Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
                        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteConflict);
                        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                        return VINF_PGM_SYNC_CR3;
                    }
                }
                break; /* ASSUMES no duplicate entries... */
            }
        Assert(i < 4);

        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
    }
    else
    {
        Assert(VBOX_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
        else
            Log(("pgmXXGstPaeWriteHandlerPD: returns %Rrc\n", rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
    }
    return rc;
}

#endif /* PGM_TYPE_PAE && !IN_RING3 */
