VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@14010

Last change on this file since 14010 was 14010, checked in by vboxsync, 16 years ago

#1865: PGM - one more down.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 46.7 KB
1/* $Id: PGMAllGst.h 14010 2008-11-10 13:38:57Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Guest Paging Template - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Defined Constants And Macros *
25*******************************************************************************/
26#undef GSTPT
27#undef PGSTPT
28#undef GSTPTE
29#undef PGSTPTE
30#undef GSTPD
31#undef PGSTPD
32#undef GSTPDE
33#undef PGSTPDE
34#undef GST_BIG_PAGE_SIZE
35#undef GST_BIG_PAGE_OFFSET_MASK
36#undef GST_PDE_PG_MASK
37#undef GST_PDE_BIG_PG_MASK
38#undef GST_PD_SHIFT
39#undef GST_PD_MASK
40#undef GST_PTE_PG_MASK
41#undef GST_PT_SHIFT
42#undef GST_PT_MASK
43#undef GST_TOTAL_PD_ENTRIES
44#undef GST_CR3_PAGE_MASK
45#undef GST_PDPE_ENTRIES
46#undef GST_PDPT_SHIFT
47#undef GST_PDPT_MASK
48#undef GST_PDPE_PG_MASK
49#undef GST_GET_PDE_BIG_PG_GCPHYS
50
51#if PGM_GST_TYPE == PGM_TYPE_REAL \
52 || PGM_GST_TYPE == PGM_TYPE_PROT
53# define GSTPT SHWPT
54# define PGSTPT PSHWPT
55# define GSTPTE SHWPTE
56# define PGSTPTE PSHWPTE
57# define GSTPD SHWPD
58# define PGSTPD PSHWPD
59# define GSTPDE SHWPDE
60# define PGSTPDE PSHWPDE
61# define GST_PTE_PG_MASK SHW_PTE_PG_MASK
62
63#elif PGM_GST_TYPE == PGM_TYPE_32BIT
64# define GSTPT X86PT
65# define PGSTPT PX86PT
66# define GSTPTE X86PTE
67# define PGSTPTE PX86PTE
68# define GSTPD X86PD
69# define PGSTPD PX86PD
70# define GSTPDE X86PDE
71# define PGSTPDE PX86PDE
72# define GST_BIG_PAGE_SIZE X86_PAGE_4M_SIZE
73# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_4M_OFFSET_MASK
74# define GST_PDE_PG_MASK X86_PDE_PG_MASK
75# define GST_PDE_BIG_PG_MASK X86_PDE4M_PG_MASK
76# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst) pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeGst)
77# define GST_PD_SHIFT X86_PD_SHIFT
78# define GST_PD_MASK X86_PD_MASK
79# define GST_TOTAL_PD_ENTRIES X86_PG_ENTRIES
80# define GST_PTE_PG_MASK X86_PTE_PG_MASK
81# define GST_PT_SHIFT X86_PT_SHIFT
82# define GST_PT_MASK X86_PT_MASK
83# define GST_CR3_PAGE_MASK X86_CR3_PAGE_MASK
84
85#elif PGM_GST_TYPE == PGM_TYPE_PAE \
86 || PGM_GST_TYPE == PGM_TYPE_AMD64
87# define GSTPT X86PTPAE
88# define PGSTPT PX86PTPAE
89# define GSTPTE X86PTEPAE
90# define PGSTPTE PX86PTEPAE
91# define GSTPD X86PDPAE
92# define PGSTPD PX86PDPAE
93# define GSTPDE X86PDEPAE
94# define PGSTPDE PX86PDEPAE
95# define GST_BIG_PAGE_SIZE X86_PAGE_2M_SIZE
96# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_2M_OFFSET_MASK
97# define GST_PDE_PG_MASK X86_PDE_PAE_PG_MASK_FULL
98# define GST_PDE_BIG_PG_MASK X86_PDE2M_PAE_PG_MASK
99# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst) (PdeGst.u & GST_PDE_BIG_PG_MASK)
100# define GST_PD_SHIFT X86_PD_PAE_SHIFT
101# define GST_PD_MASK X86_PD_PAE_MASK
102# if PGM_GST_TYPE == PGM_TYPE_PAE
103# define GST_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
104# define GST_PDPE_ENTRIES X86_PG_PAE_PDPE_ENTRIES
105# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK_FULL
106# define GST_PDPT_SHIFT X86_PDPT_SHIFT
107# define GST_PDPT_MASK X86_PDPT_MASK_PAE
108# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
109# define GST_CR3_PAGE_MASK X86_CR3_PAE_PAGE_MASK
110# else
111# define GST_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
112# define GST_PDPE_ENTRIES X86_PG_AMD64_PDPE_ENTRIES
113# define GST_PDPT_SHIFT X86_PDPT_SHIFT
114# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK_FULL
115# define GST_PDPT_MASK X86_PDPT_MASK_AMD64
116# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK_FULL
117# define GST_CR3_PAGE_MASK X86_CR3_AMD64_PAGE_MASK
118# endif
119# define GST_PT_SHIFT X86_PT_PAE_SHIFT
120# define GST_PT_MASK X86_PT_PAE_MASK
121#endif
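/*
 * Illustrative sketch (not authoritative): the mappings above let one copy of the
 * template code walk guest page tables for any guest mode. For instance, the
 * page-table indexing used further down,
 *
 *     unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
 *     GSTPTE   Pte  = pPT->a[iPTE];
 *
 * expands to a 10-bit index into 4-byte X86PTE entries for 32-bit guests and to
 * a 9-bit index into 8-byte X86PTEPAE entries for PAE and AMD64 guests.
 */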
122
123
124/*******************************************************************************
125* Internal Functions *
126*******************************************************************************/
127__BEGIN_DECLS
128PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
129PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
130PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE);
131PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
132PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
133PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
134PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
135PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
136#ifndef IN_RING3
137PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
138# if PGM_GST_TYPE == PGM_TYPE_PAE \
139 || PGM_GST_TYPE == PGM_TYPE_AMD64
140PGM_GST_DECL(int, PAEWriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
141# endif
142#endif
143__END_DECLS
144
145
146
147/**
148 * Gets effective Guest OS page information.
149 *
150 * When GCPtr is in a big page, the function will return as if it was a normal
151 * 4KB page. If distinguishing between big and normal pages becomes
152 * necessary at a later point, a PGMGstGetPageEx() will be created for that
153 * purpose.
154 *
155 * @returns VBox status.
156 * @param pVM VM Handle.
157 * @param GCPtr Guest Context virtual address of the page. Page aligned!
158 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
159 * @param pGCPhys Where to store the GC physical address of the page.
160 * This is page aligned.
161 */
162PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
163{
164#if PGM_GST_TYPE == PGM_TYPE_REAL \
165 || PGM_GST_TYPE == PGM_TYPE_PROT
166 /*
167 * Fake it.
168 */
169 if (pfFlags)
170 *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
171 if (pGCPhys)
172 *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
173 return VINF_SUCCESS;
174
175#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
176
177 /*
178 * Get the PDE.
179 */
180# if PGM_GST_TYPE == PGM_TYPE_32BIT
181 X86PDE Pde;
182 Pde.u = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
183
184#elif PGM_GST_TYPE == PGM_TYPE_PAE
185 X86PDEPAE Pde;
186 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
187
188 /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
189 * All the other bits in the PDPTE (r/w, u/s, nx) are only valid in long mode.
190 */
191 Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
192
193#elif PGM_GST_TYPE == PGM_TYPE_AMD64
194 PX86PML4E pPml4e;
195 X86PDPE Pdpe;
196 X86PDEPAE Pde;
197 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
198
199 Pde.u = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr, &pPml4e, &Pdpe);
200 Assert(pPml4e);
201 if (!(pPml4e->n.u1Present & Pdpe.n.u1Present))
202 return VERR_PAGE_TABLE_NOT_PRESENT;
203
204 /* Merge accessed, write, user and no-execute bits into the PDE. */
205 Pde.n.u1Accessed &= pPml4e->n.u1Accessed & Pdpe.lm.u1Accessed;
206 Pde.n.u1Write &= pPml4e->n.u1Write & Pdpe.lm.u1Write;
207 Pde.n.u1User &= pPml4e->n.u1User & Pdpe.lm.u1User;
208 Pde.n.u1NoExecute &= pPml4e->n.u1NoExecute & Pdpe.lm.u1NoExecute;
209# endif
210
211 /*
212 * Lookup the page.
213 */
214 if (!Pde.n.u1Present)
215 return VERR_PAGE_TABLE_NOT_PRESENT;
216
217 if ( !Pde.b.u1Size
218# if PGM_GST_TYPE != PGM_TYPE_AMD64
219 || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
220# endif
221 )
222 {
223 PGSTPT pPT;
224 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
225 if (RT_FAILURE(rc))
226 return rc;
227
228 /*
229 * Get PT entry and check presence.
230 */
231 const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
232 if (!Pte.n.u1Present)
233 return VERR_PAGE_NOT_PRESENT;
234
235 /*
236 * Store the result.
237 * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
238 * where the PDPE is simplified.
239 */
240 if (pfFlags)
241 {
242 *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
243 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
244# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
245 /* The NX bit is determined by a bitwise OR between the PT and PD */
246 if (fNoExecuteBitValid)
247 *pfFlags |= (Pte.u & Pde.u & X86_PTE_PAE_NX);
248# endif
249 }
250 if (pGCPhys)
251 *pGCPhys = Pte.u & GST_PTE_PG_MASK;
252 }
253 else
254 {
255 /*
256 * Map big to 4k PTE and store the result
257 */
258 if (pfFlags)
259 {
260 *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
261 | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
262# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
263 /* The NX bit is determined by a bitwise OR between the PT and PD */
264 if (fNoExecuteBitValid)
265 *pfFlags |= (Pde.u & X86_PTE_PAE_NX);
266# endif
267 }
268 if (pGCPhys)
269 *pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));
270 }
271 return VINF_SUCCESS;
272#else
273# error "shouldn't be here!"
274 /* something else... */
275 return VERR_NOT_SUPPORTED;
276#endif
277}
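/*
 * Illustrative sketch of the flag merging GetPage performs (hypothetical entry
 * values, not a call sequence taken from this file):
 *
 *     Pde.u = <PT phys addr>   | X86_PTE_P | X86_PTE_RW;               // supervisor-only
 *     Pte.u = <page phys addr> | X86_PTE_P | X86_PTE_RW | X86_PTE_US;
 *
 * R/W and U/S must be granted at every level, so ANDing the two entries yields
 * *pfFlags = X86_PTE_P | X86_PTE_RW (U/S is dropped because the PDE lacks it)
 * and *pGCPhys = <page phys addr>. For a big page the PDE supplies everything:
 * its PAT bit (X86_PDE4M_PAT, bit 12) is shifted down to the 4KB PTE position
 * (X86_PTE_PAT, bit 7), and the GCPtr bits between the 4KB and big-page
 * boundaries select which 4KB sub-page is returned in *pGCPhys.
 */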
278
279
280/**
281 * Modify page flags for a range of pages in the guest's tables
282 *
283 * The existing flags are ANDed with the fMask and ORed with the fFlags.
284 *
285 * @returns VBox status code.
286 * @param pVM VM handle.
287 * @param GCPtr Virtual address of the first page in the range. Page aligned!
288 * @param cb Size (in bytes) of the page range to apply the modification to. Page aligned!
289 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
290 * @param fMask The AND mask - page flags X86_PTE_*.
291 */
292PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
293{
294#if PGM_GST_TYPE == PGM_TYPE_32BIT \
295 || PGM_GST_TYPE == PGM_TYPE_PAE \
296 || PGM_GST_TYPE == PGM_TYPE_AMD64
297
298 for (;;)
299 {
300 /*
301 * Get the PD entry.
302 */
303# if PGM_GST_TYPE == PGM_TYPE_32BIT
304 PX86PDE pPde = pgmGstGet32bitPDEPtr(&pVM->pgm.s, GCPtr);
305
306# elif PGM_GST_TYPE == PGM_TYPE_PAE
307 /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present.
308 * All the other bits in the PDPTE (r/w, u/s, nx) are only valid in long mode.
309 */
310 PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
311 Assert(pPde);
312 if (!pPde)
313 return VERR_PAGE_TABLE_NOT_PRESENT;
314# elif PGM_GST_TYPE == PGM_TYPE_AMD64
315 /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
316 PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVM->pgm.s, GCPtr);
317 Assert(pPde);
318 if (!pPde)
319 return VERR_PAGE_TABLE_NOT_PRESENT;
320# endif
321 GSTPDE Pde = *pPde;
322 Assert(Pde.n.u1Present);
323 if (!Pde.n.u1Present)
324 return VERR_PAGE_TABLE_NOT_PRESENT;
325
326 if ( !Pde.b.u1Size
327# if PGM_GST_TYPE != PGM_TYPE_AMD64
328 || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
329# endif
330 )
331 {
332 /*
333 * 4KB Page table
334 *
335 * Walk page tables and pages till we're done.
336 */
337 PGSTPT pPT;
338 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
339 if (RT_FAILURE(rc))
340 return rc;
341
342 unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
343 while (iPTE < RT_ELEMENTS(pPT->a))
344 {
345 GSTPTE Pte = pPT->a[iPTE];
346 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
347 | (fFlags & ~GST_PTE_PG_MASK);
348 pPT->a[iPTE] = Pte;
349
350 /* next page */
351 cb -= PAGE_SIZE;
352 if (!cb)
353 return VINF_SUCCESS;
354 GCPtr += PAGE_SIZE;
355 iPTE++;
356 }
357 }
358 else
359 {
360 /*
361 * 4MB Page table
362 */
363# if PGM_GST_TYPE == PGM_TYPE_32BIT
364 Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
365# else
366 Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
367# endif
368 | (fFlags & ~GST_PTE_PG_MASK)
369 | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
370 *pPde = Pde;
371
372 /* advance */
373 const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
374 if (cbDone >= cb)
375 return VINF_SUCCESS;
376 cb -= cbDone;
377 GCPtr += cbDone;
378 }
379 }
380
381#else
382 /* real / protected mode: ignore. */
383 return VINF_SUCCESS;
384#endif
385}
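/*
 * Illustrative sketch of the fFlags/fMask arithmetic ModifyPage applies to each
 * entry (hypothetical parameter values, using flags already defined in this file):
 *
 *     make a range read-only:     fFlags = 0,                        fMask = ~(uint64_t)X86_PTE_RW
 *     strip user access:          fFlags = 0,                        fMask = ~(uint64_t)X86_PTE_US
 *     force present + writable:   fFlags = X86_PTE_P | X86_PTE_RW,   fMask = ~(uint64_t)0
 *
 * A 4KB entry becomes (Pte.u & (fMask | X86_PTE_PAE_PG_MASK)) | (fFlags & ~GST_PTE_PG_MASK),
 * so the physical page frame bits are always preserved while the flag bits are
 * first ANDed with fMask and then ORed with fFlags, as the function header states.
 */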
386
387
388/**
389 * Retrieve guest PDE information
390 *
391 * @returns VBox status code.
392 * @param pVM The virtual machine.
393 * @param GCPtr Guest context pointer
394 * @param pPDE Pointer to guest PDE structure
395 */
396PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE)
397{
398#if PGM_GST_TYPE == PGM_TYPE_32BIT \
399 || PGM_GST_TYPE == PGM_TYPE_PAE \
400 || PGM_GST_TYPE == PGM_TYPE_AMD64
401
402# if PGM_GST_TYPE == PGM_TYPE_32BIT
403 X86PDE Pde;
404 Pde.u = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
405# elif PGM_GST_TYPE == PGM_TYPE_PAE
406 X86PDEPAE Pde;
407 Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
408# elif PGM_GST_TYPE == PGM_TYPE_AMD64
409 X86PDEPAE Pde;
410 Pde.u = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr);
411# endif
412
413 pPDE->u = (X86PGPAEUINT)Pde.u;
414 return VINF_SUCCESS;
415#else
416 AssertFailed();
417 return VERR_NOT_IMPLEMENTED;
418#endif
419}
420
421
422
423/**
424 * Maps the CR3 into HMA in GC and locates it in HC.
425 *
426 * Note that a MapCR3 call is usually not followed by an UnmapCR3 call; whenever
427 * CR3 is updated we simply call MapCR3 again.
428 *
429 * @returns VBox status, no specials.
430 * @param pVM VM handle.
431 * @param GCPhysCR3 The physical address in the CR3 register.
432 */
433PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
434{
435#if PGM_GST_TYPE == PGM_TYPE_32BIT \
436 || PGM_GST_TYPE == PGM_TYPE_PAE \
437 || PGM_GST_TYPE == PGM_TYPE_AMD64
438
439 LogFlow(("MapCR3: %RGp\n", GCPhysCR3));
440
441 /*
442 * Map the page CR3 points at.
443 */
444 RTHCPHYS HCPhysGuestCR3;
445 RTHCPTR HCPtrGuestCR3;
446 int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
447 if (RT_SUCCESS(rc))
448 {
449 rc = PGMMap(pVM, (RTGCPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
450 if (RT_SUCCESS(rc))
451 {
452 PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
453# if PGM_GST_TYPE == PGM_TYPE_32BIT
454 pVM->pgm.s.pGuestPDR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
455# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
456 pVM->pgm.s.pGuestPDR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
457# endif
458 pVM->pgm.s.pGuestPDRC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;
459
460# elif PGM_GST_TYPE == PGM_TYPE_PAE
461 unsigned off = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
462 pVM->pgm.s.pGstPaePDPTR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
463# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
464 pVM->pgm.s.pGstPaePDPTR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
465# endif
466 pVM->pgm.s.pGstPaePDPTRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
467 Log(("Cached mapping %RGv\n", pVM->pgm.s.pGstPaePDPTRC));
468
469 /*
470 * Map the 4 PDs too.
471 */
472 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
473 RTGCPTR GCPtr = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
474 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
475 {
476 if (pGuestPDPT->a[i].n.u1Present)
477 {
478 RTHCPTR HCPtr;
479 RTHCPHYS HCPhys;
480 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
481 int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
482 if (RT_SUCCESS(rc2))
483 {
484 rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
485 AssertRCReturn(rc, rc);
486
487 pVM->pgm.s.apGstPaePDsR3[i] = (R3PTRTYPE(PX86PDPAE))HCPtr;
488# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
489 pVM->pgm.s.apGstPaePDsR0[i] = (R0PTRTYPE(PX86PDPAE))HCPtr;
490# endif
491 pVM->pgm.s.apGstPaePDsRC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
492 pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
493 PGM_INVL_PG(GCPtr); /** @todo This ends up calling HWACCMInvalidatePage, is that correct? */
494 continue;
495 }
496 AssertMsgFailed(("MapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
497 }
498
499 pVM->pgm.s.apGstPaePDsR3[i] = 0;
500# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
501 pVM->pgm.s.apGstPaePDsR0[i] = 0;
502# endif
503 pVM->pgm.s.apGstPaePDsRC[i] = 0;
504 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
505 PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */
506 }
507
508# elif PGM_GST_TYPE == PGM_TYPE_AMD64
509 pVM->pgm.s.pGstAmd64PML4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
510# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
511 pVM->pgm.s.pGstAmd64PML4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
512# endif
513 if (!HWACCMIsNestedPagingActive(pVM))
514 {
515 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
516 if (pVM->pgm.s.CTX_SUFF(pShwAmd64CR3))
517 {
518 /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
519 if (pVM->pgm.s.CTX_SUFF(pShwAmd64CR3)->enmKind != PGMPOOLKIND_FREE)
520 pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3)->GCPhys >> PAGE_SHIFT);
521 pVM->pgm.s.CTX_SUFF(pShwAmd64CR3) = 0;
522 pVM->pgm.s.pShwPaePml4R3 = 0;
523# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
524 pVM->pgm.s.pShwPaePml4R0 = 0;
525# endif
526 pVM->pgm.s.HCPhysPaePML4 = 0;
527 }
528
529 Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
530l_try_again:
531 rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
532 if (rc == VERR_PGM_POOL_FLUSHED)
533 {
534 Log(("MapCR3: Flush pool and try again\n"));
535 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
536 rc = pgmPoolSyncCR3(pVM);
537 AssertRC(rc);
538 goto l_try_again;
539 }
540# ifdef IN_RING0
541 pVM->pgm.s.pShwAmd64CR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
542# else
543 pVM->pgm.s.pShwAmd64CR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
544# endif
545 pVM->pgm.s.pShwPaePml4R3 = (R3PTRTYPE(PX86PML4))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
546# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
547 pVM->pgm.s.pShwPaePml4R0 = (R0PTRTYPE(PX86PML4))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
548# endif
549 pVM->pgm.s.HCPhysPaePML4 = pVM->pgm.s.CTX_SUFF(pShwAmd64CR3)->Core.Key;
550 }
551# endif
552 }
553 else
554 AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
555 }
556 else
557 AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
558
559#else /* prot/real stub */
560 int rc = VINF_SUCCESS;
561#endif
562 return rc;
563}
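/*
 * Sketch of the guest-context layout MapCR3 establishes for PAE guests (as set
 * up by the code above; offsets are relative to pVM->pgm.s.GCPtrCR3Mapping):
 *
 *     + 0*PAGE_SIZE   guest PDPT page (the PDPT itself sits at the CR3 page offset)
 *     + 1*PAGE_SIZE   guest PD 0  -> apGstPaePDs*[0]   (mapped only if PDPTE 0 is present)
 *     + 2*PAGE_SIZE   guest PD 1  -> apGstPaePDs*[1]
 *     + 3*PAGE_SIZE   guest PD 2  -> apGstPaePDs*[2]
 *     + 4*PAGE_SIZE   guest PD 3  -> apGstPaePDs*[3]
 *
 * PDs whose PDPTE is not present get cleared pointers and NIL_RTGCPHYS in
 * aGCPhysGstPaePDs[], exactly as done in the loop above.
 */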
564
565
566/**
567 * Unmaps the CR3.
568 *
569 * @returns VBox status, no specials.
570 * @param pVM VM handle.
571 */
572PGM_GST_DECL(int, UnmapCR3)(PVM pVM)
573{
574 LogFlow(("UnmapCR3\n"));
575
576 int rc = VINF_SUCCESS;
577
578#if PGM_GST_TYPE == PGM_TYPE_32BIT
579 pVM->pgm.s.pGuestPDR3 = 0;
580#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
581 pVM->pgm.s.pGuestPDR0 = 0;
582#endif
583 pVM->pgm.s.pGuestPDRC = 0;
584
585#elif PGM_GST_TYPE == PGM_TYPE_PAE
586 pVM->pgm.s.pGstPaePDPTR3 = 0;
587# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
588 pVM->pgm.s.pGstPaePDPTR0 = 0;
589# endif
590 pVM->pgm.s.pGstPaePDPTRC = 0;
591 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
592 {
593 pVM->pgm.s.apGstPaePDsR3[i] = 0;
594# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
595 pVM->pgm.s.apGstPaePDsR0[i] = 0;
596# endif
597 pVM->pgm.s.apGstPaePDsRC[i] = 0;
598 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
599 }
600
601#elif PGM_GST_TYPE == PGM_TYPE_AMD64
602 pVM->pgm.s.pGstAmd64PML4R3 = 0;
603# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
604 pVM->pgm.s.pGstAmd64PML4R0 = 0;
605# endif
606 if (!HWACCMIsNestedPagingActive(pVM))
607 {
608 pVM->pgm.s.pShwPaePml4R3 = 0;
609# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
610 pVM->pgm.s.pShwPaePml4R0 = 0;
611# endif
612 pVM->pgm.s.HCPhysPaePML4 = 0;
613 if (pVM->pgm.s.CTX_SUFF(pShwAmd64CR3))
614 {
615 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
616 pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3)->GCPhys >> PAGE_SHIFT);
617 pVM->pgm.s.pShwAmd64CR3R3 = 0;
618 pVM->pgm.s.pShwAmd64CR3R0 = 0;
619 }
620 }
621
622#else /* prot/real mode stub */
623 /* nothing to do */
624#endif
625 return rc;
626}
627
628
629#undef LOG_GROUP
630#define LOG_GROUP LOG_GROUP_PGM_POOL
631
632/**
633 * Registers physical page monitors for the necessary paging
634 * structures to detect conflicts with our guest mappings.
635 *
636 * This is always called after mapping CR3.
637 * This is never called with fixed mappings.
638 *
639 * @returns VBox status, no specials.
640 * @param pVM VM handle.
641 * @param GCPhysCR3 The physical address in the CR3 register.
642 */
643PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
644{
645 Assert(!pVM->pgm.s.fMappingsFixed);
646 int rc = VINF_SUCCESS;
647
648 /*
649 * Register/Modify write phys handler for guest's CR3 if it changed.
650 */
651#if PGM_GST_TYPE == PGM_TYPE_32BIT
652
653 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
654 {
655# ifndef PGMPOOL_WITH_MIXED_PT_CR3
656 const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
657 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
658 rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
659 else
660 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
661 pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
662 pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
663 pVM->pgm.s.pfnRCGstWriteHandlerCR3, 0,
664 pVM->pgm.s.pszR3GstWriteHandlerCR3);
665# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
666 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
667 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
668 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
669 ? PGMPOOL_IDX_PAE_PD
670 : PGMPOOL_IDX_PD,
671 GCPhysCR3);
672# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
673 if (RT_FAILURE(rc))
674 {
675 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
676 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
677 return rc;
678 }
679 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
680 }
681
682#elif PGM_GST_TYPE == PGM_TYPE_PAE
683 /* Monitor the PDPT page */
684 /*
685 * Register/Modify write phys handler for guest's CR3 if it changed.
686 */
687# ifndef PGMPOOL_WITH_MIXED_PT_CR3
688 AssertFailed();
689# endif
690 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
691 {
692 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
693 if (RT_FAILURE(rc))
694 {
695 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
696 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
697 return rc;
698 }
699 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
700 }
701
702 /*
703 * Do the 4 PDs.
704 */
705 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
706 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
707 {
708 if (pGuestPDPT->a[i].n.u1Present)
709 {
710 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
711 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
712 {
713 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
714
715 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
716 }
717
718 if (RT_FAILURE(rc))
719 {
720 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
721 rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
722 return rc;
723 }
724 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
725 }
726 else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
727 {
728 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
729 AssertRC(rc);
730 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
731 }
732 }
733
734#else
735 /* prot/real/amd64 mode stub */
736
737#endif
738 return rc;
739}
740
741/**
742 * Deregisters any physical page monitors installed by MonitorCR3.
743 *
744 * @returns VBox status code, no specials.
745 * @param pVM The VM handle.
746 */
747PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
748{
749 int rc = VINF_SUCCESS;
750
751 /*
752 * Deregister the access handlers.
753 *
754 * PGMSyncCR3 will reinstall it if required and PGMSyncCR3 will be executed
755 * before we enter GC again.
756 */
757#if PGM_GST_TYPE == PGM_TYPE_32BIT
758 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
759 {
760# ifndef PGMPOOL_WITH_MIXED_PT_CR3
761 rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
762 AssertRCReturn(rc, rc);
763# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
764 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
765 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
766 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
767 ? PGMPOOL_IDX_PAE_PD
768 : PGMPOOL_IDX_PD);
769 AssertRCReturn(rc, rc);
770# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
771 pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
772 }
773
774#elif PGM_GST_TYPE == PGM_TYPE_PAE
775 /* The PDPT page */
776# ifndef PGMPOOL_WITH_MIXED_PT_CR3
777 AssertFailed();
778# endif
779
780 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
781 {
782 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);
783 AssertRC(rc);
784 }
785
786 /* The 4 PDs. */
787 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
788 {
789 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
790 {
791 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
792 int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
793 AssertRC(rc2);
794 if (RT_FAILURE(rc2))
795 rc = rc2;
796 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
797 }
798 }
799#else
800 /* prot/real/amd64 mode stub */
801#endif
802 return rc;
803
804}
805
806#undef LOG_GROUP
807#define LOG_GROUP LOG_GROUP_PGM
808
809
810#if PGM_GST_TYPE == PGM_TYPE_32BIT \
811 || PGM_GST_TYPE == PGM_TYPE_PAE \
812 || PGM_GST_TYPE == PGM_TYPE_AMD64
813/**
814 * Updates one virtual handler range.
815 *
816 * @returns 0
817 * @param pNode Pointer to a PGMVIRTHANDLER.
818 * @param pvUser Pointer to a PGMVHUARGS structure (see PGM.cpp).
819 */
820static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
821{
822 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
823 PPGMHVUSTATE pState = (PPGMHVUSTATE)pvUser;
824 Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);
825
826#if PGM_GST_TYPE == PGM_TYPE_32BIT
827 PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pState->pVM->pgm.s);
828#endif
829
830 RTGCPTR GCPtr = pCur->Core.Key;
831#if PGM_GST_TYPE != PGM_TYPE_AMD64
832 /* skip all stuff above 4GB if not AMD64 mode. */
833 if (GCPtr >= _4GB)
834 return 0;
835#endif
836
837 unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
838 unsigned iPage = 0;
839 while (iPage < pCur->cPages)
840 {
841#if PGM_GST_TYPE == PGM_TYPE_32BIT
842 X86PDE Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
843#elif PGM_GST_TYPE == PGM_TYPE_PAE
844 X86PDEPAE Pde;
845 Pde.u = pgmGstGetPaePDE(&pState->pVM->pgm.s, GCPtr);
846#elif PGM_GST_TYPE == PGM_TYPE_AMD64
847 X86PDEPAE Pde;
848 Pde.u = pgmGstGetLongModePDE(&pState->pVM->pgm.s, GCPtr);
849#endif
850 if (Pde.n.u1Present)
851 {
852 if ( !Pde.b.u1Size
853# if PGM_GST_TYPE != PGM_TYPE_AMD64
854 || !(pState->cr4 & X86_CR4_PSE)
855# endif
856 )
857 {
858 /*
859 * Normal page table.
860 */
861 PGSTPT pPT;
862 int rc = PGM_GCPHYS_2_PTR(pState->pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
863 if (RT_SUCCESS(rc))
864 {
865 for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
866 iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
867 iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
868 {
869 GSTPTE Pte = pPT->a[iPTE];
870 RTGCPHYS GCPhysNew;
871 if (Pte.n.u1Present)
872 GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
873 else
874 GCPhysNew = NIL_RTGCPHYS;
875 if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
876 {
877 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
878 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
879#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
880 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
881 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
882 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
883 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
884#endif
885 pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
886 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
887 }
888 }
889 }
890 else
891 {
892 /* not-present. */
893 offPage = 0;
894 AssertRC(rc);
895 for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
896 iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
897 iPTE++, iPage++, GCPtr += PAGE_SIZE)
898 {
899 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
900 {
901 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
902#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
903 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
904 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
905 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
906 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
907#endif
908 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
909 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
910 }
911 }
912 }
913 }
914 else
915 {
916 /*
917 * 2/4MB page.
918 */
919 RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & GST_PDE_PG_MASK);
920 for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
921 i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
922 i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
923 {
924 RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
925 if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
926 {
927 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
928 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
929#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
930 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
931 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
932 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
933 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
934#endif
935 pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
936 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
937 }
938 }
939 } /* pde type */
940 }
941 else
942 {
943 /* not-present. */
944 for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
945 cPages && iPage < pCur->cPages;
946 iPage++, GCPtr += PAGE_SIZE)
947 {
948 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
949 {
950 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
951 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
952 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
953 }
954 }
955 offPage = 0;
956 }
957 } /* for pages in virtual mapping. */
958
959 return 0;
960}
961#endif /* 32BIT, PAE and AMD64 */
962
963
964/**
965 * Updates the virtual page access handlers.
966 *
967 * @returns true if bits were flushed.
968 * @returns false if bits weren't flushed.
969 * @param pVM VM handle.
971 * @param cr4 The cr4 register value.
972 */
973PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
974{
975#if PGM_GST_TYPE == PGM_TYPE_32BIT \
976 || PGM_GST_TYPE == PGM_TYPE_PAE \
977 || PGM_GST_TYPE == PGM_TYPE_AMD64
978
979 /** @todo
980 * In theory this is not sufficient: the guest can change a single page in a range with invlpg
981 */
982
983 /*
984 * Resolve any virtual address based access handlers to GC physical addresses.
985 * This should be fairly quick.
986 */
987 PGMHVUSTATE State;
988
989 pgmLock(pVM);
990 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
991 State.pVM = pVM;
992 State.fTodo = pVM->pgm.s.fSyncFlags;
993 State.cr4 = cr4;
994 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
995 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
996
997
998 /*
999 * Set / reset bits?
1000 */
1001 if (State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
1002 {
1003 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
1004 Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
1005 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
1006 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
1007 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
1008 }
1009 pgmUnlock(pVM);
1010
1011 return !!(State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);
1012
1013#else /* real / protected */
1014 return false;
1015#endif
1016}
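/*
 * Worked example for HandlerVirtualUpdate / VirtHandlerUpdateOne (hypothetical
 * addresses): a virtual handler covering 3 pages at GCPtr 0xa0000000 whose
 * guest PTEs currently map
 *
 *     page 0 -> GCPhys 0x00123000 (present)
 *     page 1 -> not present
 *     page 2 -> GCPhys 0x00480000 (present)
 *
 * ends up with aPhysToVirt[].Core.Key = { 0x00123000, NIL_RTGCPHYS, 0x00480000 }.
 * Any key that changed sets PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL, which makes the
 * second phase above reset the handler bits via pgmHandlerVirtualResetOne.
 */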
1017
1018
1019#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)
1020
1021/**
1022 * Write access handler for the Guest CR3 page in 32-bit mode.
1023 *
1024 * This will try to interpret the instruction; on failure it falls back to the recompiler.
1025 * Checks whether the changed PDEs are marked present and conflict with our
1026 * mappings. If there is a conflict, we'll switch to the host context and resolve it there.
1027 *
1028 * @returns VBox status code (appropriate for trap handling and GC return).
1029 * @param pVM VM Handle.
1030 * @param uErrorCode CPU Error code.
1031 * @param pRegFrame Trap register frame.
1032 * @param pvFault The fault address (cr2).
1033 * @param GCPhysFault The GC physical address corresponding to pvFault.
1034 * @param pvUser User argument.
1035 */
1036PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1037{
1038 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
1039
1040 /*
1041 * Try interpret the instruction.
1042 */
1043 uint32_t cb;
1044 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
1045 if (RT_SUCCESS(rc) && cb)
1046 {
1047 /*
1048 * Check if the modified PDEs are present and mappings.
1049 */
1050 const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
1051 const unsigned iPD1 = offPD / sizeof(X86PDE);
1052 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDE);
1053
1054 Assert(cb > 0 && cb <= 8);
1055 Assert(iPD1 < X86_PG_ENTRIES);
1056 Assert(iPD2 < X86_PG_ENTRIES);
1057
1058#ifdef DEBUG
1059 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD1, iPD1 << X86_PD_SHIFT));
1060 if (iPD1 != iPD2)
1061 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD2, iPD2 << X86_PD_SHIFT));
1062#endif
1063
1064 if (!pVM->pgm.s.fMappingsFixed)
1065 {
1066 PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
1067 if ( ( pPDSrc->a[iPD1].n.u1Present
1068 && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
1069 || ( iPD1 != iPD2
1070 && pPDSrc->a[iPD2].n.u1Present
1071 && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
1072 )
1073 {
1074 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
1075 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1076 if (rc == VINF_SUCCESS)
1077 rc = VINF_PGM_SYNC_CR3;
1078 Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
1079 return rc;
1080 }
1081 }
1082
1083 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
1084 }
1085 else
1086 {
1087 Assert(RT_FAILURE(rc));
1088 if (rc == VERR_EM_INTERPRETER)
1089 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
1090 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
1091 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
1092 }
1093 return rc;
1094}
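/*
 * Sketch of the PDE range arithmetic above (hypothetical write): a 4-byte store
 * at CR3-page offset 0x010 gives iPD1 = 0x010 / sizeof(X86PDE) = 4 and
 * iPD2 = (0x010 + 4 - 1) / sizeof(X86PDE) = 4, so a single PDE is checked; an
 * 8-byte store at offset 0x01c spans iPD1 = 7 and iPD2 = 8, which is why both
 * entries are tested against the hypervisor mappings before the write stands.
 */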
1095
1096#endif /* PGM_TYPE_32BIT && !IN_RING3 */
1097#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)
1098
1099/**
1100 * Write access handler for the Guest CR3 page in PAE mode.
1101 *
1102 * This will try to interpret the instruction; on failure it falls back to the recompiler.
1103 * Checks whether the changed PDEs are marked present and conflict with our
1104 * mappings. If there is a conflict, we'll switch to the host context and resolve it there.
1105 *
1106 * @returns VBox status code (appropriate for trap handling and GC return).
1107 * @param pVM VM Handle.
1108 * @param uErrorCode CPU Error code.
1109 * @param pRegFrame Trap register frame.
1110 * @param pvFault The fault address (cr2).
1111 * @param GCPhysFault The GC physical address corresponding to pvFault.
1112 * @param pvUser User argument.
1113 */
1114PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1115{
1116 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
1117
1118 /*
1119 * Try interpret the instruction.
1120 */
1121 uint32_t cb;
1122 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
1123 if (RT_SUCCESS(rc) && cb)
1124 {
1125 /*
1126 * Check if any of the PDs have changed.
1127 * We'll simply check all of them instead of figuring out which one/two to check.
1128 */
1129 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
1130 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
1131 {
1132 if ( pGuestPDPT->a[i].n.u1Present
1133 && (pGuestPDPT->a[i].u & X86_PDPE_PG_MASK)
1134 != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
1135 {
1136 /*
1137 * The PDPE has changed.
1138 * We will schedule a monitoring update for the next TLB Flush,
1139 * InvalidatePage or SyncCR3.
1140 *
1141 * This isn't perfect, because a lazy page sync might be dealing with a half-
1142 * updated PDPE. However, we assume that the guest OS is disabling interrupts
1143 * and being extremely careful (cmpxchg8b) when updating a PDPE where it's
1144 * executing.
1145 */
1146 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1147 Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
1148 i, pGuestPDPT->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
1149 }
1150 }
1151
1152 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
1153 }
1154 else
1155 {
1156 Assert(RT_FAILURE(rc));
1157 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
1158 if (rc == VERR_EM_INTERPRETER)
1159 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
1160 }
1161 Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
1162 return rc;
1163}
1164
1165
1166/**
1167 * Write access handler for the Guest PDs in PAE mode.
1168 *
1169 * This will try to interpret the instruction; on failure it falls back to the recompiler.
1170 * Checks whether the changed PDEs are marked present and conflict with our
1171 * mappings. If there is a conflict, we'll switch to the host context and resolve it there.
1172 *
1173 * @returns VBox status code (appropriate for trap handling and GC return).
1174 * @param pVM VM Handle.
1175 * @param uErrorCode CPU Error code.
1176 * @param pRegFrame Trap register frame.
1177 * @param pvFault The fault address (cr2).
1178 * @param GCPhysFault The GC physical address corresponding to pvFault.
1179 * @param pvUser User argument.
1180 */
1181PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1182{
1183 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
1184
1185 /*
1186 * Try interpret the instruction.
1187 */
1188 uint32_t cb;
1189 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
1190 if (RT_SUCCESS(rc) && cb)
1191 {
1192 /*
1193 * Figure out which of the 4 PDs this is.
1194 */
1195 RTGCPTR i;
1196 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
1197 for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
1198 if (pGuestPDPT->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
1199 {
1200 PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
1201 const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
1202 const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
1203 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);
1204
1205 Assert(cb > 0 && cb <= 8);
1206 Assert(iPD1 < X86_PG_PAE_ENTRIES);
1207 Assert(iPD2 < X86_PG_PAE_ENTRIES);
1208
1209# ifdef LOG_ENABLED
1210 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",
1211 i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
1212 if (iPD1 != iPD2)
1213 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",
1214 i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
1215# endif
1216
1217 if (!pVM->pgm.s.fMappingsFixed)
1218 {
1219 if ( ( pPDSrc->a[iPD1].n.u1Present
1220 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
1221 || ( iPD1 != iPD2
1222 && pPDSrc->a[iPD2].n.u1Present
1223 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
1224 )
1225 {
1226 Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
1227 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
1228 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1229 return VINF_PGM_SYNC_CR3;
1230 }
1231 }
1232 break; /* ASSUMES no duplicate entries... */
1233 }
1234 Assert(i < 4);
1235
1236 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
1237 }
1238 else
1239 {
1240 Assert(RT_FAILURE(rc));
1241 if (rc == VERR_EM_INTERPRETER)
1242 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
1243 else
1244 Log(("pgmXXGstPaeWriteHandlerPD: returns %Rrc\n", rc));
1245 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
1246 }
1247 return rc;
1248}
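/*
 * Sketch of the PAE variant of the same arithmetic (hypothetical write): PAE
 * PDEs are 8 bytes, so a 4-byte store at PD-page offset 0xff8 hits
 * iPD1 = iPD2 = 0xff8 / sizeof(X86PDEPAE) = 0x1ff, the last of the 512 entries,
 * and the guest virtual range it controls starts at
 * (i << X86_PDPT_SHIFT) | (0x1ff << X86_PD_PAE_SHIFT), i.e. the top 2MB of the
 * 1GB region selected by PDPTE i.
 */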
1249
1250#endif /* PGM_TYPE_PAE && !IN_RING3 */
1251